changeset 49697:59c4713c5d21

Merge
author prr
date Fri, 13 Apr 2018 09:04:18 -0700
parents 508e9f6632fd fcff2daa6b1e
children 4c0c018a953f
files make/lib/Awt2dLibraries.gmk src/hotspot/share/gc/g1/concurrentMarkThread.cpp src/hotspot/share/gc/g1/concurrentMarkThread.hpp src/hotspot/share/gc/g1/concurrentMarkThread.inline.hpp src/hotspot/share/gc/g1/g1CardLiveData.cpp src/hotspot/share/gc/g1/g1CardLiveData.hpp src/hotspot/share/gc/g1/g1CardLiveData.inline.hpp src/java.security.jgss/unix/native/libj2gss/NativeFunc.c src/java.security.jgss/unix/native/libj2gss/NativeFunc.h test/jdk/ProblemList.txt test/langtools/tools/javac/diags/examples/PreviewPlural/Bar.java test/langtools/tools/javac/diags/examples/PreviewPluralAdditional/Bar.java
diffstat 474 files changed, 12508 insertions(+), 7153 deletions(-)
--- a/make/autoconf/flags-cflags.m4	Thu Apr 12 16:25:29 2018 -0700
+++ b/make/autoconf/flags-cflags.m4	Fri Apr 13 09:04:18 2018 -0700
@@ -453,6 +453,7 @@
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     ALWAYS_DEFINES_JDK="-DWIN32_LEAN_AND_MEAN -D_CRT_SECURE_NO_DEPRECATE \
         -D_CRT_NONSTDC_NO_DEPRECATE -DWIN32 -DIAL"
+    ALWAYS_DEFINES_JVM="-DNOMINMAX"
   fi
 
   ###############################################################################
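For context on the new ALWAYS_DEFINES_JVM value: <windows.h> defines min and max as macros unless NOMINMAX is set, which breaks ordinary C++ calls such as std::min/std::max in the HotSpot sources. A minimal stand-alone illustration (example program only, not part of the build):

#define NOMINMAX            // must be defined before including windows.h
#include <windows.h>
#include <algorithm>
#include <cstdio>

int main() {
  int smaller = std::min(3, 7);  // parses as a normal call, not a macro
  std::printf("%d\n", smaller);  // prints 3
  return 0;
}
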
--- a/make/autoconf/libraries.m4	Thu Apr 12 16:25:29 2018 -0700
+++ b/make/autoconf/libraries.m4	Fri Apr 13 09:04:18 2018 -0700
@@ -114,17 +114,7 @@
   fi
 
   # Math library
-  if test "x$OPENJDK_TARGET_OS" != xsolaris; then
-    BASIC_JVM_LIBS="$LIBM"
-  else
-    # FIXME: This hard-coded path is not really proper.
-    if test "x$OPENJDK_TARGET_CPU" = xx86_64; then
-      BASIC_SOLARIS_LIBM_LIBS="/usr/lib/amd64/libm.so.1"
-    elif test "x$OPENJDK_TARGET_CPU" = xsparcv9; then
-      BASIC_SOLARIS_LIBM_LIBS="/usr/lib/sparcv9/libm.so.1"
-    fi
-    BASIC_JVM_LIBS="$BASIC_SOLARIS_LIBM_LIBS"
-  fi
+  BASIC_JVM_LIBS="$LIBM"
 
   # Dynamic loading library
   if test "x$OPENJDK_TARGET_OS" = xlinux || test "x$OPENJDK_TARGET_OS" = xsolaris || test "x$OPENJDK_TARGET_OS" = xaix; then
--- a/make/autoconf/platform.m4	Thu Apr 12 16:25:29 2018 -0700
+++ b/make/autoconf/platform.m4	Fri Apr 13 09:04:18 2018 -0700
@@ -60,6 +60,12 @@
       VAR_CPU_BITS=64
       VAR_CPU_ENDIAN=little
       ;;
+    ia64)
+      VAR_CPU=ia64
+      VAR_CPU_ARCH=ia64
+      VAR_CPU_BITS=64
+      VAR_CPU_ENDIAN=little
+      ;;
     m68k)
       VAR_CPU=m68k
       VAR_CPU_ARCH=m68k
--- a/make/hotspot/lib/CompileJvm.gmk	Thu Apr 12 16:25:29 2018 -0700
+++ b/make/hotspot/lib/CompileJvm.gmk	Fri Apr 13 09:04:18 2018 -0700
@@ -113,6 +113,11 @@
   else ifeq ($(OPENJDK_TARGET_CPU), sparcv9)
     JVM_CFLAGS += $(TOPDIR)/src/hotspot/os_cpu/solaris_sparc/solaris_sparc.il
   endif
+  # Exclude warnings in devstudio 12.6
+  ifeq ($(CC_VERSION_NUMBER), 5.15)
+    DISABLED_WARNINGS_solstudio := SEC_ARR_OUTSIDE_BOUND_READ \
+      SEC_ARR_OUTSIDE_BOUND_WRITE
+  endif
 endif
 
 ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), solaris-sparcv9)
@@ -154,6 +159,7 @@
     vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     arguments.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
     DISABLED_WARNINGS_clang := tautological-compare, \
+    DISABLED_WARNINGS_solstudio := $(DISABLED_WARNINGS_solstudio), \
     DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
         1540-1088 1500-010, \
     ASFLAGS := $(JVM_ASFLAGS), \
--- a/make/lib/Awt2dLibraries.gmk	Thu Apr 12 16:25:29 2018 -0700
+++ b/make/lib/Awt2dLibraries.gmk	Fri Apr 13 09:04:18 2018 -0700
@@ -403,11 +403,7 @@
     LDFLAGS := $(LDFLAGS_JDKLIB) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_unix := -L$(INSTALL_LIBRARIES_HERE), \
-    LDFLAGS_solaris := /usr/lib$(OPENJDK_TARGET_CPU_ISADIR)/libm.so.2, \
-    LIBS_unix := -lawt -ljvm -ljava $(LCMS_LIBS), \
-    LIBS_linux := $(LIBM), \
-    LIBS_macosx := $(LIBM), \
-    LIBS_aix := $(LIBM),\
+    LIBS_unix := -lawt -ljvm -ljava $(LCMS_LIBS) $(LIBM), \
     LIBS_windows := $(WIN_AWT_LIB) $(WIN_JAVA_LIB), \
 ))
 
--- a/make/lib/Lib-java.security.jgss.gmk	Thu Apr 12 16:25:29 2018 -0700
+++ b/make/lib/Lib-java.security.jgss.gmk	Fri Apr 13 09:04:18 2018 -0700
@@ -27,25 +27,22 @@
 
 ################################################################################
 
-ifneq ($(OPENJDK_TARGET_OS), windows)
-  LIBJ2GSS_SRC := $(TOPDIR)/src/java.security.jgss/share/native/libj2gss \
-      $(TOPDIR)/src/java.security.jgss/$(OPENJDK_TARGET_OS_TYPE)/native/libj2gss \
-      #
+LIBJ2GSS_SRC := $(TOPDIR)/src/java.security.jgss/share/native/libj2gss \
+  #
 
-  $(eval $(call SetupJdkLibrary, BUILD_LIBJ2GSS, \
-      NAME := j2gss, \
-      SRC := $(LIBJ2GSS_SRC), \
-      OPTIMIZATION := LOW, \
-      CFLAGS := $(CFLAGS_JDKLIB) $(addprefix -I, $(LIBJ2GSS_SRC)) \
-          $(LIBJAVA_HEADER_FLAGS) \
-          -I$(SUPPORT_OUTPUTDIR)/headers/java.security.jgss, \
-      LDFLAGS := $(LDFLAGS_JDKLIB) \
-          $(call SET_SHARED_LIBRARY_ORIGIN), \
-      LIBS := $(LIBDL), \
-  ))
+$(eval $(call SetupJdkLibrary, BUILD_LIBJ2GSS, \
+    NAME := j2gss, \
+    SRC := $(LIBJ2GSS_SRC), \
+    OPTIMIZATION := LOW, \
+    CFLAGS := $(CFLAGS_JDKLIB) $(addprefix -I, $(LIBJ2GSS_SRC)) \
+        $(LIBJAVA_HEADER_FLAGS) \
+        -I$(SUPPORT_OUTPUTDIR)/headers/java.security.jgss, \
+    LDFLAGS := $(LDFLAGS_JDKLIB) \
+        $(call SET_SHARED_LIBRARY_ORIGIN), \
+    LIBS := $(LIBDL), \
+))
 
-  TARGETS += $(BUILD_LIBJ2GSS)
-endif
+TARGETS += $(BUILD_LIBJ2GSS)
 
 ################################################################################
 
--- a/make/test/JtregNativeHotspot.gmk	Thu Apr 12 16:25:29 2018 -0700
+++ b/make/test/JtregNativeHotspot.gmk	Fri Apr 13 09:04:18 2018 -0700
@@ -65,8 +65,11 @@
       exeinvoke.c exestack-gap.c
 endif
 
+BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exesigtest := -ljvm
+
 ifeq ($(OPENJDK_TARGET_OS), windows)
     BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
+    BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c
 endif
 
 $(eval $(call SetupTestFilesCompilation, BUILD_HOTSPOT_JTREG_LIBRARIES, \
--- a/src/bsd/doc/man/java.1	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/bsd/doc/man/java.1	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 '\" t
-.\" Copyright (c) 1994, 2015, Oracle and/or its affiliates. All rights reserved.
+.\" Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved.
 .\" DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 .\"
 .\" This code is free software; you can redistribute it and/or modify it
@@ -1178,65 +1178,6 @@
 .PP
 These options control the runtime behavior of the Java HotSpot VM\&.
 .PP
-\-XX:+CheckEndorsedAndExtDirs
-.RS 4
-Enables the option to prevent the
-\fBjava\fR
-command from running a Java application if it uses the endorsed\-standards override mechanism or the extension mechanism\&. This option checks if an application is using one of these mechanisms by checking the following:
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBjava\&.ext\&.dirs\fR
-or
-\fBjava\&.endorsed\&.dirs\fR
-system property is set\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/endorsed\fR
-directory exists and is not empty\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The
-\fBlib/ext\fR
-directory contains any JAR files other than those of the JDK\&.
-.RE
-.sp
-.RS 4
-.ie n \{\
-\h'-04'\(bu\h'+03'\c
-.\}
-.el \{\
-.sp -1
-.IP \(bu 2.3
-.\}
-The system\-wide platform\-specific extension directory contains any JAR files\&.
-.RE
-.RE
-.PP
 \-XX:+DisableAttachMechanism
 .RS 4
 Enables the option that disables the mechanism that lets tools attach to the JVM\&. By default, this option is disabled, meaning that the attach mechanism is enabled and you can use tools such as
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Fri Apr 13 09:04:18 2018 -0700
@@ -995,8 +995,10 @@
 
 source_hpp %{
 
+#include "asm/macroAssembler.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "opto/addnode.hpp"
 
 class CallStubImpl {
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -35,8 +35,9 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
 #include "opto/node.hpp"
@@ -46,7 +47,6 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
@@ -173,7 +173,7 @@
   // instruction.
   if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
     // Move narrow OOP
-    narrowOop n = oopDesc::encode_heap_oop((oop)o);
+    narrowOop n = CompressedOops::encode((oop)o);
     Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
     Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
     instructions = 2;
@@ -3712,7 +3712,7 @@
   }
 }
 
-// Algorithm must match oop.inline.hpp encode_heap_oop.
+// Algorithm must match CompressedOops::encode.
 void MacroAssembler::encode_heap_oop(Register d, Register s) {
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 #define CPU_AARCH64_VM_NATIVEINST_AARCH64_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -31,7 +31,7 @@
 class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
 protected:
   void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
-                                       Register addr, Register count, , int callee_saved_regs);
+                                       Register addr, Register count, int callee_saved_regs);
   void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                         Register addr, Register count, Register tmp);
 };
--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -32,7 +32,7 @@
 class BarrierSetAssembler: public CHeapObj<mtGC> {
 public:
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register addr, Register count, , int callee_saved_regs) {}
+                                  Register addr, Register count, int callee_saved_regs) {}
   virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                   Register addr, Register count, Register tmp) {}
 };
--- a/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -44,6 +44,7 @@
 void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register addr, Register count, Register tmp) {
   BLOCK_COMMENT("CardTablePostBarrier");
+  BarrierSet* bs = Universe::heap()->barrier_set();
   CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
   CardTable* ct = ctbs->card_table();
   assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
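The post barrier assembled here dirties the card covering a just-updated reference field. In scalar form the idea is roughly the following (512-byte cards assumed; a sketch, not the emitted assembly):

#include <cstdint>

static const int kCardShift = 9;   // 512-byte cards, the usual default
static uint8_t*  byte_map_base;    // biased card-table base, set up elsewhere

static void post_barrier(void* field_addr) {
  // Mark the card for this address so the GC rescans it for cross-region refs.
  byte_map_base[(uintptr_t)field_addr >> kCardShift] = 0;  // 0 == dirty
}
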
--- a/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -31,7 +31,7 @@
 class ModRefBarrierSetAssembler: public BarrierSetAssembler {
 protected:
   virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
-                                               Register addr, Register count, , int callee_saved_regs) {}
+                                               Register addr, Register count, int callee_saved_regs) {}
   virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register addr, Register count, Register tmp) {}
 
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -37,7 +37,7 @@
 
 #define __ _masm->
 
-Interpreter::SignatureHandlerGenerator::SignatureHandlerGenerator(
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
     const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
   _masm = new MacroAssembler(buffer);
   _abi_offset = 0;
--- a/src/hotspot/cpu/arm/nativeInst_arm.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/nativeInst_arm.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define CPU_ARM_VM_NATIVEINST_ARM_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/cpu/arm/nativeInst_arm_64.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/nativeInst_arm_64.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,8 +27,9 @@
 #include "code/codeCache.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_arm.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -105,7 +106,7 @@
     uintptr_t nx = 0;
     int val_size = 32;
     if (oop_addr != NULL) {
-      narrowOop encoded_oop = oopDesc::encode_heap_oop(*oop_addr);
+      narrowOop encoded_oop = CompressedOops::encode(*oop_addr);
       nx = encoded_oop;
     } else if (metadata_addr != NULL) {
       assert((*metadata_addr)->is_klass(), "expected Klass");
@@ -240,4 +241,3 @@
   assert(NativeCall::is_call_before(return_address), "must be");
   return nativeCall_at(call_for(return_address));
 }
-
--- a/src/hotspot/cpu/arm/nativeInst_arm_64.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/nativeInst_arm_64.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 
 #include "asm/macroAssembler.hpp"
 #include "code/codeCache.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,8 @@
 #include "assembler_arm.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_arm.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -40,7 +41,7 @@
       uintptr_t d = ni->data();
       guarantee((d >> 32) == 0, "not narrow oop");
       narrowOop no = d;
-      oop o = oopDesc::decode_heap_oop(no);
+      oop o = CompressedOops::decode(no);
       guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
     } else {
       ni->set_data((intptr_t)x);
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -2877,7 +2877,7 @@
     // 'to' is the beginning of the region
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_epilogue(this, decorators, true, to, count, tmp);
+    bs->arraycopy_epilogue(_masm, decorators, true, to, count, tmp);
 
     if (status) {
       __ mov(R0, 0); // OK
@@ -2954,7 +2954,7 @@
     }
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
 
     // save arguments for barrier generation (after the pre barrier)
     __ mov(saved_count, count);
@@ -3220,7 +3220,7 @@
     DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
 
     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
-    bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
+    bs->arraycopy_prologue(_masm, decorators, true, to, count, callee_saved_regs);
 
 #ifndef AARCH64
     const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
@@ -3298,7 +3298,7 @@
     __ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
     __ mov(R12, copied); // count arg scratched by post barrier
 
-    bs->arraycopy_epilogue(this, decorators, true, to, R12, R3);
+    bs->arraycopy_epilogue(_masm, decorators, true, to, R12, R3);
 
     assert_different_registers(R3,R12,LR,copied,saved_count);
     inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
--- a/src/hotspot/cpu/ppc/frame_ppc.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/ppc/frame_ppc.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -384,7 +384,7 @@
 
   // Constructors
   inline frame(intptr_t* sp);
-  frame(intptr_t* sp, address pc);
+  inline frame(intptr_t* sp, address pc);
   inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
 
  private:
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,8 +27,10 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_ppc.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/ostream.hpp"
@@ -194,7 +196,7 @@
   CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
   if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
     narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
-    return cast_from_oop<intptr_t>(oopDesc::decode_heap_oop(no));
+    return cast_from_oop<intptr_t>(CompressedOops::decode(no));
   } else {
     assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
 
@@ -415,4 +417,3 @@
 
   *(address*)(ctable + destination_toc_offset()) = new_destination;
 }
-
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 #define CPU_PPC_VM_NATIVEINST_PPC_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/ppc/relocInfo_ppc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,8 +27,9 @@
 #include "asm/assembler.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_ppc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -57,7 +58,7 @@
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
              "how to encode else?");
       narrowOop no = (type() == relocInfo::oop_type) ?
-        oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+          CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
       nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
     }
   } else {
--- a/src/hotspot/cpu/s390/frame_s390.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/s390/frame_s390.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -465,10 +465,10 @@
  // Constructors
 
  public:
-  frame(intptr_t* sp);
+  inline frame(intptr_t* sp);
   // To be used, if sp was not extended to match callee's calling convention.
-  frame(intptr_t* sp, address pc);
-  frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
+  inline frame(intptr_t* sp, address pc);
+  inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
 
   // Access frame via stack pointer.
   inline intptr_t* sp_addr_at(int index) const  { return &sp()[index]; }
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -33,6 +33,7 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
@@ -1286,7 +1287,7 @@
 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_load_const_32to64(pos, no);
 }
 
@@ -1304,7 +1305,7 @@
 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_compare_immediate_32(pos, no);
 }
 
--- a/src/hotspot/cpu/s390/nativeInst_s390.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/s390/nativeInst_s390.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -29,7 +29,6 @@
 #define CPU_S390_VM_NATIVEINST_S390_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -41,17 +41,6 @@
 REGISTER_DECLARATION(FloatRegister, Ftos_d1, F0); // for 1st part of double
 REGISTER_DECLARATION(FloatRegister, Ftos_d2, F1); // for 2nd part of double
 
-#ifndef DONT_USE_REGISTER_DEFINES
-#define Otos_i  O0
-#define Otos_l  O0
-#define Otos_l1 O0
-#define Otos_l2 O1
-#define Ftos_f  F0
-#define Ftos_d  F0
-#define Ftos_d1 F0
-#define Ftos_d2 F1
-#endif // DONT_USE_REGISTER_DEFINES
-
 class InterpreterMacroAssembler: public MacroAssembler {
  protected:
   // Interpreter specific version of call_VM_base
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -998,8 +998,13 @@
 
 
 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
-  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
-  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+#ifdef ASSERT
+  {
+    ThreadInVMfromUnknown tiv;
+    assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+    assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
+  }
+#endif
   int oop_index = oop_recorder()->find_index(obj);
   return AddressLiteral(obj, oop_Relocation::spec(oop_index));
 }
@@ -3703,7 +3708,7 @@
 // Called from init_globals() after universe_init() and before interpreter_init()
 void g1_barrier_stubs_init() {
   CollectedHeap* heap = Universe::heap();
-  if (heap->kind() == CollectedHeap::G1CollectedHeap) {
+  if (heap->kind() == CollectedHeap::G1) {
     // Only needed for G1
     if (dirty_card_log_enqueue == 0) {
       G1BarrierSet* bs =
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -199,41 +199,6 @@
 REGISTER_DECLARATION(Register, Oexception  , O0); // exception being thrown
 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
 
-
-// These must occur after the declarations above
-#ifndef DONT_USE_REGISTER_DEFINES
-
-#define Gthread             AS_REGISTER(Register, Gthread)
-#define Gmethod             AS_REGISTER(Register, Gmethod)
-#define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
-#define Ginline_cache_reg   AS_REGISTER(Register, Ginline_cache_reg)
-#define Gargs               AS_REGISTER(Register, Gargs)
-#define Lthread_cache       AS_REGISTER(Register, Lthread_cache)
-#define Gframe_size         AS_REGISTER(Register, Gframe_size)
-#define Gtemp               AS_REGISTER(Register, Gtemp)
-
-#define Lesp                AS_REGISTER(Register, Lesp)
-#define Lbcp                AS_REGISTER(Register, Lbcp)
-#define Lmethod             AS_REGISTER(Register, Lmethod)
-#define Llocals             AS_REGISTER(Register, Llocals)
-#define Lmonitors           AS_REGISTER(Register, Lmonitors)
-#define Lbyte_code          AS_REGISTER(Register, Lbyte_code)
-#define Lscratch            AS_REGISTER(Register, Lscratch)
-#define Lscratch2           AS_REGISTER(Register, Lscratch2)
-#define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
-
-#define Lentry_args         AS_REGISTER(Register, Lentry_args)
-#define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
-#define O5_savedSP          AS_REGISTER(Register, O5_savedSP)
-#define IdispatchAddress    AS_REGISTER(Register, IdispatchAddress)
-#define ImethodDataPtr      AS_REGISTER(Register, ImethodDataPtr)
-
-#define Oexception          AS_REGISTER(Register, Oexception)
-#define Oissuing_pc         AS_REGISTER(Register, Oissuing_pc)
-
-#endif
-
-
 // Address is an abstraction used to represent a memory location.
 //
 // Note: A register location is represented via a Register, not
--- a/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/sparc/nativeInst_sparc.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,7 +26,6 @@
 #define CPU_SPARC_VM_NATIVEINST_SPARC_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/cpu/sparc/register_definitions_sparc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/sparc/register_definitions_sparc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -22,9 +22,6 @@
  *
  */
 
-// make sure the defines don't screw up the declarations later on in this file
-#define DONT_USE_REGISTER_DEFINES
-
 // Note: precompiled headers can not be used in this file because of the above
 //       definition
 
--- a/src/hotspot/cpu/sparc/register_sparc.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/sparc/register_sparc.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -154,62 +154,6 @@
 CONSTANT_REGISTER_DECLARATION(Register, FP    , (RegisterImpl::ibase + 6));
 CONSTANT_REGISTER_DECLARATION(Register, SP    , (RegisterImpl::obase + 6));
 
-//
-// Because sparc has so many registers, #define'ing values for the is
-// beneficial in code size and the cost of some of the dangers of
-// defines.  We don't use them on Intel because win32 uses asm
-// directives which use the same names for registers as Hotspot does,
-// so #defines would screw up the inline assembly.  If a particular
-// file has a problem with these defines then it's possible to turn
-// them off in that file by defining DONT_USE_REGISTER_DEFINES.
-// register_definition_sparc.cpp does that so that it's able to
-// provide real definitions of these registers for use in debuggers
-// and such.
-//
-
-#ifndef DONT_USE_REGISTER_DEFINES
-#define noreg ((Register)(noreg_RegisterEnumValue))
-
-#define G0 ((Register)(G0_RegisterEnumValue))
-#define G1 ((Register)(G1_RegisterEnumValue))
-#define G2 ((Register)(G2_RegisterEnumValue))
-#define G3 ((Register)(G3_RegisterEnumValue))
-#define G4 ((Register)(G4_RegisterEnumValue))
-#define G5 ((Register)(G5_RegisterEnumValue))
-#define G6 ((Register)(G6_RegisterEnumValue))
-#define G7 ((Register)(G7_RegisterEnumValue))
-
-#define O0 ((Register)(O0_RegisterEnumValue))
-#define O1 ((Register)(O1_RegisterEnumValue))
-#define O2 ((Register)(O2_RegisterEnumValue))
-#define O3 ((Register)(O3_RegisterEnumValue))
-#define O4 ((Register)(O4_RegisterEnumValue))
-#define O5 ((Register)(O5_RegisterEnumValue))
-#define O6 ((Register)(O6_RegisterEnumValue))
-#define O7 ((Register)(O7_RegisterEnumValue))
-
-#define L0 ((Register)(L0_RegisterEnumValue))
-#define L1 ((Register)(L1_RegisterEnumValue))
-#define L2 ((Register)(L2_RegisterEnumValue))
-#define L3 ((Register)(L3_RegisterEnumValue))
-#define L4 ((Register)(L4_RegisterEnumValue))
-#define L5 ((Register)(L5_RegisterEnumValue))
-#define L6 ((Register)(L6_RegisterEnumValue))
-#define L7 ((Register)(L7_RegisterEnumValue))
-
-#define I0 ((Register)(I0_RegisterEnumValue))
-#define I1 ((Register)(I1_RegisterEnumValue))
-#define I2 ((Register)(I2_RegisterEnumValue))
-#define I3 ((Register)(I3_RegisterEnumValue))
-#define I4 ((Register)(I4_RegisterEnumValue))
-#define I5 ((Register)(I5_RegisterEnumValue))
-#define I6 ((Register)(I6_RegisterEnumValue))
-#define I7 ((Register)(I7_RegisterEnumValue))
-
-#define FP ((Register)(FP_RegisterEnumValue))
-#define SP ((Register)(SP_RegisterEnumValue))
-#endif // DONT_USE_REGISTER_DEFINES
-
 // Use FloatRegister as shortcut
 class FloatRegisterImpl;
 typedef FloatRegisterImpl* FloatRegister;
@@ -321,59 +265,6 @@
 CONSTANT_REGISTER_DECLARATION(FloatRegister, F60    , (60));
 CONSTANT_REGISTER_DECLARATION(FloatRegister, F62    , (62));
 
-
-#ifndef DONT_USE_REGISTER_DEFINES
-#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
-#define F0     ((FloatRegister)(    F0_FloatRegisterEnumValue))
-#define F1     ((FloatRegister)(    F1_FloatRegisterEnumValue))
-#define F2     ((FloatRegister)(    F2_FloatRegisterEnumValue))
-#define F3     ((FloatRegister)(    F3_FloatRegisterEnumValue))
-#define F4     ((FloatRegister)(    F4_FloatRegisterEnumValue))
-#define F5     ((FloatRegister)(    F5_FloatRegisterEnumValue))
-#define F6     ((FloatRegister)(    F6_FloatRegisterEnumValue))
-#define F7     ((FloatRegister)(    F7_FloatRegisterEnumValue))
-#define F8     ((FloatRegister)(    F8_FloatRegisterEnumValue))
-#define F9     ((FloatRegister)(    F9_FloatRegisterEnumValue))
-#define F10    ((FloatRegister)(   F10_FloatRegisterEnumValue))
-#define F11    ((FloatRegister)(   F11_FloatRegisterEnumValue))
-#define F12    ((FloatRegister)(   F12_FloatRegisterEnumValue))
-#define F13    ((FloatRegister)(   F13_FloatRegisterEnumValue))
-#define F14    ((FloatRegister)(   F14_FloatRegisterEnumValue))
-#define F15    ((FloatRegister)(   F15_FloatRegisterEnumValue))
-#define F16    ((FloatRegister)(   F16_FloatRegisterEnumValue))
-#define F17    ((FloatRegister)(   F17_FloatRegisterEnumValue))
-#define F18    ((FloatRegister)(   F18_FloatRegisterEnumValue))
-#define F19    ((FloatRegister)(   F19_FloatRegisterEnumValue))
-#define F20    ((FloatRegister)(   F20_FloatRegisterEnumValue))
-#define F21    ((FloatRegister)(   F21_FloatRegisterEnumValue))
-#define F22    ((FloatRegister)(   F22_FloatRegisterEnumValue))
-#define F23    ((FloatRegister)(   F23_FloatRegisterEnumValue))
-#define F24    ((FloatRegister)(   F24_FloatRegisterEnumValue))
-#define F25    ((FloatRegister)(   F25_FloatRegisterEnumValue))
-#define F26    ((FloatRegister)(   F26_FloatRegisterEnumValue))
-#define F27    ((FloatRegister)(   F27_FloatRegisterEnumValue))
-#define F28    ((FloatRegister)(   F28_FloatRegisterEnumValue))
-#define F29    ((FloatRegister)(   F29_FloatRegisterEnumValue))
-#define F30    ((FloatRegister)(   F30_FloatRegisterEnumValue))
-#define F31    ((FloatRegister)(   F31_FloatRegisterEnumValue))
-#define F32    ((FloatRegister)(   F32_FloatRegisterEnumValue))
-#define F34    ((FloatRegister)(   F34_FloatRegisterEnumValue))
-#define F36    ((FloatRegister)(   F36_FloatRegisterEnumValue))
-#define F38    ((FloatRegister)(   F38_FloatRegisterEnumValue))
-#define F40    ((FloatRegister)(   F40_FloatRegisterEnumValue))
-#define F42    ((FloatRegister)(   F42_FloatRegisterEnumValue))
-#define F44    ((FloatRegister)(   F44_FloatRegisterEnumValue))
-#define F46    ((FloatRegister)(   F46_FloatRegisterEnumValue))
-#define F48    ((FloatRegister)(   F48_FloatRegisterEnumValue))
-#define F50    ((FloatRegister)(   F50_FloatRegisterEnumValue))
-#define F52    ((FloatRegister)(   F52_FloatRegisterEnumValue))
-#define F54    ((FloatRegister)(   F54_FloatRegisterEnumValue))
-#define F56    ((FloatRegister)(   F56_FloatRegisterEnumValue))
-#define F58    ((FloatRegister)(   F58_FloatRegisterEnumValue))
-#define F60    ((FloatRegister)(   F60_FloatRegisterEnumValue))
-#define F62    ((FloatRegister)(   F62_FloatRegisterEnumValue))
-#endif // DONT_USE_REGISTER_DEFINES
-
 // Maximum number of incoming arguments that can be passed in i registers.
 const int SPARC_ARGS_IN_REGS_NUM = 6;
 
--- a/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,8 +26,9 @@
 #include "asm/assembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_sparc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -97,7 +98,7 @@
     guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
     if (format() != 0) {
       assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-      jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+      jint np = type() == relocInfo::oop_type ? CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
       inst &= ~Assembler::hi22(-1);
       inst |=  Assembler::hi22((intptr_t)np);
       if (verify_only) {
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -4080,6 +4080,16 @@
   emit_operand(dst, src);
   emit_int8(mode & 0xFF);
 }
+void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8(0x43);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8(imm8 & 0xFF);
+}
 
 void Assembler::psrldq(XMMRegister dst, int shift) {
   // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
@@ -6201,6 +6211,27 @@
   emit_operand(dst, src);
 }
 
+void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8((unsigned char)0xEF);
+  emit_int8((unsigned char)(0xC0 | encode));
+}
+
+void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
+  assert(VM_Version::supports_evex(), "requires EVEX support");
+  assert(dst != xnoreg, "sanity");
+  InstructionMark im(this);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
+  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
+  emit_int8((unsigned char)0xEF);
+  emit_operand(dst, src);
+}
+
 
 // vinserti forms
 
@@ -6786,6 +6817,16 @@
   emit_int8((unsigned char)mask);
 }
 
+void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
+  assert(VM_Version::supports_vpclmulqdq(), "Requires vector carryless multiplication support");
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
+  emit_int8(0x44);
+  emit_int8((unsigned char)(0xC0 | encode));
+  emit_int8((unsigned char)mask);
+}
+
 void Assembler::vzeroupper() {
   if (VM_Version::supports_vzeroupper()) {
     InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1663,6 +1663,9 @@
   void pshuflw(XMMRegister dst, XMMRegister src, int mode);
   void pshuflw(XMMRegister dst, Address src,     int mode);
 
+  // Shuffle packed values at 128 bit granularity
+  void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);
+
   // Shift Right by bytes Logical DoubleQuadword Immediate
   void psrldq(XMMRegister dst, int shift);
   // Shift Left by bytes Logical DoubleQuadword Immediate
@@ -2046,6 +2049,9 @@
   void pxor(XMMRegister dst, XMMRegister src);
   void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
   void vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+  void evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
+  void evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len);
+
 
   // vinserti forms
   void vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8);
@@ -2108,7 +2114,7 @@
   // Carry-Less Multiplication Quadword
   void pclmulqdq(XMMRegister dst, XMMRegister src, int mask);
   void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask);
-
+  void evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len);
   // AVX instruction which is used to clear upper 128 bits of YMM registers and
   // to avoid transaction penalty between AVX and SSE states. There is no
   // penalty if legacy SSE instructions are encoded using VEX prefix because
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -10120,6 +10120,16 @@
 }
 
 /**
+ * Fold four 128-bit data chunks
+ */
+void MacroAssembler::fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
+  evpclmulhdq(xtmp, xK, xcrc, Assembler::AVX_512bit); // [123:64]
+  evpclmulldq(xcrc, xK, xcrc, Assembler::AVX_512bit); // [63:0]
+  evpxorq(xcrc, xcrc, Address(buf, offset), Assembler::AVX_512bit /* vector_len */);
+  evpxorq(xcrc, xcrc, xtmp, Assembler::AVX_512bit /* vector_len */);
+}
+
+/**
  * Fold 128-bit data chunk
  */
 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
@@ -10224,6 +10234,34 @@
   shrl(len, 4);
   jcc(Assembler::zero, L_tail_restore);
 
+  // Fold total 512 bits of polynomial on each iteration
+  if (VM_Version::supports_vpclmulqdq()) {
+    Label Parallel_loop, L_No_Parallel;
+
+    cmpl(len, 8);
+    jccb(Assembler::less, L_No_Parallel);
+
+    movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
+    evmovdquq(xmm1, Address(buf, 0), Assembler::AVX_512bit);
+    movdl(xmm5, crc);
+    evpxorq(xmm1, xmm1, xmm5, Assembler::AVX_512bit);
+    addptr(buf, 64);
+    subl(len, 7);
+    evshufi64x2(xmm0, xmm0, xmm0, 0x00, Assembler::AVX_512bit); // propagate the mask from 128 bits to 512 bits
+
+    BIND(Parallel_loop);
+    fold_128bit_crc32_avx512(xmm1, xmm0, xmm5, buf, 0);
+    addptr(buf, 64);
+    subl(len, 4);
+    jcc(Assembler::greater, Parallel_loop);
+
+    vextracti64x2(xmm2, xmm1, 0x01);
+    vextracti64x2(xmm3, xmm1, 0x02);
+    vextracti64x2(xmm4, xmm1, 0x03);
+    jmp(L_fold_512b);
+
+    BIND(L_No_Parallel);
+  }
   // Fold crc into first bytes of vector
   movdqa(xmm1, Address(buf, 0));
   movdl(rax, xmm1);
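The new fold_128bit_crc32_avx512 path leans on vpclmulqdq. For reference, the carry-less multiply that pclmulqdq/vpclmulqdq perform per 64-bit lane can be written in portable scalar code as below (sketch only; the 0x00 and 0x11 immediates used by the evpclmulldq/evpclmulhdq helpers in the header select the lower or upper 64-bit halves of each lane):

#include <cstdint>

// Carry-less (GF(2) polynomial) multiply: 64 x 64 -> 128 bits.
static void clmul64(uint64_t a, uint64_t b, uint64_t* hi, uint64_t* lo) {
  *hi = *lo = 0;
  for (int i = 0; i < 64; i++) {
    if ((b >> i) & 1) {            // "addition" is XOR: no carries propagate
      *lo ^= a << i;
      if (i != 0) *hi ^= a >> (64 - i);
    }
  }
}
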
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1498,6 +1498,14 @@
     // 0x11 - multiply upper 64 bits [64:127]
     Assembler::vpclmulqdq(dst, nds, src, 0x11);
   }
+  void evpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+    // 0x00 - multiply lower 64 bits [0:63]
+    Assembler::evpclmulqdq(dst, nds, src, 0x00, vector_len);
+  }
+  void evpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+    // 0x11 - multiply upper 64 bits [64:127]
+    Assembler::evpclmulqdq(dst, nds, src, 0x11, vector_len);
+  }
 
   // Data
 
@@ -1723,6 +1731,7 @@
   // Fold 8-bit data
   void fold_8bit_crc32(Register crc, Register table, Register tmp);
   void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp);
+  void fold_128bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset);
 
   // Compress char[] array to byte[].
   void char_array_compress(Register src, Register dst, Register len,
--- a/src/hotspot/cpu/x86/nativeInst_x86.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/nativeInst_x86.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,7 +26,6 @@
 #define CPU_X86_VM_NATIVEINST_X86_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
--- a/src/hotspot/cpu/x86/relocInfo_x86.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/relocInfo_x86.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_x86.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -51,9 +52,9 @@
     // both compressed oops and compressed classes look the same
     if (Universe::heap()->is_in_reserved((oop)x)) {
     if (verify_only) {
-      guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
+      guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
     } else {
-      *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
+      *(int32_t*) disp = CompressedOops::encode((oop)x);
     }
   } else {
       if (verify_only) {
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "code/debugInfoRec.hpp"
 #include "code/icBuffer.hpp"
+#include "code/nativeInst.hpp"
 #include "code/vtableStubs.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -41,6 +41,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
 #include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "vm_version_x86.hpp"
 #include "vmreg_x86.inline.hpp"
 #ifdef COMPILER1
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -665,6 +665,7 @@
     _features &= ~CPU_AVX512BW;
     _features &= ~CPU_AVX512VL;
     _features &= ~CPU_AVX512_VPOPCNTDQ;
+    _features &= ~CPU_VPCLMULQDQ;
   }
 
   if (UseAVX < 2)
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -334,6 +334,7 @@
 #define CPU_FMA ((uint64_t)UCONST64(0x800000000))      // FMA instructions
 #define CPU_VZEROUPPER ((uint64_t)UCONST64(0x1000000000))       // Vzeroupper instruction
 #define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
+#define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) // Vector carryless multiplication
 
   enum Extended_Family {
     // AMD
@@ -542,6 +543,8 @@
           result |= CPU_AVX512VL;
         if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
           result |= CPU_AVX512_VPOPCNTDQ;
+        if (_cpuid_info.sef_cpuid7_ecx.bits.vpclmulqdq != 0)
+          result |= CPU_VPCLMULQDQ;
       }
     }
     if(_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
@@ -819,6 +822,7 @@
   static bool supports_fma()        { return (_features & CPU_FMA) != 0 && supports_avx(); }
   static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
   static bool supports_vpopcntdq()  { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
+  static bool supports_vpclmulqdq() { return (_features & CPU_VPCLMULQDQ) != 0; }
 
   // Intel features
   static bool is_intel_family_core() { return is_intel() &&
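The CPU_VPCLMULQDQ additions follow the existing feature-flag pattern in vm_version_x86: one bit in a 64-bit feature mask, set from the CPUID structured extended feature leaf and queried through a supports_*() accessor. A condensed, standalone sketch of that pattern:

#include <cstdint>

static const uint64_t CPU_VPCLMULQDQ = UINT64_C(0x4000000000);
static uint64_t features;  // populated from CPUID at VM startup

static bool supports_vpclmulqdq() {
  return (features & CPU_VPCLMULQDQ) != 0;
}
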
--- a/src/hotspot/cpu/zero/nativeInst_zero.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/cpu/zero/nativeInst_zero.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 #define CPU_ZERO_VM_NATIVEINST_ZERO_HPP
 
 #include "asm/assembler.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/os.hpp"
 
--- a/src/hotspot/os/aix/attachListener_aix.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os/aix/attachListener_aix.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os/bsd/attachListener_bsd.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os/linux/attachListener_linux.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os/linux/attachListener_linux.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
--- a/src/hotspot/os/linux/os_linux.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os/linux/os_linux.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -152,6 +152,13 @@
 
 static int clock_tics_per_sec = 100;
 
+// If the VM might have been created on the primordial thread, we need to resolve the
+// primordial thread stack bounds and check if the current thread might be the
+// primordial thread in places. If we know that the primordial thread is never used,
+// such as when the VM was created by one of the standard java launchers, we can
+// avoid this
+static bool suppress_primordial_thread_resolution = false;
+
 // For diagnostics to print a message once. see run_periodic_checks
 static sigset_t check_signal_done;
 static bool check_signals = true;
@@ -917,6 +924,9 @@
 
 // Check if current thread is the primordial thread, similar to Solaris thr_main.
 bool os::is_primordial_thread(void) {
+  if (suppress_primordial_thread_resolution) {
+    return false;
+  }
   char dummy;
   // If called before init complete, thread stack bottom will be null.
   // Can be called if fatal error occurs before initialization.
@@ -1644,10 +1654,7 @@
         //
         // Dynamic loader will make all stacks executable after
         // this function returns, and will not do that again.
-#ifdef ASSERT
-        ThreadsListHandle tlh;
-        assert(tlh.length() == 0, "no Java threads should exist yet.");
-#endif
+        assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
       } else {
         warning("You have loaded library %s which might have disabled stack guard. "
                 "The VM will try to fix the stack guard now.\n"
@@ -4936,7 +4943,11 @@
   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
   }
-  Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+
+  suppress_primordial_thread_resolution = Arguments::created_by_java_launcher();
+  if (!suppress_primordial_thread_resolution) {
+    Linux::capture_initial_stack(JavaThread::stack_size_at_create());
+  }
 
 #if defined(IA32)
   workaround_expand_exec_shield_cs_limit();
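Condensing the os_linux.cpp change above into a hypothetical helper (not the VM's actual init path): primordial-thread stack-bounds resolution is skipped whenever the VM is known to have been started by one of the standard java launchers, which never run the VM on the primordial thread.

static bool suppress_primordial_thread_resolution = false;

void os_init_sketch(bool created_by_java_launcher) {
  suppress_primordial_thread_resolution = created_by_java_launcher;
  if (!suppress_primordial_thread_resolution) {
    // capture_initial_stack(...) would run here to record the primordial
    // thread's stack bounds for later is_primordial_thread() checks.
  }
}
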
--- a/src/hotspot/os/posix/os_posix.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os/posix/os_posix.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "jvm.h"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "runtime/frame.inline.hpp"
@@ -30,6 +31,7 @@
 #include "runtime/os.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
--- a/src/hotspot/os/posix/vmError_posix.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os/posix/vmError_posix.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,6 +27,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/os.hpp"
 #include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/vmError.hpp"
 
 #include <sys/types.h>
@@ -122,11 +123,20 @@
     pc = (address) info->si_addr;
   }
 
+  // Needed to make it possible to call SafeFetch.. APIs in error handling.
   if (uc && pc && StubRoutines::is_safefetch_fault(pc)) {
     os::Posix::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
     return;
   }
 
+  // Needed because asserts may happen in error handling too.
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return;
+  }
+#endif // CAN_SHOW_REGISTERS_ON_ASSERT
+
   VMError::report_and_die(NULL, sig, pc, info, ucVoid);
 }
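The g_assert_poison checks added to the signal handlers in this changeset support the CAN_SHOW_REGISTERS_ON_ASSERT mechanism: an assert deliberately touches a protected page, so the fault handler can recognize the address and capture register state before reporting. A self-contained sketch of the trick (all names illustrative):

#include <sys/mman.h>

static void* g_assert_poison;  // one PROT_NONE page, reserved at startup

static void init_assert_poison() {
  g_assert_poison = mmap(nullptr, 4096, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void trigger_assert_poison() {
  // Faults with si_addr == g_assert_poison; the handler dumps registers,
  // then reporting proceeds as for a normal assert.
  *(volatile char*)g_assert_poison = 'X';
}
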
 
--- a/src/hotspot/os/solaris/attachListener_solaris.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os/solaris/attachListener_solaris.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "logging/log.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/attachListener.hpp"
--- a/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/frame.hpp"
+#include "runtime/frame.inline.hpp"
 #include "runtime/thread.hpp"
 
 frame JavaThread::pd_last_frame() {
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_bsd.hpp"
 #include "prims/jniFastGetField.hpp"
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -50,6 +50,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 #ifdef BUILTIN_SIM
@@ -306,6 +307,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,7 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -311,6 +312,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -51,6 +51,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -266,6 +267,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -54,6 +54,7 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
 #include "utilities/events.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/vmError.hpp"
 
 // put OS-includes here
@@ -270,6 +271,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -513,6 +514,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_linux.hpp"
 #include "prims/jniFastGetField.hpp"
@@ -50,6 +51,7 @@
 #include "runtime/timer.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -303,6 +305,13 @@
     }
   }
 
+#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
+  if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
+    handle_assert_poison_fault(ucVoid, info->si_addr);
+    return 1;
+  }
+#endif
+
   JavaThread* thread = NULL;
   VMThread* vmthread = NULL;
   if (os::Linux::signal_handlers_are_installed) {
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -32,6 +32,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "os_share_solaris.hpp"
 #include "prims/jniFastGetField.hpp"
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -29,8 +29,8 @@
 #include "classfile/javaAssertions.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/abstractInterpreter.hpp"
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -32,7 +32,6 @@
 #include "compiler/compilerOracle.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "jvmci/compilerRuntime.hpp"
 #include "jvmci/jvmciRuntime.hpp"
 #include "oops/method.inline.hpp"
@@ -40,6 +39,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/xmlstream.hpp"
 
--- a/src/hotspot/share/asm/codeBuffer.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/asm/codeBuffer.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -25,10 +25,10 @@
 #include "precompiled.hpp"
 #include "asm/codeBuffer.hpp"
 #include "compiler/disassembler.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/icache.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/xmlstream.hpp"
--- a/src/hotspot/share/c1/c1_FpuStackSim.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/c1/c1_FpuStackSim.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define SHARE_VM_C1_C1_FPUSTACKSIM_HPP
 
 #include "c1/c1_FrameMap.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/macros.hpp"
 
 // Provides location for forward declaration of this class, which is
--- a/src/hotspot/share/c1/c1_Optimizer.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/c1/c1_Optimizer.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 
 #include "c1/c1_IR.hpp"
 #include "c1/c1_Instruction.hpp"
-#include "memory/allocation.hpp"
 
 class Optimizer {
  private:
--- a/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,8 +32,7 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
-
-
+#include "utilities/copy.hpp"
 
 #ifndef PRODUCT
   #define TRACE_BCEA(level, code)                                            \
--- a/src/hotspot/share/ci/ciEnv.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/ci/ciEnv.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -57,6 +57,7 @@
 #include "runtime/init.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/jniHandles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.inline.hpp"
 #include "trace/tracing.hpp"
@@ -540,7 +541,7 @@
     // Calculate accessibility the hard way.
     if (!k->is_loaded()) {
       is_accessible = false;
-    } else if (k->loader() != accessor->loader() &&
+    } else if (!oopDesc::equals(k->loader(), accessor->loader()) &&
                get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) {
       // Loaded only remotely.  Not linked yet.
       is_accessible = false;
@@ -591,7 +592,7 @@
     index = cpool->object_to_cp_index(cache_index);
     oop obj = cpool->resolved_references()->obj_at(cache_index);
     if (obj != NULL) {
-      if (obj == Universe::the_null_sentinel()) {
+      if (oopDesc::equals(obj, Universe::the_null_sentinel())) {
         return ciConstant(T_OBJECT, get_object(NULL));
       }
       BasicType bt = T_OBJECT;
--- a/src/hotspot/share/ci/ciFlags.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/ci/ciFlags.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 
 #include "jvm.h"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/ostream.hpp"
 
--- a/src/hotspot/share/ci/ciMetadata.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/ci/ciMetadata.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 
 #include "ci/ciBaseObject.hpp"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
 
--- a/src/hotspot/share/ci/ciObject.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/ci/ciObject.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 
 #include "ci/ciBaseObject.hpp"
 #include "ci/ciClassList.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
 
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -249,7 +249,7 @@
   // into the cache.
   Handle keyHandle(Thread::current(), key);
   ciObject* new_object = create_new_object(keyHandle());
-  assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
+  assert(oopDesc::equals(keyHandle(), new_object->get_oop()), "must be properly recorded");
   init_ident_of(new_object);
   assert(Universe::heap()->is_in_reserved(new_object->get_oop()), "must be");
 
@@ -450,8 +450,8 @@
   for (int i=0; i<_unloaded_klasses->length(); i++) {
     ciKlass* entry = _unloaded_klasses->at(i);
     if (entry->name()->equals(name) &&
-        entry->loader() == loader &&
-        entry->protection_domain() == domain) {
+        oopDesc::equals(entry->loader(), loader) &&
+        oopDesc::equals(entry->protection_domain(), domain)) {
       // We've found a match.
       return entry;
     }
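
[Editor's note] A recurring edit throughout this changeset replaces raw oop pointer
comparison (==) with oopDesc::equals(). Funneling equality through one helper gives the
runtime a single interception point, which matters for collectors that may need a barrier
or forwarding resolution before two heap references can be compared; for collectors
without such needs it degenerates to the plain compare. A sketch of the call-site
transformation, taken from the hunk above:

    // before: if (entry->loader() == loader) { ... }
    // after:
    if (oopDesc::equals(entry->loader(), loader)) {
      // same loader object, possibly established via a GC-aware compare
    }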
--- a/src/hotspot/share/classfile/classFileParser.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -37,7 +37,6 @@
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -62,6 +61,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/reflection.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/timer.hpp"
 #include "services/classLoadingService.hpp"
@@ -69,6 +69,7 @@
 #include "trace/traceMacros.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "utilities/copy.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
@@ -5423,6 +5424,8 @@
   // has to be changed accordingly.
   ik->set_initial_method_idnum(ik->methods()->length());
 
+  ik->set_this_class_index(_this_class_index);
+
   if (is_anonymous()) {
     // _this_class_index is a CONSTANT_Class entry that refers to this
     // anonymous class itself. If this class needs to refer to its own methods or
--- a/src/hotspot/share/classfile/classLoader.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/classLoader.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -64,7 +64,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vm_version.hpp"
@@ -148,8 +148,6 @@
 #if INCLUDE_CDS
 ClassPathEntry* ClassLoader::_app_classpath_entries = NULL;
 ClassPathEntry* ClassLoader::_last_app_classpath_entry = NULL;
-GrowableArray<char*>* ClassLoader::_boot_modules_array = NULL;
-GrowableArray<char*>* ClassLoader::_platform_modules_array = NULL;
 SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
 #endif
 
--- a/src/hotspot/share/classfile/classLoader.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/classLoader.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -233,12 +233,6 @@
   // Last entry in linked list of appended ClassPathEntry instances
   static ClassPathEntry* _last_append_entry;
 
-  // Array of module names associated with the boot class loader
-  CDS_ONLY(static GrowableArray<char*>* _boot_modules_array;)
-
-  // Array of module names associated with the platform class loader
-  CDS_ONLY(static GrowableArray<char*>* _platform_modules_array;)
-
   // Info used by CDS
   CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
 
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -56,7 +56,6 @@
 #include "classfile/packageEntry.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
@@ -74,6 +73,7 @@
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/synchronizer.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
@@ -201,7 +201,7 @@
   VerifyContainsOopClosure(oop target) : _target(target), _found(false) {}
 
   void do_oop(oop* p) {
-    if (p != NULL && *p == _target) {
+    if (p != NULL && oopDesc::equals(RawAccess<>::oop_load(p), _target)) {
       _found = true;
     }
   }
@@ -380,7 +380,7 @@
 
     // Just return if this dependency is to a class with the same or a parent
     // class_loader.
-    if (from == to || java_lang_ClassLoader::isAncestor(from, to)) {
+    if (oopDesc::equals(from, to) || java_lang_ClassLoader::isAncestor(from, to)) {
       return; // this class loader is in the parent list, no need to add it.
     }
   }
@@ -1223,17 +1223,6 @@
   return array;
 }
 
-bool ClassLoaderDataGraph::unload_list_contains(const void* x) {
-  assert(SafepointSynchronize::is_at_safepoint(), "only safe to call at safepoint");
-  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
-    // Needs fixing, see JDK-8199007.
-    if (cld->metaspace_or_null() != NULL && Metaspace::contains(x)) {
-      return true;
-    }
-  }
-  return false;
-}
-
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -155,8 +155,6 @@
   static void print() { print_on(tty); }
   static void verify();
 
-  static bool unload_list_contains(const void* x);
-
   // instance and array class counters
   static inline size_t num_instance_classes();
   static inline size_t num_array_classes();
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -29,6 +29,7 @@
 #include "logging/logMessage.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/numberSeq.hpp"
 #include <sys/stat.h>
@@ -182,7 +183,7 @@
 }
 
 void CompactStringTableWriter::add(unsigned int hash, oop string) {
-  CompactHashtableWriter::add(hash, oopDesc::encode_heap_oop(string));
+  CompactHashtableWriter::add(hash, CompressedOops::encode(string));
 }
 
 void CompactSymbolTableWriter::dump(CompactHashtable<Symbol*, char> *cht) {
--- a/src/hotspot/share/classfile/compactHashtable.inline.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/compactHashtable.inline.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,8 +26,10 @@
 #define SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
 
 #include "classfile/compactHashtable.hpp"
+#include "classfile/javaClasses.hpp"
 #include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <class T, class N>
 inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, char>* const t,
@@ -45,7 +47,7 @@
 inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
                                                 u4 offset, const char* name, int len) {
   narrowOop obj = (narrowOop)offset;
-  oop string = oopDesc::decode_heap_oop(obj);
+  oop string = CompressedOops::decode(obj);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
   }
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -884,6 +884,10 @@
   if (new_methods->length() > 0) {
     ConstantPool* cp = bpool->create_constant_pool(CHECK);
     if (cp != klass->constants()) {
+      // Copy resolved anonymous class into new constant pool.
+      if (klass->is_anonymous()) {
+        cp->klass_at_put(klass->this_class_index(), klass);
+      }
       klass->class_loader_data()->add_to_deallocate_list(klass->constants());
       klass->set_constants(cp);
       cp->set_pool_holder(klass);
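
[Editor's note] This hunk pairs with the classFileParser.cpp change above: the parser now
records the class's own CONSTANT_Class slot via set_this_class_index(), and when
default-method processing builds a replacement constant pool for an anonymous class, that
slot is re-populated with the already-resolved klass. Presumably this is needed because an
anonymous class cannot be looked up by name through its loader, so a self-reference in the
new pool could not be re-resolved. The two sides of the handshake, verbatim from the hunks:

    // recorded once at parse time (classFileParser.cpp):
    ik->set_this_class_index(_this_class_index);

    // replayed into the fresh pool (defaultMethods.cpp):
    if (klass->is_anonymous()) {
      cp->klass_at_put(klass->this_class_index(), klass);
    }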
--- a/src/hotspot/share/classfile/dictionary.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/dictionary.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -29,7 +29,6 @@
 #include "classfile/protectionDomainCache.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
@@ -38,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 // Optimization: if any dictionary needs resizing, we set this flag,
@@ -161,13 +161,13 @@
 
 bool DictionaryEntry::contains_protection_domain(oop protection_domain) const {
 #ifdef ASSERT
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Ensure this doesn't show up in the pd_set (invariant)
     bool in_pd_set = false;
     for (ProtectionDomainEntry* current = pd_set_acquire();
                                 current != NULL;
                                 current = current->next()) {
-      if (current->object_no_keepalive() == protection_domain) {
+      if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) {
         in_pd_set = true;
         break;
       }
@@ -179,7 +179,7 @@
   }
 #endif /* ASSERT */
 
-  if (protection_domain == instance_klass()->protection_domain()) {
+  if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) {
     // Succeeds trivially
     return true;
   }
@@ -187,7 +187,7 @@
   for (ProtectionDomainEntry* current = pd_set_acquire();
                               current != NULL;
                               current = current->next()) {
-    if (current->object_no_keepalive() == protection_domain) return true;
+    if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) return true;
   }
   return false;
 }
--- a/src/hotspot/share/classfile/javaClasses.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -33,6 +33,7 @@
 #include "code/dependencyContext.hpp"
 #include "code/pcDesc.hpp"
 #include "interpreter/interpreter.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/oopFactory.hpp"
@@ -57,6 +58,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/vframe.inline.hpp"
 #include "utilities/align.hpp"
@@ -870,7 +872,7 @@
   } else {
     assert(Universe::is_module_initialized() ||
            (ModuleEntryTable::javabase_defined() &&
-            (module() == ModuleEntryTable::javabase_moduleEntry()->module())),
+            (oopDesc::equals(module(), ModuleEntryTable::javabase_moduleEntry()->module()))),
            "Incorrect java.lang.Module specification while creating mirror");
     set_module(mirror(), module());
   }
@@ -947,7 +949,7 @@
     }
 
     // set the classLoader field in the java_lang_Class instance
-    assert(class_loader() == k->class_loader(), "should be same");
+    assert(oopDesc::equals(class_loader(), k->class_loader()), "should be same");
     set_class_loader(mirror(), class_loader());
 
     // Setup indirection from klass->mirror
@@ -1461,9 +1463,9 @@
     // Note: create_basic_type_mirror above initializes ak to a non-null value.
     type = ArrayKlass::cast(ak)->element_type();
   } else {
-    assert(java_class == Universe::void_mirror(), "only valid non-array primitive");
+    assert(oopDesc::equals(java_class, Universe::void_mirror()), "only valid non-array primitive");
   }
-  assert(Universe::java_mirror(type) == java_class, "must be consistent");
+  assert(oopDesc::equals(Universe::java_mirror(type), java_class), "must be consistent");
   return type;
 }
 
@@ -3504,7 +3506,7 @@
 // Support for java_lang_ref_Reference
 
 bool java_lang_ref_Reference::is_referent_field(oop obj, ptrdiff_t offset) {
-  assert(!oopDesc::is_null(obj), "sanity");
+  assert(obj != NULL, "sanity");
   if (offset != java_lang_ref_Reference::referent_offset) {
     return false;
   }
@@ -3836,14 +3838,14 @@
 }
 
 bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) {
-  if (mt1 == mt2)
+  if (oopDesc::equals(mt1, mt2))
     return true;
-  if (rtype(mt1) != rtype(mt2))
+  if (!oopDesc::equals(rtype(mt1), rtype(mt2)))
     return false;
   if (ptype_count(mt1) != ptype_count(mt2))
     return false;
   for (int i = ptype_count(mt1) - 1; i >= 0; i--) {
-    if (ptype(mt1, i) != ptype(mt2, i))
+    if (!oopDesc::equals(ptype(mt1, i), ptype(mt2, i)))
       return false;
   }
   return true;
@@ -4041,7 +4043,7 @@
   // This loop taken verbatim from ClassLoader.java:
   do {
     acl = parent(acl);
-    if (cl == acl) {
+    if (oopDesc::equals(cl, acl)) {
       return true;
     }
     assert(++loop_count > 0, "loop_count overflow");
@@ -4071,7 +4073,7 @@
 
   oop cl = SystemDictionary::java_system_loader();
   while(cl != NULL) {
-    if (cl == loader) return true;
+    if (oopDesc::equals(cl, loader)) return true;
     cl = parent(cl);
   }
   return false;
@@ -4131,7 +4133,7 @@
 bool java_lang_System::has_security_manager() {
   InstanceKlass* ik = SystemDictionary::System_klass();
   oop base = ik->static_field_base_raw();
-  return !oopDesc::is_null(base->obj_field(static_security_offset));
+  return base->obj_field(static_security_offset) != NULL;
 }
 
 int java_lang_Class::_klass_offset;
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/protectionDomainCache.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -132,7 +132,7 @@
 
 ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) {
   for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
-    if (e->object_no_keepalive() == protection_domain()) {
+    if (oopDesc::equals(e->object_no_keepalive(), protection_domain())) {
       return e;
     }
   }
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/os.inline.hpp"
 #include "utilities/ostream.hpp"
 
 SharedPathsMiscInfo::SharedPathsMiscInfo() {
--- a/src/hotspot/share/classfile/stringTable.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/stringTable.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -29,7 +29,6 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
@@ -41,6 +40,7 @@
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
--- a/src/hotspot/share/classfile/stringTable.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/stringTable.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP
 #define SHARE_VM_CLASSFILE_STRINGTABLE_HPP
 
-#include "memory/allocation.hpp"
 #include "utilities/hashtable.hpp"
 
 template <class T, class N> class CompactHashtable;
--- a/src/hotspot/share/classfile/symbolTable.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -29,7 +29,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
@@ -37,6 +37,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "utilities/hashtable.inline.hpp"
 
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -43,7 +43,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/interpreter.hpp"
@@ -53,6 +52,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -75,6 +75,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/diagnosticCommand.hpp"
@@ -181,7 +182,7 @@
     return false;
   }
   return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() ||
-       class_loader == _java_system_loader);
+         oopDesc::equals(class_loader, _java_system_loader));
 }
 
 // Returns true if the passed class loader is the platform class loader.
@@ -390,7 +391,7 @@
        ((quicksuperk = childk->super()) != NULL) &&
 
          ((quicksuperk->name() == class_name) &&
-            (quicksuperk->class_loader()  == class_loader()))) {
+            (oopDesc::equals(quicksuperk->class_loader(), class_loader())))) {
            return quicksuperk;
     } else {
       PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, loader_data);
@@ -524,7 +525,7 @@
   bool calledholdinglock
       = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
   assert(calledholdinglock,"must hold lock for notify");
-  assert((!(lockObject() == _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
+  assert((!oopDesc::equals(lockObject(), _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
   ObjectSynchronizer::notifyall(lockObject, THREAD);
   intptr_t recursions =  ObjectSynchronizer::complete_exit(lockObject, THREAD);
   SystemDictionary_lock->wait();
@@ -842,7 +843,7 @@
       // If everything was OK (no exceptions, no null return value), and
       // class_loader is NOT the defining loader, do a little more bookkeeping.
       if (!HAS_PENDING_EXCEPTION && k != NULL &&
-        k->class_loader() != class_loader()) {
+        !oopDesc::equals(k->class_loader(), class_loader())) {
 
         check_constraints(d_hash, k, class_loader, false, THREAD);
 
@@ -988,7 +989,7 @@
   if (host_klass != NULL) {
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
-    guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+    guarantee(oopDesc::equals(host_klass->class_loader(), class_loader()), "should be the same");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
@@ -1746,7 +1747,7 @@
       == ObjectSynchronizer::owner_other) {
     // contention will likely happen, so increment the corresponding
     // contention counter.
-    if (loader_lock() == _system_loader_lock_obj) {
+    if (oopDesc::equals(loader_lock(), _system_loader_lock_obj)) {
       ClassLoader::sync_systemLoaderLockContentionRate()->inc();
     } else {
       ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc();
@@ -1829,7 +1830,7 @@
   BoolObjectClosure* _is_alive;
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(_is_alive->do_object_b(obj), "Oop in protection domain cache table must be live");
   }
 
@@ -2228,7 +2229,7 @@
       // cleared if revocation occurs too often for this type
      // NOTE that we must only do this when the class is initially
       // defined, not each time it is referenced from a new class loader
-      if (k->class_loader() == class_loader()) {
+      if (oopDesc::equals(k->class_loader(), class_loader())) {
         k->set_prototype_header(markOopDesc::biased_locking_prototype());
       }
     }
@@ -2420,7 +2421,7 @@
                                                Handle loader1, Handle loader2,
                                                bool is_method, TRAPS)  {
   // Nothing to do if loaders are the same.
-  if (loader1() == loader2()) {
+  if (oopDesc::equals(loader1(), loader2())) {
     return NULL;
   }
 
@@ -2699,7 +2700,7 @@
       mirror = ss.as_java_mirror(class_loader, protection_domain,
                                  SignatureStream::NCDFError, CHECK_(empty));
     }
-    assert(!oopDesc::is_null(mirror), "%s", ss.as_symbol(THREAD)->as_C_string());
+    assert(mirror != NULL, "%s", ss.as_symbol(THREAD)->as_C_string());
     if (ss.at_return_type())
       rt = Handle(THREAD, mirror);
     else
@@ -2793,7 +2794,7 @@
     // which MemberName resolution doesn't handle. There's special logic on JDK side to handle them
     // (see MethodHandles.linkMethodHandleConstant() and MethodHandles.findVirtualForMH()).
   } else {
-    MethodHandles::resolve_MemberName(mname, caller, CHECK_(empty));
+    MethodHandles::resolve_MemberName(mname, caller, /*speculative_resolve*/false, CHECK_(empty));
   }
 
   // After method/field resolution succeeded, it's safe to resolve MH signature as well.
--- a/src/hotspot/share/classfile/verificationType.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/verificationType.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,6 +27,7 @@
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
+#include "logging/log.hpp"
 #include "runtime/handles.inline.hpp"
 
 VerificationType VerificationType::from_tag(u1 tag) {
--- a/src/hotspot/share/classfile/verificationType.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/verificationType.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,7 +26,6 @@
 #define SHARE_VM_CLASSFILE_VERIFICATIONTYPE_HPP
 
 #include "classfile/systemDictionary.hpp"
-#include "memory/allocation.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.hpp"
 #include "oops/symbol.hpp"
--- a/src/hotspot/share/classfile/verifier.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/verifier.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -49,6 +49,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
 #include "utilities/align.hpp"
--- a/src/hotspot/share/classfile/verifier.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/classfile/verifier.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,7 +26,6 @@
 #define SHARE_VM_CLASSFILE_VERIFIER_HPP
 
 #include "classfile/verificationType.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "oops/klass.hpp"
 #include "oops/method.hpp"
 #include "runtime/handles.hpp"
--- a/src/hotspot/share/code/codeBlob.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/codeBlob.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -294,6 +294,28 @@
   return blob;
 }
 
+VtableBlob::VtableBlob(const char* name, int size) :
+  BufferBlob(name, size) {
+}
+
+VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
+  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
+
+  VtableBlob* blob = NULL;
+  unsigned int size = sizeof(VtableBlob);
+  // align the size to CodeEntryAlignment
+  size = align_code_offset(size);
+  size += align_up(buffer_size, oopSize);
+  assert(name != NULL, "must provide a name");
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = new (size) VtableBlob(name, size);
+  }
+  // Track memory usage statistics after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+
+  return blob;
+}
 
 //----------------------------------------------------------------------------------------------------
 // Implementation of MethodHandlesAdapterBlob
--- a/src/hotspot/share/code/codeBlob.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/codeBlob.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -58,6 +58,7 @@
 //  RuntimeBlob          : Non-compiled method code; generated glue code
 //   BufferBlob          : Used for non-relocatable code such as interpreter, stubroutines, etc.
 //    AdapterBlob        : Used to hold C2I/I2C adapters
+//    VtableBlob         : Used for holding vtable chunks
 //    MethodHandlesAdapterBlob : Used to hold MethodHandles adapters
 //   RuntimeStub         : Call to VM runtime methods
 //   SingletonBlob       : Super-class for all blobs that exist in only one instance
@@ -132,6 +133,7 @@
   virtual bool is_exception_stub() const              { return false; }
   virtual bool is_safepoint_stub() const              { return false; }
   virtual bool is_adapter_blob() const                { return false; }
+  virtual bool is_vtable_blob() const                 { return false; }
   virtual bool is_method_handles_adapter_blob() const { return false; }
   virtual bool is_aot() const                         { return false; }
   virtual bool is_compiled() const                    { return false; }
@@ -380,6 +382,7 @@
 class BufferBlob: public RuntimeBlob {
   friend class VMStructs;
   friend class AdapterBlob;
+  friend class VtableBlob;
   friend class MethodHandlesAdapterBlob;
   friend class WhiteBox;
 
@@ -425,6 +428,18 @@
   virtual bool is_adapter_blob() const { return true; }
 };
 
+//---------------------------------------------------------------------------------------------------
+class VtableBlob: public BufferBlob {
+private:
+  VtableBlob(const char*, int);
+
+public:
+  // Creation
+  static VtableBlob* create(const char* name, int buffer_size);
+
+  // Typing
+  virtual bool is_vtable_blob() const { return true; }
+};
 
 //----------------------------------------------------------------------------------------------------
 // MethodHandlesAdapterBlob: used to hold MethodHandles adapters
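
[Editor's note] VtableBlob deliberately mirrors AdapterBlob one level up: a private
constructor, a static create() factory that sizes and allocates the blob under
CodeCache_lock, and an is_vtable_blob() type test. A minimal usage sketch (name and
buffer size are illustrative; create() can return NULL when the code cache is exhausted):

    VtableBlob* vb = VtableBlob::create("vtable chunks", 256);
    if (vb != NULL) {
      assert(vb->is_vtable_blob(), "typing must match");
      // hand vb's content area to the vtable stub generator ...
    }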
--- a/src/hotspot/share/code/codeCache.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/codeCache.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,15 @@
 #include "aot/aotLoader.hpp"
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
+#include "code/codeHeapState.hpp"
 #include "code/compiledIC.hpp"
 #include "code/dependencies.hpp"
 #include "code/icBuffer.hpp"
 #include "code/nmethod.hpp"
 #include "code/pcDesc.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc/shared/gcLocker.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
@@ -47,6 +49,7 @@
 #include "runtime/icache.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sweeper.hpp"
 #include "services/memoryService.hpp"
 #include "trace/tracing.hpp"
@@ -1363,8 +1366,17 @@
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       print_summary(&s);
     }
-    ttyLocker ttyl;
-    tty->print("%s", s.as_string());
+    {
+      ttyLocker ttyl;
+      tty->print("%s", s.as_string());
+    }
+
+    if (heap->full_count() == 0) {
+      LogTarget(Debug, codecache) lt;
+      if (lt.is_enabled()) {
+        CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
+      }
+    }
   }
 
   heap->report_full();
@@ -1639,3 +1651,54 @@
             blob_count(), nmethod_count(), adapter_count(),
             unallocated_capacity());
 }
+
+//---<  BEGIN  >--- CodeHeap State Analytics.
+
+void CodeCache::aggregate(outputStream *out, const char* granularity) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::aggregate(out, (*heap), granularity);
+  }
+}
+
+void CodeCache::discard(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::discard(out, (*heap));
+  }
+}
+
+void CodeCache::print_usedSpace(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_usedSpace(out, (*heap));
+  }
+}
+
+void CodeCache::print_freeSpace(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_freeSpace(out, (*heap));
+  }
+}
+
+void CodeCache::print_count(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_count(out, (*heap));
+  }
+}
+
+void CodeCache::print_space(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_space(out, (*heap));
+  }
+}
+
+void CodeCache::print_age(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_age(out, (*heap));
+  }
+}
+
+void CodeCache::print_names(outputStream *out) {
+  FOR_ALL_ALLOCABLE_HEAPS(heap) {
+    CodeHeapState::print_names(out, (*heap));
+  }
+}
+//---<  END  >--- CodeHeap State Analytics.
--- a/src/hotspot/share/code/codeCache.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/codeCache.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -296,6 +296,17 @@
     CodeHeap* heap = get_code_heap(code_blob_type);
     return (heap != NULL) ? heap->full_count() : 0;
   }
+
+  // CodeHeap State Analytics.
+  // interface methods for CodeHeap printing, called by CompileBroker
+  static void aggregate(outputStream *out, const char* granularity);
+  static void discard(outputStream *out);
+  static void print_usedSpace(outputStream *out);
+  static void print_freeSpace(outputStream *out);
+  static void print_count(outputStream *out);
+  static void print_space(outputStream *out);
+  static void print_age(outputStream *out);
+  static void print_names(outputStream *out);
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/code/codeHeapState.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -0,0 +1,2338 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeHeapState.hpp"
+#include "compiler/compileBroker.hpp"
+#include "runtime/sweeper.hpp"
+
+// -------------------------
+// |  General Description  |
+// -------------------------
+// The CodeHeap state analytics are divided in two parts.
+// The first part examines the entire CodeHeap and aggregates all
+// information that is believed useful/important.
+//
+// Aggregation condenses the information of a piece of the CodeHeap
+// (4096 bytes by default) into an analysis granule. These granules
+// contain enough detail to gain initial insight while keeping the
+// internal structure sizes in check.
+//
+// The CodeHeap is a living thing. Therefore, the aggregate is collected
+// under the CodeCache_lock. The subsequent print steps are only locked
+// against concurrent aggregations. That keeps the impact on
+// "normal operation" (JIT compiler and sweeper activity) to a minimum.
+//
+// The second part, which consists of several, independent steps,
+// prints the previously collected information with emphasis on
+// various aspects.
+//
+// Data collection and printing are done on an "on request" basis.
+// While no request is being processed, there is no impact on performance.
+// The CodeHeap state analytics do have some memory footprint.
+// The "aggregate" step allocates some data structures to hold the aggregated
+// information for later output. These data structures live until they are
+// explicitly discarded (function "discard") or until the VM terminates.
+// There is one exception: the function "all" does not leave any data
+// structures allocated.
+//
+// Requests for real-time, on-the-fly analysis can be issued via
+//   jcmd <pid> Compiler.CodeHeap_Analytics [<function>] [<granularity>]
+//
+// If you are (only) interested in what the CodeHeap looks like after running
+// a sample workload, you can use the command line option
+//   -Xlog:codecache=Trace
+//
+// To see the CodeHeap state in case of a "CodeCache full" condition, start the
+// VM with the
+//   -Xlog:codecache=Debug
+// command line option. It will produce output only for the first time the
+// condition is recognized.
+//
+// Both command line option variants produce output identical to the jcmd function
+//   jcmd <pid> Compiler.CodeHeap_Analytics all 4096
+// ---------------------------------------------------------------------------------
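+//
+// For example, to produce the full report once at the default granularity
+// (the pid 12345 is illustrative):
+//   jcmd 12345 Compiler.CodeHeap_Analytics all 4096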
+
+// With this declaration macro, it is possible to switch between
+//  - direct output into an argument-passed outputStream and
+//  - buffered output into a bufferedStream with subsequent flush
+//    of the filled buffer to the outputStream.
+#define USE_STRINGSTREAM
+#define HEX32_FORMAT  "0x%x"  // just a helper format string used below multiple times
+//
+// Writing to a bufferedStream buffer first has a significant advantage:
+// It uses noticeably fewer CPU cycles and reduces (when writing to a
+// network file) the required bandwidth by at least a factor of ten.
+// That clearly makes up for the increased code complexity.
+#if defined(USE_STRINGSTREAM)
+#define STRINGSTREAM_DECL(_anyst, _outst)                 \
+    /* _anyst  name of the stream as used in the code */  \
+    /* _outst  stream where final output will go to   */  \
+    ResourceMark rm;                                      \
+    bufferedStream   _sstobj = bufferedStream(4*K);       \
+    bufferedStream*  _sstbuf = &_sstobj;                  \
+    outputStream*    _outbuf = _outst;                    \
+    bufferedStream*  _anyst  = &_sstobj; /* any stream. Use this to just print - no buffer flush.  */
+
+#define STRINGSTREAM_FLUSH(termString)                    \
+    _sstbuf->print("%s", termString);                     \
+    _outbuf->print("%s", _sstbuf->as_string());           \
+    _sstbuf->reset();
+
+#define STRINGSTREAM_FLUSH_LOCKED(termString)             \
+    { ttyLocker ttyl;/* keep this output block together */\
+      STRINGSTREAM_FLUSH(termString)                      \
+    }
+#else
+#define STRINGSTREAM_DECL(_anyst, _outst)                 \
+    outputStream*  _outbuf = _outst;                      \
+    outputStream*  _anyst  = _outst;   /* any stream. Use this to just print - no buffer flush.  */
+
+#define STRINGSTREAM_FLUSH(termString)                    \
+    _outbuf->print("%s", termString);
+
+#define STRINGSTREAM_FLUSH_LOCKED(termString)             \
+    _outbuf->print("%s", termString);
+#endif
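+
+// A minimal usage sketch for the macros above (shown as a comment; the
+// function name is illustrative): declare once, print to the buffered
+// stream, then flush the buffer to the real output stream under the lock.
+//
+//   void print_example(outputStream* out) {
+//     STRINGSTREAM_DECL(ast, out)
+//     ast->print_cr("one line of analysis output");  // buffered, cheap
+//     STRINGSTREAM_FLUSH_LOCKED("\n")                // emitted under ttyLocker
+//   }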
+
+const char  blobTypeChar[] = {' ', 'N', 'I', 'X', 'Z', 'U', 'R', '?', 'D', 'T', 'E', 'S', 'A', 'M', 'B', 'L' };
+const char* blobTypeName[] = {"noType"
+                             ,     "nMethod (active)"
+                             ,          "nMethod (inactive)"
+                             ,               "nMethod (deopt)"
+                             ,                    "nMethod (zombie)"
+                             ,                         "nMethod (unloaded)"
+                             ,                              "runtime stub"
+                             ,                                   "ricochet stub"
+                             ,                                        "deopt stub"
+                             ,                                             "uncommon trap stub"
+                             ,                                                  "exception stub"
+                             ,                                                       "safepoint stub"
+                             ,                                                            "adapter blob"
+                             ,                                                                 "MH adapter blob"
+                             ,                                                                      "buffer blob"
+                             ,                                                                           "lastType"
+                             };
+const char* compTypeName[] = { "none", "c1", "c2", "jvmci" };
+
+// Be prepared for ten different CodeHeap segments. Should be enough for a few years.
+const  unsigned int        nSizeDistElements = 31;  // logarithmic range growth, max size: 2**32
+const  unsigned int        maxTopSizeBlocks  = 50;
+const  unsigned int        tsbStopper        = 2 * maxTopSizeBlocks;
+const  unsigned int        maxHeaps          = 10;
+static unsigned int        nHeaps            = 0;
+static struct CodeHeapStat CodeHeapStatArray[maxHeaps];
+
+static StatElement* StatArray             = NULL;
+static int          log2_seg_size         = 0;
+static size_t       seg_size              = 0;
+static size_t       alloc_granules        = 0;
+static size_t       granule_size          = 0;
+static bool         segment_granules      = false;
+static unsigned int nBlocks_t1            = 0;  // counting "in_use" nmethods only.
+static unsigned int nBlocks_t2            = 0;  // counting "in_use" nmethods only.
+static unsigned int nBlocks_alive         = 0;  // counting "not_used" and "not_entrant" nmethods only.
+static unsigned int nBlocks_dead          = 0;  // counting "zombie" and "unloaded" nmethods only.
+static unsigned int nBlocks_unloaded      = 0;  // counting "unloaded" nmethods only. This is a transient state.
+static unsigned int nBlocks_stub          = 0;
+
+static struct FreeBlk*          FreeArray = NULL;
+static unsigned int      alloc_freeBlocks = 0;
+
+static struct TopSizeBlk*    TopSizeArray = NULL;
+static unsigned int   alloc_topSizeBlocks = 0;
+static unsigned int    used_topSizeBlocks = 0;
+
+static struct SizeDistributionElement*  SizeDistributionArray = NULL;
+
+// nMethod temperature (hotness) indicators.
+static int                     avgTemp    = 0;
+static int                     maxTemp    = 0;
+static int                     minTemp    = 0;
+
+static unsigned int  latest_compilation_id   = 0;
+static volatile bool initialization_complete = false;
+
+const char* CodeHeapState::get_heapName(CodeHeap* heap) {
+  if (SegmentedCodeCache) {
+    return heap->name();
+  } else {
+    return "CodeHeap";
+  }
+}
+
+// returns the index for the heap being processed.
+unsigned int CodeHeapState::findHeapIndex(outputStream* out, const char* heapName) {
+  if (heapName == NULL) {
+    return maxHeaps;
+  }
+  if (SegmentedCodeCache) {
+    // Search for a pre-existing entry. If found, return that index.
+    for (unsigned int i = 0; i < nHeaps; i++) {
+      if (CodeHeapStatArray[i].heapName != NULL && strcmp(heapName, CodeHeapStatArray[i].heapName) == 0) {
+        return i;
+      }
+    }
+
+    // check if there are more code heap segments than we can handle.
+    if (nHeaps == maxHeaps) {
+      out->print_cr("Too many heap segments for current limit(%d).", maxHeaps);
+      return maxHeaps;
+    }
+
+    // Allocate a new slot in CodeHeapStatArray.
+    CodeHeapStatArray[nHeaps].heapName = heapName;
+    return nHeaps++;
+  } else {
+    nHeaps = 1;
+    CodeHeapStatArray[0].heapName = heapName;
+    return 0; // This is the default index if CodeCache is not segmented.
+  }
+}
+
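+// Copy the persisted statistics of the heap selected by heapName from
+// CodeHeapStatArray into the file-static working variables above. An
+// unknown heap name yields a zeroed (empty) working set.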
+void CodeHeapState::get_HeapStatGlobals(outputStream* out, const char* heapName) {
+  unsigned int ix = findHeapIndex(out, heapName);
+  if (ix < maxHeaps) {
+    StatArray             = CodeHeapStatArray[ix].StatArray;
+    seg_size              = CodeHeapStatArray[ix].segment_size;
+    log2_seg_size         = seg_size == 0 ? 0 : exact_log2(seg_size);
+    alloc_granules        = CodeHeapStatArray[ix].alloc_granules;
+    granule_size          = CodeHeapStatArray[ix].granule_size;
+    segment_granules      = CodeHeapStatArray[ix].segment_granules;
+    nBlocks_t1            = CodeHeapStatArray[ix].nBlocks_t1;
+    nBlocks_t2            = CodeHeapStatArray[ix].nBlocks_t2;
+    nBlocks_alive         = CodeHeapStatArray[ix].nBlocks_alive;
+    nBlocks_dead          = CodeHeapStatArray[ix].nBlocks_dead;
+    nBlocks_unloaded      = CodeHeapStatArray[ix].nBlocks_unloaded;
+    nBlocks_stub          = CodeHeapStatArray[ix].nBlocks_stub;
+    FreeArray             = CodeHeapStatArray[ix].FreeArray;
+    alloc_freeBlocks      = CodeHeapStatArray[ix].alloc_freeBlocks;
+    TopSizeArray          = CodeHeapStatArray[ix].TopSizeArray;
+    alloc_topSizeBlocks   = CodeHeapStatArray[ix].alloc_topSizeBlocks;
+    used_topSizeBlocks    = CodeHeapStatArray[ix].used_topSizeBlocks;
+    SizeDistributionArray = CodeHeapStatArray[ix].SizeDistributionArray;
+    avgTemp               = CodeHeapStatArray[ix].avgTemp;
+    maxTemp               = CodeHeapStatArray[ix].maxTemp;
+    minTemp               = CodeHeapStatArray[ix].minTemp;
+  } else {
+    StatArray             = NULL;
+    seg_size              = 0;
+    log2_seg_size         = 0;
+    alloc_granules        = 0;
+    granule_size          = 0;
+    segment_granules      = false;
+    nBlocks_t1            = 0;
+    nBlocks_t2            = 0;
+    nBlocks_alive         = 0;
+    nBlocks_dead          = 0;
+    nBlocks_unloaded      = 0;
+    nBlocks_stub          = 0;
+    FreeArray             = NULL;
+    alloc_freeBlocks      = 0;
+    TopSizeArray          = NULL;
+    alloc_topSizeBlocks   = 0;
+    used_topSizeBlocks    = 0;
+    SizeDistributionArray = NULL;
+    avgTemp               = 0;
+    maxTemp               = 0;
+    minTemp               = 0;
+  }
+}
+
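+// Persist the file-static working variables back into the CodeHeapStatArray
+// slot of the heap selected by heapName. Unknown heaps are ignored.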
+void CodeHeapState::set_HeapStatGlobals(outputStream* out, const char* heapName) {
+  unsigned int ix = findHeapIndex(out, heapName);
+  if (ix < maxHeaps) {
+    CodeHeapStatArray[ix].StatArray             = StatArray;
+    CodeHeapStatArray[ix].segment_size          = seg_size;
+    CodeHeapStatArray[ix].alloc_granules        = alloc_granules;
+    CodeHeapStatArray[ix].granule_size          = granule_size;
+    CodeHeapStatArray[ix].segment_granules      = segment_granules;
+    CodeHeapStatArray[ix].nBlocks_t1            = nBlocks_t1;
+    CodeHeapStatArray[ix].nBlocks_t2            = nBlocks_t2;
+    CodeHeapStatArray[ix].nBlocks_alive         = nBlocks_alive;
+    CodeHeapStatArray[ix].nBlocks_dead          = nBlocks_dead;
+    CodeHeapStatArray[ix].nBlocks_unloaded      = nBlocks_unloaded;
+    CodeHeapStatArray[ix].nBlocks_stub          = nBlocks_stub;
+    CodeHeapStatArray[ix].FreeArray             = FreeArray;
+    CodeHeapStatArray[ix].alloc_freeBlocks      = alloc_freeBlocks;
+    CodeHeapStatArray[ix].TopSizeArray          = TopSizeArray;
+    CodeHeapStatArray[ix].alloc_topSizeBlocks   = alloc_topSizeBlocks;
+    CodeHeapStatArray[ix].used_topSizeBlocks    = used_topSizeBlocks;
+    CodeHeapStatArray[ix].SizeDistributionArray = SizeDistributionArray;
+    CodeHeapStatArray[ix].avgTemp               = avgTemp;
+    CodeHeapStatArray[ix].maxTemp               = maxTemp;
+    CodeHeapStatArray[ix].minTemp               = minTemp;
+  }
+}
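+
+// A minimal usage sketch (illustrative only): get_HeapStatGlobals() and
+// set_HeapStatGlobals() swap the per-heap statistics between the static
+// globals and the heap's slot in CodeHeapStatArray, so the analysis code
+// can operate on unqualified globals:
+//   get_HeapStatGlobals(out, heapName);  // load this heap's stats into the globals
+//   // ... analyze/print via StatArray, FreeArray, TopSizeArray, ...
+//   set_HeapStatGlobals(out, heapName);  // write the (possibly updated) globals back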
+
+//---<  get a new statistics array  >---
+void CodeHeapState::prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName) {
+  if (StatArray == NULL) {
+    StatArray      = new StatElement[nElem];
+    //---<  reset some counts  >---
+    alloc_granules = nElem;
+    granule_size   = granularity;
+  }
+
+  if (StatArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Statistics could not be collected for %s, probably out of memory.", heapName);
+    out->print_cr("Current granularity is " SIZE_FORMAT " bytes. Try a coarser granularity.", granularity);
+    alloc_granules = 0;
+    granule_size   = 0;
+  } else {
+    //---<  initialize statistics array  >---
+    memset((void*)StatArray, 0, nElem*sizeof(StatElement));
+  }
+}
+
+//---<  get a new free block array  >---
+void CodeHeapState::prepare_FreeArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (FreeArray == NULL) {
+    FreeArray      = new FreeBlk[nElem];
+    //---<  reset some counts  >---
+    alloc_freeBlocks = nElem;
+  }
+
+  if (FreeArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Free space analysis cannot be done for %s, probably out of memory.", heapName);
+    alloc_freeBlocks = 0;
+  } else {
+    //---<  initialize free block array  >---
+    memset((void*)FreeArray, 0, alloc_freeBlocks*sizeof(FreeBlk));
+  }
+}
+
+//---<  get a new TopSizeArray  >---
+void CodeHeapState::prepare_TopSizeArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (TopSizeArray == NULL) {
+    TopSizeArray   = new TopSizeBlk[nElem];
+    //---<  reset some counts  >---
+    alloc_topSizeBlocks = nElem;
+    used_topSizeBlocks  = 0;
+  }
+
+  if (TopSizeArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Top-%d list of largest CodeHeap blocks can not be collected for %s, probably out of memory.", nElem, heapName);
+    alloc_topSizeBlocks = 0;
+  } else {
+    //---<  initialize TopSizeArray  >---
+    memset((void*)TopSizeArray, 0, nElem*sizeof(TopSizeBlk));
+    used_topSizeBlocks  = 0;
+  }
+}
+
+//---<  get a new SizeDistributionArray  >---
+void CodeHeapState::prepare_SizeDistArray(outputStream* out, unsigned int nElem, const char* heapName) {
+  if (SizeDistributionArray == NULL) {
+    SizeDistributionArray = new SizeDistributionElement[nElem];
+  }
+
+  if (SizeDistributionArray == NULL) {
+    //---<  just do nothing if allocation failed  >---
+    out->print_cr("Size distribution can not be collected for %s, probably out of memory.", heapName);
+  } else {
+    //---<  initialize SizeDistArray  >---
+    memset((void*)SizeDistributionArray, 0, nElem*sizeof(SizeDistributionElement));
+    // Logarithmic range growth. First range starts at _segment_size.
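+    // Example (assuming seg_size = 64 bytes, i.e. log2_seg_size = 6):
+    //   SizeDistributionArray[5] covers [0..1) segments (rangeEnd pre-set below),
+    //   SizeDistributionArray[6] covers [1..2) segments = [ 64..128) bytes,
+    //   SizeDistributionArray[7] covers [2..4) segments = [128..256) bytes, and so on.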
+    SizeDistributionArray[log2_seg_size-1].rangeEnd = 1U;
+    for (unsigned int i = log2_seg_size; i < nElem; i++) {
+      SizeDistributionArray[i].rangeStart = 1U << (i     - log2_seg_size);
+      SizeDistributionArray[i].rangeEnd   = 1U << ((i+1) - log2_seg_size);
+    }
+  }
+}
+
+//---<  update SizeDistributionArray  >---
+void CodeHeapState::update_SizeDistArray(outputStream* out, unsigned int len) {
+  if (SizeDistributionArray != NULL) {
+    for (unsigned int i = log2_seg_size-1; i < nSizeDistElements; i++) {
+      if ((SizeDistributionArray[i].rangeStart <= len) && (len < SizeDistributionArray[i].rangeEnd)) {
+        SizeDistributionArray[i].lenSum += len;
+        SizeDistributionArray[i].count++;
+        break;
+      }
+    }
+  }
+}
+
+void CodeHeapState::discard_StatArray(outputStream* out) {
+  if (StatArray != NULL) {
+    delete[] StatArray;
+    StatArray        = NULL;
+    alloc_granules   = 0;
+    granule_size     = 0;
+  }
+}
+
+void CodeHeapState::discard_FreeArray(outputStream* out) {
+  if (FreeArray != NULL) {
+    delete[] FreeArray;
+    FreeArray        = NULL;
+    alloc_freeBlocks = 0;
+  }
+}
+
+void CodeHeapState::discard_TopSizeArray(outputStream* out) {
+  if (TopSizeArray != NULL) {
+    delete[] TopSizeArray;
+    TopSizeArray        = NULL;
+    alloc_topSizeBlocks = 0;
+    used_topSizeBlocks  = 0;
+  }
+}
+
+void CodeHeapState::discard_SizeDistArray(outputStream* out) {
+  if (SizeDistributionArray != NULL) {
+    delete[] SizeDistributionArray;
+    SizeDistributionArray = NULL;
+  }
+}
+
+// Discard all allocated internal data structures.
+// This should be done after an analysis session is completed.
+void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  if (nHeaps > 0) {
+    for (unsigned int ix = 0; ix < nHeaps; ix++) {
+      get_HeapStatGlobals(out, CodeHeapStatArray[ix].heapName);
+      discard_StatArray(out);
+      discard_FreeArray(out);
+      discard_TopSizeArray(out);
+      discard_SizeDistArray(out);
+      set_HeapStatGlobals(out, CodeHeapStatArray[ix].heapName);
+      CodeHeapStatArray[ix].heapName = NULL;
+    }
+    nHeaps = 0;
+  }
+}
+
+void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, const char* granularity_request) {
+  unsigned int nBlocks_free    = 0;
+  unsigned int nBlocks_used    = 0;
+  unsigned int nBlocks_zomb    = 0;
+  unsigned int nBlocks_disconn = 0;
+  unsigned int nBlocks_notentr = 0;
+
+  //---<  max & min of TopSizeArray  >---
+  //  it is sufficient to have these sizes as 32bit unsigned ints.
+  //  The CodeHeap is limited in size to 4GB. Furthermore, the sizes
+  //  are stored in _segment_size units, scaling them down by a factor of 64 (at least).
+  unsigned int  currMax          = 0;
+  unsigned int  currMin          = 0;
+  unsigned int  currMin_ix       = 0;
+  unsigned long total_iterations = 0;
+
+  bool  done             = false;
+  const int min_granules = 256;
+  const int max_granules = 512*K; // limits analyzable CodeHeap (with segment_granules) to 32M..128M
+                                  // results in StatArray size of 20M (= max_granules * 40 Bytes per element)
+                                  // For a 1GB CodeHeap, the granule size must be at least 2kB to not violate the max_granules limit.
+  const char* heapName   = get_heapName(heap);
+  STRINGSTREAM_DECL(ast, out)
+
+  if (!initialization_complete) {
+    memset(CodeHeapStatArray, 0, sizeof(CodeHeapStatArray));
+    initialization_complete = true;
+
+    printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (general remarks)", NULL);
+    ast->print_cr("   The code heap analysis function provides deep insights into\n"
+                  "   the inner workings and the internal state of the Java VM's\n"
+                  "   code cache - the place where all the JVM generated machine\n"
+                  "   code is stored.\n"
+                  "   \n"
+                  "   This function is designed and provided for support engineers\n"
+                  "   to help them understand and solve issues in customer systems.\n"
+                  "   It is not intended for use and interpretation by other persons.\n"
+                  "   \n");
+    STRINGSTREAM_FLUSH("")
+  }
+  get_HeapStatGlobals(out, heapName);
+
+
+  // Since we are (and must be) analyzing the CodeHeap contents under the CodeCache_lock,
+  // all heap information is "constant" and can be safely extracted/calculated before we
+  // enter the while() loop. Actually, the loop will only be iterated once.
+  char*  low_bound     = heap->low_boundary();
+  size_t size          = heap->capacity();
+  size_t res_size      = heap->max_capacity();
+  seg_size             = heap->segment_size();
+  log2_seg_size        = seg_size == 0 ? 0 : exact_log2(seg_size);  // This is a global static value.
+
+  if (seg_size == 0) {
+    printBox(ast, '-', "Heap not fully initialized yet, segment size is zero for segment ", heapName);
+    STRINGSTREAM_FLUSH("")
+    return;
+  }
+
+  // Calculate granularity of analysis (and output).
+  //   The CodeHeap is managed (allocated) in segments (units) of CodeCacheSegmentSize.
+  //   The CodeHeap can become fairly large, in particular in productive real-life systems.
+  //
+  //   It is often neither feasible nor desirable to aggregate the data with the highest possible
+  //   level of detail, i.e. inspecting and printing each segment on its own.
+  //
+  //   The granularity parameter lets the caller specify the level of detail available in the analysis.
+  //   It must be a positive multiple of the segment size and should be selected such that enough
+  //   detail is provided while, at the same time, the printed output does not explode.
+  //
+  //   By manipulating the granularity value, we enforce that at least min_granules units
+  //   of analysis are available. We also enforce an upper limit of max_granules units to
+  //   keep the amount of allocated storage in check.
+  //
+  //   Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
+  //   This is necessary to prevent an unsigned short overflow while accumulating space information.
+  //
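+  //   Worked example (illustrative values): for a 128M CodeHeap with seg_size = 64
+  //   and granularity_request = "4096", the adjustments below leave granularity at
+  //   4096 (a seg_size multiple within all limits), yielding 128M/4096 = 32768 granules.
+  //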
+  size_t granularity = strtol(granularity_request, NULL, 0);
+  if (granularity > size) {
+    granularity = size;
+  }
+  if (size/granularity < min_granules) {
+    granularity = size/min_granules;                                   // at least min_granules granules
+  }
+  granularity = granularity & (~(seg_size - 1));                       // must be multiple of seg_size
+  if (granularity < seg_size) {
+    granularity = seg_size;                                            // must be at least seg_size
+  }
+  if (size/granularity > max_granules) {
+    granularity = size/max_granules;                                   // at most max_granules granules
+  }
+  granularity = granularity & (~(seg_size - 1));                       // must be multiple of seg_size
+  if (granularity>>log2_seg_size >= (1L<<sizeof(unsigned short)*8)) {
+    granularity = ((1L<<(sizeof(unsigned short)*8))-1)<<log2_seg_size; // Limit: (64k-1) * seg_size
+  }
+  segment_granules = granularity == seg_size;
+  size_t granules  = (size + (granularity-1))/granularity;
+
+  printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (used blocks) for segment ", heapName);
+  ast->print_cr("   The aggregate step takes an aggregated snapshot of the CodeHeap.\n"
+                "   Subsequent print functions create their output based on this snapshot.\n"
+                "   The CodeHeap is a living thing, and every effort has been made for the\n"
+                "   collected data to be consistent. Only the method names and signatures\n"
+                "   are retrieved at print time. That may lead to rare cases where the\n"
+                "   name of a method is no longer available, e.g. because it was unloaded.\n");
+  ast->print_cr("   CodeHeap committed size " SIZE_FORMAT "K (" SIZE_FORMAT "M), reserved size " SIZE_FORMAT "K (" SIZE_FORMAT "M), %d%% occupied.",
+                size/(size_t)K, size/(size_t)M, res_size/(size_t)K, res_size/(size_t)M, (unsigned int)(100.0*size/res_size));
+  ast->print_cr("   CodeHeap allocation segment size is " SIZE_FORMAT " bytes. This is the smallest possible granularity.", seg_size);
+  ast->print_cr("   CodeHeap (committed part) is mapped to " SIZE_FORMAT " granules of size " SIZE_FORMAT " bytes.", granules, granularity);
+  ast->print_cr("   Each granule takes " SIZE_FORMAT " bytes of C heap, that is " SIZE_FORMAT "K in total for statistics data.", sizeof(StatElement), (sizeof(StatElement)*granules)/(size_t)K);
+  ast->print_cr("   The number of granules is limited to %dk, requiring a granules size of at least %d bytes for a 1GB heap.", (unsigned int)(max_granules/K), (unsigned int)(G/max_granules));
+  STRINGSTREAM_FLUSH("\n")
+
+
+  while (!done) {
+    //---<  reset counters with every aggregation  >---
+    nBlocks_t1       = 0;
+    nBlocks_t2       = 0;
+    nBlocks_alive    = 0;
+    nBlocks_dead     = 0;
+    nBlocks_unloaded = 0;
+    nBlocks_stub     = 0;
+
+    nBlocks_free     = 0;
+    nBlocks_used     = 0;
+    nBlocks_zomb     = 0;
+    nBlocks_disconn  = 0;
+    nBlocks_notentr  = 0;
+
+    //---<  discard old arrays if size does not match  >---
+    if (granules != alloc_granules) {
+      discard_StatArray(out);
+      discard_TopSizeArray(out);
+    }
+
+    //---<  allocate arrays if they don't yet exist, initialize  >---
+    prepare_StatArray(out, granules, granularity, heapName);
+    if (StatArray == NULL) {
+      set_HeapStatGlobals(out, heapName);
+      return;
+    }
+    prepare_TopSizeArray(out, maxTopSizeBlocks, heapName);
+    prepare_SizeDistArray(out, nSizeDistElements, heapName);
+
+    latest_compilation_id = CompileBroker::get_compilation_id();
+    unsigned int highest_compilation_id = 0;
+    size_t       usedSpace     = 0;
+    size_t       t1Space       = 0;
+    size_t       t2Space       = 0;
+    size_t       aliveSpace    = 0;
+    size_t       disconnSpace  = 0;
+    size_t       notentrSpace  = 0;
+    size_t       deadSpace     = 0;
+    size_t       unloadedSpace = 0;
+    size_t       stubSpace     = 0;
+    size_t       freeSpace     = 0;
+    size_t       maxFreeSize   = 0;
+    HeapBlock*   maxFreeBlock  = NULL;
+    bool         insane        = false;
+
+    int64_t hotnessAccumulator = 0;
+    unsigned int n_methods     = 0;
+    avgTemp       = 0;
+    minTemp       = (int)(res_size > M ? (res_size/M)*2 : 1);
+    maxTemp       = -minTemp;
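+    // minTemp mirrors the hotness counter reset value (see
+    // NMethodSweeper::hotness_counter_reset_val()), i.e. the highest temperature
+    // a method can have; maxTemp starts at its negative. The first observed
+    // method temperature therefore replaces both initial values.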
+
+    for (HeapBlock *h = heap->first_block(); h != NULL && !insane; h = heap->next_block(h)) {
+      unsigned int hb_len     = (unsigned int)h->length();  // despite being size_t, length can never overflow an unsigned int.
+      size_t       hb_bytelen = ((size_t)hb_len)<<log2_seg_size;
+      unsigned int ix_beg     = (unsigned int)(((char*)h-low_bound)/granule_size);
+      unsigned int ix_end     = (unsigned int)(((char*)h-low_bound+(hb_bytelen-1))/granule_size);
+      unsigned int compile_id = 0;
+      CompLevel    comp_lvl   = CompLevel_none;
+      compType     cType      = noComp;
+      blobType     cbType     = noType;
+
+      //---<  some sanity checks  >---
+      // Do not assert here, just check, print error message and return.
+      // This is a diagnostic function. It is not supposed to tear down the VM.
+      if ((char*)h <  low_bound ) {
+        insane = true; ast->print_cr("Sanity check: HeapBlock @%p below low bound (%p)", (char*)h, low_bound);
+      }
+      if (ix_end   >= granules  ) {
+        insane = true; ast->print_cr("Sanity check: end index (%d) out of bounds (" SIZE_FORMAT ")", ix_end, granules);
+      }
+      if (size     != heap->capacity()) {
+        insane = true; ast->print_cr("Sanity check: code heap capacity has changed (" SIZE_FORMAT "K to " SIZE_FORMAT "K)", size/(size_t)K, heap->capacity()/(size_t)K);
+      }
+      if (ix_beg   >  ix_end    ) {
+        insane = true; ast->print_cr("Sanity check: end index (%d) lower than begin index (%d)", ix_end, ix_beg);
+      }
+      if (insane) {
+        STRINGSTREAM_FLUSH("")
+        continue;
+      }
+
+      if (h->free()) {
+        nBlocks_free++;
+        freeSpace    += hb_bytelen;
+        if (hb_bytelen > maxFreeSize) {
+          maxFreeSize   = hb_bytelen;
+          maxFreeBlock  = h;
+        }
+      } else {
+        update_SizeDistArray(out, hb_len);
+        nBlocks_used++;
+        usedSpace    += hb_bytelen;
+        CodeBlob* cb  = (CodeBlob*)heap->find_start(h);
+        if (cb != NULL) {
+          cbType = get_cbType(cb);
+          if (cb->is_nmethod()) {
+            compile_id = ((nmethod*)cb)->compile_id();
+            comp_lvl   = (CompLevel)((nmethod*)cb)->comp_level();
+            if (((nmethod*)cb)->is_compiled_by_c1()) {
+              cType = c1;
+            }
+            if (((nmethod*)cb)->is_compiled_by_c2()) {
+              cType = c2;
+            }
+            if (((nmethod*)cb)->is_compiled_by_jvmci()) {
+              cType = jvmci;
+            }
+            switch (cbType) {
+              case nMethod_inuse: { // only for executable methods!!!
+                // space for these cbs is accounted for later.
+                int temperature = ((nmethod*)cb)->hotness_counter();
+                hotnessAccumulator += temperature;
+                n_methods++;
+                maxTemp = (temperature > maxTemp) ? temperature : maxTemp;
+                minTemp = (temperature < minTemp) ? temperature : minTemp;
+                break;
+              }
+              case nMethod_notused:
+                nBlocks_alive++;
+                nBlocks_disconn++;
+                aliveSpace     += hb_bytelen;
+                disconnSpace   += hb_bytelen;
+                break;
+              case nMethod_notentrant:  // equivalent to nMethod_alive
+                nBlocks_alive++;
+                nBlocks_notentr++;
+                aliveSpace     += hb_bytelen;
+                notentrSpace   += hb_bytelen;
+                break;
+              case nMethod_unloaded:
+                nBlocks_unloaded++;
+                unloadedSpace  += hb_bytelen;
+                break;
+              case nMethod_dead:
+                nBlocks_dead++;
+                deadSpace      += hb_bytelen;
+                break;
+              default:
+                break;
+            }
+          }
+
+          //------------------------------------------
+          //---<  register block in TopSizeArray  >---
+          //------------------------------------------
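+          // TopSizeArray is maintained as a singly-linked list embedded in the
+          // array: index 0 always holds the largest block, each entry's .index
+          // field chains to the next-smaller entry, and tsbStopper terminates
+          // the chain. currMin/currMin_ix remember the smallest recorded block
+          // so most non-qualifying blocks are rejected without a list walk.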
+          if (alloc_topSizeBlocks > 0) {
+            if (used_topSizeBlocks == 0) {
+              TopSizeArray[0].start    = h;
+              TopSizeArray[0].len      = hb_len;
+              TopSizeArray[0].index    = tsbStopper;
+              TopSizeArray[0].compiler = cType;
+              TopSizeArray[0].level    = comp_lvl;
+              TopSizeArray[0].type     = cbType;
+              currMax    = hb_len;
+              currMin    = hb_len;
+              currMin_ix = 0;
+              used_topSizeBlocks++;
+            // This check roughly cuts 5000 iterations (JVM98, mixed, dbg, termination stats):
+            } else if ((used_topSizeBlocks < alloc_topSizeBlocks) && (hb_len < currMin)) {
+              //---<  all blocks in list are larger, but there is room left in array  >---
+              TopSizeArray[currMin_ix].index = used_topSizeBlocks;
+              TopSizeArray[used_topSizeBlocks].start    = h;
+              TopSizeArray[used_topSizeBlocks].len      = hb_len;
+              TopSizeArray[used_topSizeBlocks].index    = tsbStopper;
+              TopSizeArray[used_topSizeBlocks].compiler = cType;
+              TopSizeArray[used_topSizeBlocks].level    = comp_lvl;
+              TopSizeArray[used_topSizeBlocks].type     = cbType;
+              currMin    = hb_len;
+              currMin_ix = used_topSizeBlocks;
+              used_topSizeBlocks++;
+            } else {
+              // This check cuts total_iterations by a factor of 6 (JVM98, mixed, dbg, termination stats):
+              //   We don't need to search the list if we know beforehand that the current block size is
+              //   smaller than the currently recorded minimum and there is no free entry left in the list.
+              if (!((used_topSizeBlocks == alloc_topSizeBlocks) && (hb_len <= currMin))) {
+                if (currMax < hb_len) {
+                  currMax = hb_len;
+                }
+                unsigned int i;
+                unsigned int limit_i =  0;
+                for (i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
+                  if (limit_i++ >= alloc_topSizeBlocks) {
+                    insane = true; break; // emergency exit
+                  }
+                  if (i >= used_topSizeBlocks)  {
+                    insane = true; break; // emergency exit
+                  }
+                  total_iterations++;
+                  if (TopSizeArray[i].len < hb_len) {
+                    //---<  We want to insert here, element <i> is smaller than the current one  >---
+                    if (used_topSizeBlocks < alloc_topSizeBlocks) { // still room for a new entry to insert
+                      // old entry gets moved to the next free element of the array.
+                      // That's necessary to keep the entry for the largest block at index 0.
+                      // This move might cause the current minimum to be moved to another place
+                      if (i == currMin_ix) {
+                        assert(TopSizeArray[i].len == currMin, "sort error");
+                        currMin_ix = used_topSizeBlocks;
+                      }
+                      memcpy((void*)&TopSizeArray[used_topSizeBlocks], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
+                      TopSizeArray[i].start    = h;
+                      TopSizeArray[i].len      = hb_len;
+                      TopSizeArray[i].index    = used_topSizeBlocks;
+                      TopSizeArray[i].compiler = cType;
+                      TopSizeArray[i].level    = comp_lvl;
+                      TopSizeArray[i].type     = cbType;
+                      used_topSizeBlocks++;
+                    } else { // no room for new entries, current block replaces entry for smallest block
+                      //---<  Find last entry (entry for smallest remembered block)  >---
+                      unsigned int      j  = i;
+                      unsigned int prev_j  = tsbStopper;
+                      unsigned int limit_j = 0;
+                      while (TopSizeArray[j].index != tsbStopper) {
+                        if (limit_j++ >= alloc_topSizeBlocks) {
+                          insane = true; break; // emergency exit
+                        }
+                        if (j >= used_topSizeBlocks)  {
+                          insane = true; break; // emergency exit
+                        }
+                        total_iterations++;
+                        prev_j = j;
+                        j      = TopSizeArray[j].index;
+                      }
+                      if (!insane) {
+                        if (prev_j == tsbStopper) {
+                          //---<  Above while loop did not iterate, we already are the min entry  >---
+                          //---<  We have to just replace the smallest entry                      >---
+                          currMin    = hb_len;
+                          currMin_ix = j;
+                          TopSizeArray[j].start    = h;
+                          TopSizeArray[j].len      = hb_len;
+                          TopSizeArray[j].index    = tsbStopper; // already set!!
+                          TopSizeArray[j].compiler = cType;
+                          TopSizeArray[j].level    = comp_lvl;
+                          TopSizeArray[j].type     = cbType;
+                        } else {
+                          //---<  second-smallest entry is now smallest  >---
+                          TopSizeArray[prev_j].index = tsbStopper;
+                          currMin    = TopSizeArray[prev_j].len;
+                          currMin_ix = prev_j;
+                          //---<  smallest entry gets overwritten  >---
+                          memcpy((void*)&TopSizeArray[j], (void*)&TopSizeArray[i], sizeof(TopSizeBlk));
+                          TopSizeArray[i].start    = h;
+                          TopSizeArray[i].len      = hb_len;
+                          TopSizeArray[i].index    = j;
+                          TopSizeArray[i].compiler = cType;
+                          TopSizeArray[i].level    = comp_lvl;
+                          TopSizeArray[i].type     = cbType;
+                        }
+                      } // insane
+                    }
+                    break;
+                  }
+                }
+                if (insane) {
+                  // Note: regular analysis could probably continue by resetting "insane" flag.
+                  out->print_cr("Possible loop in TopSizeBlocks list detected. Analysis aborted.");
+                  discard_TopSizeArray(out);
+                }
+              }
+            }
+          }
+          //----------------------------------------------
+          //---<  END register block in TopSizeArray  >---
+          //----------------------------------------------
+        } else {
+          nBlocks_zomb++;
+        }
+
+        if (ix_beg == ix_end) {
+          StatArray[ix_beg].type = cbType;
+          switch (cbType) {
+            case nMethod_inuse:
+              highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
+              if (comp_lvl < CompLevel_full_optimization) {
+                nBlocks_t1++;
+                t1Space   += hb_bytelen;
+                StatArray[ix_beg].t1_count++;
+                StatArray[ix_beg].t1_space += (unsigned short)hb_len;
+                StatArray[ix_beg].t1_age    = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
+              } else {
+                nBlocks_t2++;
+                t2Space   += hb_bytelen;
+                StatArray[ix_beg].t2_count++;
+                StatArray[ix_beg].t2_space += (unsigned short)hb_len;
+                StatArray[ix_beg].t2_age    = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
+              }
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              break;
+            case nMethod_alive:
+              StatArray[ix_beg].tx_count++;
+              StatArray[ix_beg].tx_space += (unsigned short)hb_len;
+              StatArray[ix_beg].tx_age    = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              break;
+            case nMethod_dead:
+            case nMethod_unloaded:
+              StatArray[ix_beg].dead_count++;
+              StatArray[ix_beg].dead_space += (unsigned short)hb_len;
+              break;
+            default:
+              // must be a stub if it's not a dead or alive nMethod
+              nBlocks_stub++;
+              stubSpace   += hb_bytelen;
+              StatArray[ix_beg].stub_count++;
+              StatArray[ix_beg].stub_space += (unsigned short)hb_len;
+              break;
+          }
+        } else {
+          unsigned int beg_space = (unsigned int)(granule_size - ((char*)h - low_bound - ix_beg*granule_size));
+          unsigned int end_space = (unsigned int)(hb_bytelen - beg_space - (ix_end-ix_beg-1)*granule_size);
+          beg_space = beg_space>>log2_seg_size;  // store in units of _segment_size
+          end_space = end_space>>log2_seg_size;  // store in units of _segment_size
+          StatArray[ix_beg].type = cbType;
+          StatArray[ix_end].type = cbType;
+          switch (cbType) {
+            case nMethod_inuse:
+              highest_compilation_id = (highest_compilation_id >= compile_id) ? highest_compilation_id : compile_id;
+              if (comp_lvl < CompLevel_full_optimization) {
+                nBlocks_t1++;
+                t1Space   += hb_bytelen;
+                StatArray[ix_beg].t1_count++;
+                StatArray[ix_beg].t1_space += (unsigned short)beg_space;
+                StatArray[ix_beg].t1_age    = StatArray[ix_beg].t1_age < compile_id ? compile_id : StatArray[ix_beg].t1_age;
+
+                StatArray[ix_end].t1_count++;
+                StatArray[ix_end].t1_space += (unsigned short)end_space;
+                StatArray[ix_end].t1_age    = StatArray[ix_end].t1_age < compile_id ? compile_id : StatArray[ix_end].t1_age;
+              } else {
+                nBlocks_t2++;
+                t2Space   += hb_bytelen;
+                StatArray[ix_beg].t2_count++;
+                StatArray[ix_beg].t2_space += (unsigned short)beg_space;
+                StatArray[ix_beg].t2_age    = StatArray[ix_beg].t2_age < compile_id ? compile_id : StatArray[ix_beg].t2_age;
+
+                StatArray[ix_end].t2_count++;
+                StatArray[ix_end].t2_space += (unsigned short)end_space;
+                StatArray[ix_end].t2_age    = StatArray[ix_end].t2_age < compile_id ? compile_id : StatArray[ix_end].t2_age;
+              }
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              StatArray[ix_end].level     = comp_lvl;
+              StatArray[ix_end].compiler  = cType;
+              break;
+            case nMethod_alive:
+              StatArray[ix_beg].tx_count++;
+              StatArray[ix_beg].tx_space += (unsigned short)beg_space;
+              StatArray[ix_beg].tx_age    = StatArray[ix_beg].tx_age < compile_id ? compile_id : StatArray[ix_beg].tx_age;
+
+              StatArray[ix_end].tx_count++;
+              StatArray[ix_end].tx_space += (unsigned short)end_space;
+              StatArray[ix_end].tx_age    = StatArray[ix_end].tx_age < compile_id ? compile_id : StatArray[ix_end].tx_age;
+
+              StatArray[ix_beg].level     = comp_lvl;
+              StatArray[ix_beg].compiler  = cType;
+              StatArray[ix_end].level     = comp_lvl;
+              StatArray[ix_end].compiler  = cType;
+              break;
+            case nMethod_dead:
+            case nMethod_unloaded:
+              StatArray[ix_beg].dead_count++;
+              StatArray[ix_beg].dead_space += (unsigned short)beg_space;
+              StatArray[ix_end].dead_count++;
+              StatArray[ix_end].dead_space += (unsigned short)end_space;
+              break;
+            default:
+              // must be a stub if it's not a dead or alive nMethod
+              nBlocks_stub++;
+              stubSpace   += hb_bytelen;
+              StatArray[ix_beg].stub_count++;
+              StatArray[ix_beg].stub_space += (unsigned short)beg_space;
+              StatArray[ix_end].stub_count++;
+              StatArray[ix_end].stub_space += (unsigned short)end_space;
+              break;
+          }
+          for (unsigned int ix = ix_beg+1; ix < ix_end; ix++) {
+            StatArray[ix].type = cbType;
+            switch (cbType) {
+              case nMethod_inuse:
+                if (comp_lvl < CompLevel_full_optimization) {
+                  StatArray[ix].t1_count++;
+                  StatArray[ix].t1_space += (unsigned short)(granule_size>>log2_seg_size);
+                  StatArray[ix].t1_age    = StatArray[ix].t1_age < compile_id ? compile_id : StatArray[ix].t1_age;
+                } else {
+                  StatArray[ix].t2_count++;
+                  StatArray[ix].t2_space += (unsigned short)(granule_size>>log2_seg_size);
+                  StatArray[ix].t2_age    = StatArray[ix].t2_age < compile_id ? compile_id : StatArray[ix].t2_age;
+                }
+                StatArray[ix].level     = comp_lvl;
+                StatArray[ix].compiler  = cType;
+                break;
+              case nMethod_alive:
+                StatArray[ix].tx_count++;
+                StatArray[ix].tx_space += (unsigned short)(granule_size>>log2_seg_size);
+                StatArray[ix].tx_age    = StatArray[ix].tx_age < compile_id ? compile_id : StatArray[ix].tx_age;
+                StatArray[ix].level     = comp_lvl;
+                StatArray[ix].compiler  = cType;
+                break;
+              case nMethod_dead:
+              case nMethod_unloaded:
+                StatArray[ix].dead_count++;
+                StatArray[ix].dead_space += (unsigned short)(granule_size>>log2_seg_size);
+                break;
+              default:
+                // must be a stub if it's not a dead or alive nMethod
+                StatArray[ix].stub_count++;
+                StatArray[ix].stub_space += (unsigned short)(granule_size>>log2_seg_size);
+                break;
+            }
+          }
+        }
+      }
+    }
+    if (n_methods > 0) {
+      avgTemp = hotnessAccumulator/n_methods;
+    } else {
+      avgTemp = 0;
+    }
+    done = true;
+
+    if (!insane) {
+      // Because this block contains many print statements, its output may get
+      // interspersed with print data from other threads. We take this risk intentionally.
+      // Getting stalled waiting for tty_lock while holding the CodeCache_lock is not desirable.
+      printBox(ast, '-', "Global CodeHeap statistics for segment ", heapName);
+      ast->print_cr("freeSpace        = " SIZE_FORMAT_W(8) "k, nBlocks_free     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", freeSpace/(size_t)K,     nBlocks_free,     (100.0*freeSpace)/size,     (100.0*freeSpace)/res_size);
+      ast->print_cr("usedSpace        = " SIZE_FORMAT_W(8) "k, nBlocks_used     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", usedSpace/(size_t)K,     nBlocks_used,     (100.0*usedSpace)/size,     (100.0*usedSpace)/res_size);
+      ast->print_cr("  Tier1 Space    = " SIZE_FORMAT_W(8) "k, nBlocks_t1       = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t1Space/(size_t)K,       nBlocks_t1,       (100.0*t1Space)/size,       (100.0*t1Space)/res_size);
+      ast->print_cr("  Tier2 Space    = " SIZE_FORMAT_W(8) "k, nBlocks_t2       = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", t2Space/(size_t)K,       nBlocks_t2,       (100.0*t2Space)/size,       (100.0*t2Space)/res_size);
+      ast->print_cr("  Alive Space    = " SIZE_FORMAT_W(8) "k, nBlocks_alive    = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", aliveSpace/(size_t)K,    nBlocks_alive,    (100.0*aliveSpace)/size,    (100.0*aliveSpace)/res_size);
+      ast->print_cr("    disconnected = " SIZE_FORMAT_W(8) "k, nBlocks_disconn  = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", disconnSpace/(size_t)K,  nBlocks_disconn,  (100.0*disconnSpace)/size,  (100.0*disconnSpace)/res_size);
+      ast->print_cr("    not entrant  = " SIZE_FORMAT_W(8) "k, nBlocks_notentr  = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", notentrSpace/(size_t)K,  nBlocks_notentr,  (100.0*notentrSpace)/size,  (100.0*notentrSpace)/res_size);
+      ast->print_cr("  unloadedSpace  = " SIZE_FORMAT_W(8) "k, nBlocks_unloaded = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", unloadedSpace/(size_t)K, nBlocks_unloaded, (100.0*unloadedSpace)/size, (100.0*unloadedSpace)/res_size);
+      ast->print_cr("  deadSpace      = " SIZE_FORMAT_W(8) "k, nBlocks_dead     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", deadSpace/(size_t)K,     nBlocks_dead,     (100.0*deadSpace)/size,     (100.0*deadSpace)/res_size);
+      ast->print_cr("  stubSpace      = " SIZE_FORMAT_W(8) "k, nBlocks_stub     = %6d, %10.3f%% of capacity, %10.3f%% of max_capacity", stubSpace/(size_t)K,     nBlocks_stub,     (100.0*stubSpace)/size,     (100.0*stubSpace)/res_size);
+      ast->print_cr("ZombieBlocks     = %8d. These are HeapBlocks which could not be identified as CodeBlobs.", nBlocks_zomb);
+      ast->print_cr("latest allocated compilation id = %d", latest_compilation_id);
+      ast->print_cr("highest observed compilation id = %d", highest_compilation_id);
+      ast->print_cr("Building TopSizeList iterations = %ld", total_iterations);
+      ast->cr();
+
+      int             reset_val = NMethodSweeper::hotness_counter_reset_val();
+      double reverse_free_ratio = (res_size > size) ? (double)res_size/(double)(res_size-size) : (double)res_size;
+      printBox(ast, '-', "Method hotness information at time of this analysis", NULL);
+      ast->print_cr("Highest possible method temperature:          %12d", reset_val);
+      ast->print_cr("Threshold for method to be considered 'cold': %12.3f", -reset_val + reverse_free_ratio * NmethodSweepActivity);
+      ast->print_cr("min. hotness = %6d", minTemp);
+      ast->print_cr("avg. hotness = %6d", avgTemp);
+      ast->print_cr("max. hotness = %6d", maxTemp);
+      STRINGSTREAM_FLUSH("\n")
+
+      // This loop is intentionally printing directly to "out".
+      out->print("Verifying collected data...");
+      size_t granule_segs = granule_size>>log2_seg_size;
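+      // Counts and space figures are recorded in segment_size units, so no
+      // per-granule counter may exceed granule_segs. Anything larger indicates
+      // a bookkeeping error and is reported below.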
+      for (unsigned int ix = 0; ix < granules; ix++) {
+        if (StatArray[ix].t1_count   > granule_segs) {
+          out->print_cr("t1_count[%d]   = %d", ix, StatArray[ix].t1_count);
+        }
+        if (StatArray[ix].t2_count   > granule_segs) {
+          out->print_cr("t2_count[%d]   = %d", ix, StatArray[ix].t2_count);
+        }
+        if (StatArray[ix].stub_count > granule_segs) {
+          out->print_cr("stub_count[%d] = %d", ix, StatArray[ix].stub_count);
+        }
+        if (StatArray[ix].dead_count > granule_segs) {
+          out->print_cr("dead_count[%d] = %d", ix, StatArray[ix].dead_count);
+        }
+        if (StatArray[ix].t1_space   > granule_segs) {
+          out->print_cr("t1_space[%d]   = %d", ix, StatArray[ix].t1_space);
+        }
+        if (StatArray[ix].t2_space   > granule_segs) {
+          out->print_cr("t2_space[%d]   = %d", ix, StatArray[ix].t2_space);
+        }
+        if (StatArray[ix].stub_space > granule_segs) {
+          out->print_cr("stub_space[%d] = %d", ix, StatArray[ix].stub_space);
+        }
+        if (StatArray[ix].dead_space > granule_segs) {
+          out->print_cr("dead_space[%d] = %d", ix, StatArray[ix].dead_space);
+        }
+        //   This cast avoids the signed/unsigned mismatch otherwise reported by the NT/Intel compiler.
+        if ((size_t)(StatArray[ix].t1_count+StatArray[ix].t2_count+StatArray[ix].stub_count+StatArray[ix].dead_count) > granule_segs) {
+          out->print_cr("t1_count[%d] = %d, t2_count[%d] = %d, stub_count[%d] = %d", ix, StatArray[ix].t1_count, ix, StatArray[ix].t2_count, ix, StatArray[ix].stub_count);
+        }
+        if ((size_t)(StatArray[ix].t1_space+StatArray[ix].t2_space+StatArray[ix].stub_space+StatArray[ix].dead_space) > granule_segs) {
+          out->print_cr("t1_space[%d] = %d, t2_space[%d] = %d, stub_space[%d] = %d", ix, StatArray[ix].t1_space, ix, StatArray[ix].t2_space, ix, StatArray[ix].stub_space);
+        }
+      }
+
+      // This loop is intentionally printing directly to "out".
+      if (used_topSizeBlocks > 0) {
+        unsigned int j = 0;
+        if (TopSizeArray[0].len != currMax) {
+          out->print_cr("currMax(%d) differs from TopSizeArray[0].len(%d)", currMax, TopSizeArray[0].len);
+        }
+        for (unsigned int i = 0; (TopSizeArray[i].index != tsbStopper) && (j++ < alloc_topSizeBlocks); i = TopSizeArray[i].index) {
+          if (TopSizeArray[i].len < TopSizeArray[TopSizeArray[i].index].len) {
+            out->print_cr("sort error at index %d: %d !>= %d", i, TopSizeArray[i].len, TopSizeArray[TopSizeArray[i].index].len);
+          }
+        }
+        if (j >= alloc_topSizeBlocks) {
+          out->print_cr("Possible loop in TopSizeArray chaining!\n  allocBlocks = %d, usedBlocks = %d", alloc_topSizeBlocks, used_topSizeBlocks);
+          for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
+            out->print_cr("  TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
+          }
+        }
+      }
+      out->print_cr("...done\n\n");
+    } else {
+      // Insane heap state detected. The analysis data is incomplete. Just throw it away.
+      discard_StatArray(out);
+      discard_TopSizeArray(out);
+    }
+  }
+
+
+  done        = false;
+  while (!done && (nBlocks_free > 0)) {
+
+    printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   (free blocks) for segment ", heapName);
+    ast->print_cr("   The aggregate step collects information about all free blocks in CodeHeap.\n"
+                  "   Subsequent print functions create their output based on this snapshot.\n");
+    ast->print_cr("   Free space in %s is distributed over %d free blocks.", heapName, nBlocks_free);
+    ast->print_cr("   Each free block takes " SIZE_FORMAT " bytes of C heap for statistics data, that is " SIZE_FORMAT "K in total.", sizeof(FreeBlk), (sizeof(FreeBlk)*nBlocks_free)/K);
+    STRINGSTREAM_FLUSH("\n")
+
+    //----------------------------------------
+    //--  Prepare the FreeArray of FreeBlks --
+    //----------------------------------------
+
+    //---< discard old array if size does not match  >---
+    if (nBlocks_free != alloc_freeBlocks) {
+      discard_FreeArray(out);
+    }
+
+    prepare_FreeArray(out, nBlocks_free, heapName);
+    if (FreeArray == NULL) {
+      done = true;
+      continue;
+    }
+
+    //----------------------------------------
+    //--  Collect all FreeBlks in FreeArray --
+    //----------------------------------------
+
+    unsigned int ix = 0;
+    FreeBlock* cur  = heap->freelist();
+
+    while (cur != NULL) {
+      if (ix < alloc_freeBlocks) { // don't index out of bounds if _freelist has more blocks than anticipated
+        FreeArray[ix].start = cur;
+        FreeArray[ix].len   = (unsigned int)(cur->length()<<log2_seg_size);
+        FreeArray[ix].index = ix;
+      }
+      cur  = cur->link();
+      ix++;
+    }
+    if (ix != alloc_freeBlocks) {
+      ast->print_cr("Free block count mismatch. Expected %d free blocks, but found %d.", alloc_freeBlocks, ix);
+      ast->print_cr("I will update the counter and retry data collection");
+      STRINGSTREAM_FLUSH("\n")
+      nBlocks_free = ix;
+      continue;
+    }
+    done = true;
+  }
+
+  if (!done || (nBlocks_free == 0)) {
+    if (nBlocks_free == 0) {
+      printBox(ast, '-', "no free blocks found in", heapName);
+    } else if (!done) {
+      ast->print_cr("Free block count mismatch could not be resolved.");
+      ast->print_cr("Try to run \"aggregate\" function to update counters");
+    }
+    STRINGSTREAM_FLUSH("")
+
+    //---< discard old array and update global values  >---
+    discard_FreeArray(out);
+    set_HeapStatGlobals(out, heapName);
+    return;
+  }
+
+  //---<  calculate and fill remaining fields  >---
+  if (FreeArray != NULL) {
+    // This loop is intentionally printing directly to "out".
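+    // FreeArray[ix].gap is the occupied distance (in bytes) between the end of
+    // free block ix and the start of free block ix+1. n_gapBlocks counts the
+    // HeapBlocks in that interval; stubs_in_gap marks gaps which can never
+    // become free because they contain non-nmethod blobs.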
+    for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
+      size_t lenSum = 0;
+      FreeArray[ix].gap = (unsigned int)((address)FreeArray[ix+1].start - ((address)FreeArray[ix].start + FreeArray[ix].len));
+      for (HeapBlock *h = heap->next_block(FreeArray[ix].start); (h != NULL) && (h != FreeArray[ix+1].start); h = heap->next_block(h)) {
+        CodeBlob *cb  = (CodeBlob*)(heap->find_start(h));
+        if ((cb != NULL) && !cb->is_nmethod()) {
+          FreeArray[ix].stubs_in_gap = true;
+        }
+        FreeArray[ix].n_gapBlocks++;
+        lenSum += h->length()<<log2_seg_size;
+        if (((address)h < ((address)FreeArray[ix].start+FreeArray[ix].len)) || (h >= FreeArray[ix+1].start)) {
+          out->print_cr("unsorted occupied CodeHeap block found @ %p, gap interval [%p, %p)", h, (address)FreeArray[ix].start+FreeArray[ix].len, FreeArray[ix+1].start);
+        }
+      }
+      if (lenSum != FreeArray[ix].gap) {
+        out->print_cr("Length mismatch for gap between FreeBlk[%d] and FreeBlk[%d]. Calculated: %d, accumulated: %d.", ix, ix+1, FreeArray[ix].gap, (unsigned int)lenSum);
+      }
+    }
+  }
+  set_HeapStatGlobals(out, heapName);
+
+  printBox(ast, '=', "C O D E   H E A P   A N A L Y S I S   C O M P L E T E   for segment ", heapName);
+  STRINGSTREAM_FLUSH("\n")
+}
+
+
+void CodeHeapState::print_usedSpace(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (TopSizeArray == NULL) || (used_topSizeBlocks == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  {
+    printBox(ast, '=', "U S E D   S P A C E   S T A T I S T I C S   for ", heapName);
+    ast->print_cr("Note: The Top%d list of the largest used blocks associates method names\n"
+                  "      and other identifying information with the block size data.\n"
+                  "\n"
+                  "      Method names are dynamically retrieved from the code cache at print time.\n"
+                  "      Due to the living nature of the code cache and because the CodeCache_lock\n"
+                  "      is not continuously held, the displayed name might be wrong or no name\n"
+                  "      might be found at all. The likelihood for that to happen increases\n"
+                  "      over time passed between analysis and print step.\n", used_topSizeBlocks);
+    STRINGSTREAM_FLUSH_LOCKED("\n")
+  }
+
+  //----------------------------
+  //--  Print Top Used Blocks --
+  //----------------------------
+  {
+    char*     low_bound = heap->low_boundary();
+
+    printBox(ast, '-', "Largest Used Blocks in ", heapName);
+    print_blobType_legend(ast);
+
+    ast->fill_to(51);
+    ast->print("%4s", "blob");
+    ast->fill_to(56);
+    ast->print("%9s", "compiler");
+    ast->fill_to(66);
+    ast->print_cr("%6s", "method");
+    ast->print_cr("%18s %13s %17s %4s %9s  %5s %s",      "Addr(module)      ", "offset", "size", "type", " type lvl", " temp", "Name");
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    //---<  print Top-N Used Blocks  >---
+    if (used_topSizeBlocks > 0) {
+      unsigned int printed_topSizeBlocks = 0;
+      for (unsigned int i = 0; i != tsbStopper; i = TopSizeArray[i].index) {
+        printed_topSizeBlocks++;
+        CodeBlob*   this_blob = (CodeBlob*)(heap->find_start(TopSizeArray[i].start));
+        nmethod*           nm = NULL;
+        const char* blob_name = "unnamed blob";
+        if (this_blob != NULL) {
+          blob_name = this_blob->name();
+          nm        = this_blob->as_nmethod_or_null();
+          //---<  blob address  >---
+          ast->print("%p", this_blob);
+          ast->fill_to(19);
+          //---<  blob offset from CodeHeap begin  >---
+          ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
+          ast->fill_to(33);
+        } else {
+          //---<  block address  >---
+          ast->print("%p", TopSizeArray[i].start);
+          ast->fill_to(19);
+          //---<  block offset from CodeHeap begin  >---
+          ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)TopSizeArray[i].start-low_bound));
+          ast->fill_to(33);
+        }
+
+
+        //---<  print size, name, and signature (for nMethods)  >---
+        if ((nm != NULL) && (nm->method() != NULL)) {
+          ResourceMark rm;
+          //---<  nMethod size in hex  >---
+          unsigned int total_size = nm->total_size();
+          ast->print(PTR32_FORMAT, total_size);
+          ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
+          ast->fill_to(51);
+          ast->print("  %c", blobTypeChar[TopSizeArray[i].type]);
+          //---<  compiler information  >---
+          ast->fill_to(56);
+          ast->print("%5s %3d", compTypeName[TopSizeArray[i].compiler], TopSizeArray[i].level);
+          //---<  method temperature  >---
+          ast->fill_to(67);
+          ast->print("%5d", nm->hotness_counter());
+          //---<  name and signature  >---
+          ast->fill_to(67+6);
+          if (nm->is_in_use())      {blob_name = nm->method()->name_and_sig_as_C_string(); }
+          if (nm->is_not_entrant()) {blob_name = nm->method()->name_and_sig_as_C_string(); }
+          if (nm->is_zombie())      {ast->print("%14s", " zombie method"); }
+          ast->print("%s", blob_name);
+        } else {
+          //---<  block size in hex  >---
+          ast->print(PTR32_FORMAT, (unsigned int)(TopSizeArray[i].len<<log2_seg_size));
+          ast->print("(" SIZE_FORMAT_W(4) "K)", (TopSizeArray[i].len<<log2_seg_size)/K);
+          //---<  no compiler information  >---
+          ast->fill_to(56);
+          //---<  name and signature  >---
+          ast->fill_to(67+6);
+          ast->print("%s", blob_name);
+        }
+        STRINGSTREAM_FLUSH_LOCKED("\n")
+      }
+      if (used_topSizeBlocks != printed_topSizeBlocks) {
+        ast->print_cr("used blocks: %d, printed blocks: %d", used_topSizeBlocks, printed_topSizeBlocks);
+        STRINGSTREAM_FLUSH("")
+        for (unsigned int i = 0; i < alloc_topSizeBlocks; i++) {
+          ast->print_cr("  TopSizeArray[%d].index = %d, len = %d", i, TopSizeArray[i].index, TopSizeArray[i].len);
+          STRINGSTREAM_FLUSH("")
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("\n\n")
+    }
+  }
+
+  //-----------------------------
+  //--  Print Usage Histogram  --
+  //-----------------------------
+
+  if (SizeDistributionArray != NULL) {
+    unsigned long total_count = 0;
+    unsigned long total_size  = 0;
+    const unsigned long pctFactor = 200;
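+    // pctFactor = 200 means pctFactor/100 = 2 histogram characters per
+    // percentage point; the bar length is computed below as
+    // pctFactor*count/total_count (and pctFactor*lenSum/total_size, respectively).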
+
+    for (unsigned int i = 0; i < nSizeDistElements; i++) {
+      total_count += SizeDistributionArray[i].count;
+      total_size  += SizeDistributionArray[i].lenSum;
+    }
+
+    if ((total_count > 0) && (total_size > 0)) {
+      printBox(ast, '-', "Block count histogram for ", heapName);
+      ast->print_cr("Note: The histogram indicates how many blocks (as a percentage\n"
+                    "      of all blocks) have a size in the given range.\n"
+                    "      %ld characters are printed per percentage point.\n", pctFactor/100);
+      ast->print_cr("total size   of all blocks: %7ldM", (total_size<<log2_seg_size)/M);
+      ast->print_cr("total number of all blocks: %7ld\n", total_count);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      ast->print_cr("[Size Range)------avg.-size-+----count-+");
+      for (unsigned int i = 0; i < nSizeDistElements; i++) {
+        if (SizeDistributionArray[i].rangeStart<<log2_seg_size < K) {
+          ast->print("[" SIZE_FORMAT_W(5) " .." SIZE_FORMAT_W(5) " ): "
+                    ,(size_t)(SizeDistributionArray[i].rangeStart<<log2_seg_size)
+                    ,(size_t)(SizeDistributionArray[i].rangeEnd<<log2_seg_size)
+                    );
+        } else if (SizeDistributionArray[i].rangeStart<<log2_seg_size < M) {
+          ast->print("[" SIZE_FORMAT_W(5) "K.." SIZE_FORMAT_W(5) "K): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/K
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/K
+                    );
+        } else {
+          ast->print("[" SIZE_FORMAT_W(5) "M.." SIZE_FORMAT_W(5) "M): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/M
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/M
+                    );
+        }
+        ast->print(" %8d | %8d |",
+                   SizeDistributionArray[i].count > 0 ? (SizeDistributionArray[i].lenSum<<log2_seg_size)/SizeDistributionArray[i].count : 0,
+                   SizeDistributionArray[i].count);
+
+        unsigned int percent = pctFactor*SizeDistributionArray[i].count/total_count;
+        for (unsigned int j = 1; j <= percent; j++) {
+          ast->print("%c", (j%((pctFactor/100)*10) == 0) ? ('0'+j/(((unsigned int)pctFactor/100)*10)) : '*');
+        }
+        ast->cr();
+      }
+      ast->print_cr("----------------------------+----------+\n\n");
+      STRINGSTREAM_FLUSH_LOCKED("\n")
+
+      printBox(ast, '-', "Contribution per size range to total size for ", heapName);
+      ast->print_cr("Note: The histogram indicates how much space (as a percentage of all\n"
+                    "      occupied space) is used by the blocks in the given size range.\n"
+                    "      %ld characters are printed per percentage point.\n", pctFactor/100);
+      ast->print_cr("total size   of all blocks: %7ldM", (total_size<<log2_seg_size)/M);
+      ast->print_cr("total number of all blocks: %7ld\n", total_count);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      ast->print_cr("[Size Range)------avg.-size-+----count-+");
+      for (unsigned int i = 0; i < nSizeDistElements; i++) {
+        if (SizeDistributionArray[i].rangeStart<<log2_seg_size < K) {
+          ast->print("[" SIZE_FORMAT_W(5) " .." SIZE_FORMAT_W(5) " ): "
+                    ,(size_t)(SizeDistributionArray[i].rangeStart<<log2_seg_size)
+                    ,(size_t)(SizeDistributionArray[i].rangeEnd<<log2_seg_size)
+                    );
+        } else if (SizeDistributionArray[i].rangeStart<<log2_seg_size < M) {
+          ast->print("[" SIZE_FORMAT_W(5) "K.." SIZE_FORMAT_W(5) "K): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/K
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/K
+                    );
+        } else {
+          ast->print("[" SIZE_FORMAT_W(5) "M.." SIZE_FORMAT_W(5) "M): "
+                    ,(SizeDistributionArray[i].rangeStart<<log2_seg_size)/M
+                    ,(SizeDistributionArray[i].rangeEnd<<log2_seg_size)/M
+                    );
+        }
+        ast->print(" %8d | %8d |",
+                   SizeDistributionArray[i].count > 0 ? (SizeDistributionArray[i].lenSum<<log2_seg_size)/SizeDistributionArray[i].count : 0,
+                   SizeDistributionArray[i].count);
+
+        unsigned int percent = pctFactor*(unsigned long)SizeDistributionArray[i].lenSum/total_size;
+        for (unsigned int j = 1; j <= percent; j++) {
+          ast->print("%c", (j%((pctFactor/100)*10) == 0) ? ('0'+j/(((unsigned int)pctFactor/100)*10)) : '*');
+        }
+        ast->cr();
+      }
+      ast->print_cr("----------------------------+----------+");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+}
+
+
+void CodeHeapState::print_freeSpace(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (FreeArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  {
+    printBox(ast, '=', "F R E E   S P A C E   S T A T I S T I C S   for ", heapName);
+    ast->print_cr("Note: in this context, a gap is the occupied space between two free blocks.\n"
+                  "      Those gaps are of interest if there is a chance that they become\n"
+                  "      unoccupied, e.g. by class unloading. Then, the two adjacent free\n"
+                  "      blocks, together with the now unoccupied space, form a new, large\n"
+                  "      free block.");
+    STRINGSTREAM_FLUSH_LOCKED("\n")
+  }
+
+  {
+    printBox(ast, '-', "List of all Free Blocks in ", heapName);
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    unsigned int ix = 0;
+    for (ix = 0; ix < alloc_freeBlocks-1; ix++) {
+      ast->print("%p: Len[%4d] = " HEX32_FORMAT ",", FreeArray[ix].start, ix, FreeArray[ix].len);
+      ast->fill_to(38);
+      ast->print("Gap[%4d..%4d]: " HEX32_FORMAT " bytes,", ix, ix+1, FreeArray[ix].gap);
+      ast->fill_to(71);
+      ast->print("block count: %6d", FreeArray[ix].n_gapBlocks);
+      if (FreeArray[ix].stubs_in_gap) {
+        ast->print(" !! permanent gap, contains stubs and/or blobs !!");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("\n")
+    }
+    ast->print_cr("%p: Len[%4d] = " HEX32_FORMAT, FreeArray[ix].start, ix, FreeArray[ix].len);
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+
+
+  //-----------------------------------------
+  //--  Find and Print Top Ten Free Blocks --
+  //-----------------------------------------
+
+  //---<  find Top Ten Free Blocks  >---
+  const unsigned int nTop = 10;
+  unsigned int  currMax10 = 0;
+  struct FreeBlk* FreeTopTen[nTop];
+  memset(FreeTopTen, 0, sizeof(FreeTopTen));
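+  // FreeTopTen is a descending insertion list of pointers into FreeArray.
+  // Once all nTop slots are filled, currMax10 holds the length of the smallest
+  // remembered block and serves as the admission threshold for new candidates.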
+
+  for (unsigned int ix = 0; ix < alloc_freeBlocks; ix++) {
+    if (FreeArray[ix].len > currMax10) {  // larger than the ten largest found so far
+      unsigned int currSize = FreeArray[ix].len;
+
+      unsigned int iy;
+      for (iy = 0; iy < nTop && FreeTopTen[iy] != NULL; iy++) {
+        if (FreeTopTen[iy]->len < currSize) {
+          for (unsigned int iz = nTop-1; iz > iy; iz--) { // make room to insert new free block
+            FreeTopTen[iz] = FreeTopTen[iz-1];
+          }
+          FreeTopTen[iy] = &FreeArray[ix];        // insert new free block
+          if (FreeTopTen[nTop-1] != NULL) {
+            currMax10 = FreeTopTen[nTop-1]->len;
+          }
+          break; // done with this, check next free block
+        }
+      }
+      if (iy >= nTop) {
+        ast->print_cr("Internal logic error. New Max10 = %d detected, but could not be merged. Old Max10 = %d",
+                      currSize, currMax10);
+        continue;
+      }
+      if (FreeTopTen[iy] == NULL) {
+        FreeTopTen[iy] = &FreeArray[ix];
+        if (iy == (nTop-1)) {
+          currMax10 = currSize;
+        }
+      }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  {
+    printBox(ast, '-', "Top Ten Free Blocks in ", heapName);
+
+    //---<  print Top Ten Free Blocks  >---
+    for (unsigned int iy = 0; (iy < nTop) && (FreeTopTen[iy] != NULL); iy++) {
+      ast->print("Pos %3d: Block %4d - size " HEX32_FORMAT ",", iy+1, FreeTopTen[iy]->index, FreeTopTen[iy]->len);
+      ast->fill_to(39);
+      if (FreeTopTen[iy]->index == (alloc_freeBlocks-1)) {
+        ast->print("last free block in list.");
+      } else {
+        ast->print("Gap (to next) " HEX32_FORMAT ",", FreeTopTen[iy]->gap);
+        ast->fill_to(63);
+        ast->print("#blocks (in gap) %d", FreeTopTen[iy]->n_gapBlocks);
+      }
+      ast->cr();
+    }
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+
+
+  //--------------------------------------------------------
+  //--  Find and Print Top Ten Free-Occupied-Free Triples --
+  //--------------------------------------------------------
+
+  //---<  find and print Top Ten Triples (Free-Occupied-Free)  >---
+  currMax10 = 0;
+  struct FreeBlk  *FreeTopTenTriple[nTop];
+  memset(FreeTopTenTriple, 0, sizeof(FreeTopTenTriple));
+
+  for (unsigned int ix = 0; ix < alloc_freeBlocks-1; ix++) {
+    // If there are stubs in the gap, this gap will never become completely free.
+    // The triple will thus never merge to one free block.
+    unsigned int lenTriple  = FreeArray[ix].len + (FreeArray[ix].stubs_in_gap ? 0 : FreeArray[ix].gap + FreeArray[ix+1].len);
+    FreeArray[ix].len = lenTriple;
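+    // Note: the assignment above replaces the free block length with the
+    //       triple length. That is safe at this point: the per-block lengths
+    //       were already printed in the Top Ten Free Blocks view, and
+    //       iteration ix only reads FreeArray[ix+1].len, not yet modified.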
+    if (lenTriple > currMax10) {  // larger than the ten largest found so far
+
+      unsigned int iy;
+      for (iy = 0; (iy < nTop) && (FreeTopTenTriple[iy] != NULL); iy++) {
+        if (FreeTopTenTriple[iy]->len < lenTriple) {
+          for (unsigned int iz = nTop-1; iz > iy; iz--) {
+            FreeTopTenTriple[iz] = FreeTopTenTriple[iz-1];
+          }
+          FreeTopTenTriple[iy] = &FreeArray[ix];
+          if (FreeTopTenTriple[nTop-1] != NULL) {
+            currMax10 = FreeTopTenTriple[nTop-1]->len;
+          }
+          break;
+        }
+      }
+      if (iy == nTop) {
+        ast->print_cr("Internal logic error. New Max10 = %d detected, but could not be merged. Old Max10 = %d",
+                      lenTriple, currMax10);
+        continue;
+      }
+      if (FreeTopTenTriple[iy] == NULL) {
+        FreeTopTenTriple[iy] = &FreeArray[ix];
+        if (iy == (nTop-1)) {
+          currMax10 = lenTriple;
+        }
+      }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  {
+    printBox(ast, '-', "Top Ten Free-Occupied-Free Triples in ", heapName);
+    ast->print_cr("  Use this information to judge how likely it is that a large(r) free block\n"
+                  "  might get created by code cache sweeping.\n"
+                  "  If all the occupied blocks can be swept, the three free blocks will be\n"
+                  "  merged into one (much larger) free block. That would reduce free space\n"
+                  "  fragmentation.\n");
+
+    //---<  print Top Ten Free-Occupied-Free Triples  >---
+    for (unsigned int iy = 0; (iy < nTop) && (FreeTopTenTriple[iy] != NULL); iy++) {
+      ast->print("Pos %3d: Block %4d - size " HEX32_FORMAT ",", iy+1, FreeTopTenTriple[iy]->index, FreeTopTenTriple[iy]->len);
+      ast->fill_to(39);
+      ast->print("Gap (to next) " HEX32_FORMAT ",", FreeTopTenTriple[iy]->gap);
+      ast->fill_to(63);
+      ast->print("#blocks (in gap) %d", FreeTopTenTriple[iy]->n_gapBlocks);
+      ast->cr();
+    }
+    STRINGSTREAM_FLUSH_LOCKED("\n\n")
+  }
+}
+
+
+void CodeHeapState::print_count(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "B L O C K   C O U N T S   for ", heapName);
+    ast->print_cr("  Each granule contains an individual number of heap blocks. Large blocks\n"
+                  "  may span multiple granules and are counted for each granule they touch.\n");
+    if (segment_granules) {
+      ast->print_cr("  You have selected granule size to be as small as segment size.\n"
+                    "  As a result, each granule contains exactly one block (or a part of one block)\n"
+                    "  or is displayed as empty (' ') if it's BlobType does not match the selection.\n"
+                    "  Occupied granules show their BlobType character, see legend.\n");
+      print_blobType_legend(ast);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    if (segment_granules) {
+      printBox(ast, '-', "Total (all types) count for granule size == segment size", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_blobType_single(ast, StatArray[ix].type);
+      }
+    } else {
+      printBox(ast, '-', "Total (all tiers) count, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        unsigned int count = StatArray[ix].t1_count   + StatArray[ix].t2_count   + StatArray[ix].tx_count
+                           + StatArray[ix].stub_count + StatArray[ix].dead_count;
+        print_count_single(ast, count);
+      }
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t1_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].t1_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t2_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].t2_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].tx_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].tx_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No not_used/not_entrant nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_stub > 0) {
+      printBox(ast, '-', "Stub & Blob count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].stub_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].stub_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Stubs and Blobs found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_dead > 0) {
+      printBox(ast, '-', "Dead nMethod count only, 0x1..0xf. '*' indicates >= 16 blocks, ' ' indicates empty", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].dead_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].dead_count);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No dead nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "Count by tier (combined, no dead blocks): <#t1>:<#t2>:<#s>, 0x0..0xf. '*' indicates >= 16 blocks", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 24;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+
+        print_count_single(ast, StatArray[ix].t1_count);
+        ast->print(":");
+        print_count_single(ast, StatArray[ix].t2_count);
+        ast->print(":");
+        if (segment_granules && StatArray[ix].stub_count > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_count_single(ast, StatArray[ix].stub_count);
+        }
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
+
+
+void CodeHeapState::print_space(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "S P A C E   U S A G E  &  F R A G M E N T A T I O N   for ", heapName);
+    ast->print_cr("  The heap space covered by one granule is occupied to a various extend.\n"
+                  "  The granule occupancy is displayed by one decimal digit per granule.\n");
+    if (segment_granules) {
+      ast->print_cr("  You have selected granule size to be as small as segment size.\n"
+                    "  As a result, each granule contains exactly one block (or a part of one block)\n"
+                    "  or is displayed as empty (' ') if it's BlobType does not match the selection.\n"
+                    "  Occupied granules show their BlobType character, see legend.\n");
+      print_blobType_legend(ast);
+    } else {
+      ast->print_cr("  These digits represent a fill percentage range (see legend).\n");
+      print_space_legend(ast);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    if (segment_granules) {
+      printBox(ast, '-', "Total (all types) space consumption for granule size == segment size", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_blobType_single(ast, StatArray[ix].type);
+      }
+    } else {
+      printBox(ast, '-', "Total (all types) space consumption. ' ' indicates empty, '*' indicates full.", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        unsigned int space    = StatArray[ix].t1_space   + StatArray[ix].t2_space  + StatArray[ix].tx_space
+                              + StatArray[ix].stub_space + StatArray[ix].dead_space;
+        print_space_single(ast, space);
+      }
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t1_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t1_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].t2_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t2_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].tx_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].tx_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_stub > 0) {
+      printBox(ast, '-', "Stub and Blob space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        if (segment_granules && StatArray[ix].stub_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].stub_space);
+        }
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Stubs and Blobs found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_dead > 0) {
+      printBox(ast, '-', "Dead space consumption. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_space_single(ast, StatArray[ix].dead_space);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No dead nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "Space consumption by tier (combined): <t1%>:<t2%>:<s%>. ' ' indicates empty, '*' indicates full", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 24;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+
+        if (segment_granules && StatArray[ix].t1_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t1_space);
+        }
+        ast->print(":");
+        if (segment_granules && StatArray[ix].t2_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].t2_space);
+        }
+        ast->print(":");
+        if (segment_granules && StatArray[ix].stub_space > 0) {
+          print_blobType_single(ast, StatArray[ix].type);
+        } else {
+          print_space_single(ast, StatArray[ix].stub_space);
+        }
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
+
+void CodeHeapState::print_age(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line = 32;
+  char*        low_bound         = heap->low_boundary();
+
+  {
+    printBox(ast, '=', "M E T H O D   A G E   by CompileID for ", heapName);
+    ast->print_cr("  The age of a compiled method in the CodeHeap is not available as a\n"
+                  "  time stamp. Instead, a relative age is deducted from the method's compilation ID.\n"
+                  "  Age information is available for tier1 and tier2 methods only. There is no\n"
+                  "  age information for stubs and blobs, because they have no compilation ID assigned.\n"
+                  "  Information for the youngest method (highest ID) in the granule is printed.\n"
+                  "  Refer to the legend to learn how method age is mapped to the displayed digit.");
+    print_age_legend(ast);
+    STRINGSTREAM_FLUSH_LOCKED("")
+  }
+
+  {
+    printBox(ast, '-', "Age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+    STRINGSTREAM_FLUSH_LOCKED("")
+
+    granules_per_line = 128;
+    for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+      print_line_delim(out, ast, low_bound, ix, granules_per_line);
+      unsigned int age1      = StatArray[ix].t1_age;
+      unsigned int age2      = StatArray[ix].t2_age;
+      unsigned int agex      = StatArray[ix].tx_age;
+      unsigned int age       = age1 > age2 ? age1 : age2;
+      age       = age > agex ? age : agex;
+      print_age_single(ast, age);
+    }
+    STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+  }
+
+  {
+    if (nBlocks_t1 > 0) {
+      printBox(ast, '-', "Tier1 age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t1_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier1 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_t2 > 0) {
+      printBox(ast, '-', "Tier2 age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t2_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (nBlocks_alive > 0) {
+      printBox(ast, '-', "not_used/not_entrant age distribution. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 128;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].tx_age);
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    } else {
+      ast->print("No Tier2 nMethods found in CodeHeap.");
+      STRINGSTREAM_FLUSH_LOCKED("\n\n\n")
+    }
+  }
+
+  {
+    if (!segment_granules) { // Prevent totally redundant printouts
+      printBox(ast, '-', "age distribution by tier <a1>:<a2>. '0' indicates youngest 1/256, '8': oldest half, ' ': no age information", NULL);
+      STRINGSTREAM_FLUSH_LOCKED("")
+
+      granules_per_line = 32;
+      for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+        print_line_delim(out, ast, low_bound, ix, granules_per_line);
+        print_age_single(ast, StatArray[ix].t1_age);
+        ast->print(":");
+        print_age_single(ast, StatArray[ix].t2_age);
+        ast->print(" ");
+      }
+      STRINGSTREAM_FLUSH_LOCKED("|\n\n\n")
+    }
+  }
+}
+
+
+void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
+  if (!initialization_complete) {
+    return;
+  }
+
+  const char* heapName   = get_heapName(heap);
+  get_HeapStatGlobals(out, heapName);
+
+  if ((StatArray == NULL) || (alloc_granules == 0)) {
+    return;
+  }
+  STRINGSTREAM_DECL(ast, out)
+
+  unsigned int granules_per_line  = 128;
+  char*        low_bound          = heap->low_boundary();
+  CodeBlob*    last_blob          = NULL;
+  bool         name_in_addr_range = true;
+
+  //---<  cover at least 128K per output line  >---
+  if (granules_per_line*granule_size < 128*K) {
+    granules_per_line = (unsigned int)((128*K)/granule_size);
+  }
+
+  printBox(ast, '=', "M E T H O D   N A M E S   for ", heapName);
+  ast->print_cr("  Method names are dynamically retrieved from the code cache at print time.\n"
+                "  Due to the living nature of the code heap and because the CodeCache_lock\n"
+                "  is not continuously held, the displayed name might be wrong or no name\n"
+                "  might be found at all. The likelihood for that to happen increases\n"
+                "  over time passed between analysis and print step.\n");
+  STRINGSTREAM_FLUSH_LOCKED("")
+
+  for (unsigned int ix = 0; ix < alloc_granules; ix++) {
+    //---<  print a new blob on a new line  >---
+    if (ix%granules_per_line == 0) {
+      if (!name_in_addr_range) {
+        ast->print_cr("No methods, blobs, or stubs found in this address range");
+      }
+      name_in_addr_range = false;
+
+      ast->cr();
+      ast->print_cr("--------------------------------------------------------------------");
+      ast->print_cr("Address range [%p,%p), " SIZE_FORMAT "k", low_bound+ix*granule_size, low_bound+(ix+granules_per_line)*granule_size, granules_per_line*granule_size/(size_t)K);
+      ast->print_cr("--------------------------------------------------------------------");
+      STRINGSTREAM_FLUSH_LOCKED("")
+    }
+    // Only check granule if it contains at least one blob.
+    unsigned int nBlobs  = StatArray[ix].t1_count   + StatArray[ix].t2_count + StatArray[ix].tx_count +
+                           StatArray[ix].stub_count + StatArray[ix].dead_count;
+    if (nBlobs > 0) {
+    for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
+      // heap->find_start() is safe. Only working with _segmap. Returns NULL or void*. Returned CodeBlob may be uninitialized.
+      CodeBlob* this_blob = (CodeBlob *)(heap->find_start(low_bound+ix*granule_size+is));
+      bool blob_initialized = (this_blob != NULL) &&
+                              ((char*)this_blob + this_blob->header_size() == (char*)(this_blob->relocation_begin())) &&
+                              ((char*)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (char*)(this_blob->content_begin()));
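+      // Plausibility check (sketch of the layout assumption): in a fully
+      // initialized CodeBlob, the relocation area begins right after the
+      // header, and the content begins right after the (aligned) relocation
+      // area. A blob still under construction will typically fail one of the
+      // two address comparisons above.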
+      if (blob_initialized && (this_blob != last_blob)) {
+        if (!name_in_addr_range) {
+          name_in_addr_range = true;
+          ast->fill_to(51);
+          ast->print("%9s", "compiler");
+          ast->fill_to(61);
+          ast->print_cr("%6s", "method");
+          ast->print_cr("%18s %13s %17s %9s  %5s %18s  %s", "Addr(module)      ", "offset", "size", " type lvl", " temp", "blobType          ", "Name");
+        }
+
+        //---<  Print blobTypeName as recorded during analysis  >---
+        ast->print("%p", this_blob);
+        ast->fill_to(19);
+        ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
+        ast->fill_to(33);
+
+        //---<  print size, name, and signature (for nMethods)  >---
+        // this_blob->name() could return NULL if no name was given to the constructor. It is inlined, so it may not be visible on the stack.
+        const char* blob_name = this_blob->name();
+        if (blob_name == 0) {
+          blob_name = "<unavailable>";
+        }
+        // this_blob->as_nmethod_or_null() is safe. Inlined, maybe not visible on stack.
+        nmethod*           nm = this_blob->as_nmethod_or_null();
+        blobType       cbType = noType;
+        if (segment_granules) {
+          cbType = (blobType)StatArray[ix].type;
+        } else {
+          cbType = get_cbType(this_blob);
+        }
+        if ((nm != NULL) && (nm->method() != NULL)) {
+          ResourceMark rm;
+          //---<  nMethod size in hex  >---
+          unsigned int total_size = nm->total_size();
+          ast->print(PTR32_FORMAT, total_size);
+          ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
+          //---<  compiler information  >---
+          ast->fill_to(51);
+          ast->print("%5s %3d", compTypeName[StatArray[ix].compiler], StatArray[ix].level);
+          //---<  method temperature  >---
+          ast->fill_to(62);
+          ast->print("%5d", nm->hotness_counter());
+          //---<  name and signature  >---
+          ast->fill_to(62+6);
+          ast->print("%s", blobTypeName[cbType]);
+          ast->fill_to(82+6);
+          if (nm->is_in_use()) {
+            blob_name = nm->method()->name_and_sig_as_C_string();
+          }
+          if (nm->is_not_entrant()) {
+            blob_name = nm->method()->name_and_sig_as_C_string();
+          }
+          if (nm->is_zombie()) {
+            ast->print("%14s", " zombie method");
+          }
+          ast->print("%s", blob_name);
+        } else {
+          ast->fill_to(62+6);
+          ast->print("%s", blobTypeName[cbType]);
+          ast->fill_to(82+6);
+          ast->print("%s", blob_name);
+        }
+        STRINGSTREAM_FLUSH_LOCKED("\n")
+        last_blob          = this_blob;
+      } else if (!blob_initialized && (this_blob != NULL)) {
+        last_blob          = this_blob;
+      }
+    }
+    }
+  }
+  STRINGSTREAM_FLUSH_LOCKED("\n\n")
+}
+
+
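+// Print a three-line box around the concatenation of text1 and text2.
+// With border == '-', the corners become '+' and the sides '|'; any other
+// border character is used for corners and sides as well. Schematically:
+//   +-----------+
+//   |  <title>  |
+//   +-----------+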
+void CodeHeapState::printBox(outputStream* ast, const char border, const char* text1, const char* text2) {
+  unsigned int lineLen = 1 + 2 + 2 + 1;
+  char edge, frame;
+
+  if (text1 != NULL) {
+    lineLen += (unsigned int)strlen(text1); // text1 is much shorter than MAX_INT chars.
+  }
+  if (text2 != NULL) {
+    lineLen += (unsigned int)strlen(text2); // text2 is much shorter than MAX_INT chars.
+  }
+  if (border == '-') {
+    edge  = '+';
+    frame = '|';
+  } else {
+    edge  = border;
+    frame = border;
+  }
+
+  ast->print("%c", edge);
+  for (unsigned int i = 0; i < lineLen-2; i++) {
+    ast->print("%c", border);
+  }
+  ast->print_cr("%c", edge);
+
+  ast->print("%c  ", frame);
+  if (text1 != NULL) {
+    ast->print("%s", text1);
+  }
+  if (text2 != NULL) {
+    ast->print("%s", text2);
+  }
+  ast->print_cr("  %c", frame);
+
+  ast->print("%c", edge);
+  for (unsigned int i = 0; i < lineLen-2; i++) {
+    ast->print("%c", border);
+  }
+  ast->print_cr("%c", edge);
+}
+
+void CodeHeapState::print_blobType_legend(outputStream* out) {
+  out->cr();
+  printBox(out, '-', "Block types used in the following CodeHeap dump", NULL);
+  for (int type = noType; type < lastType; type += 1) {
+    out->print_cr("  %c - %s", blobTypeChar[type], blobTypeName[type]);
+  }
+  out->print_cr("  -----------------------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_space_legend(outputStream* out) {
+  out->cr();
+  printBox(out, '-', "Space ranges, based on granule occupancy", NULL);
+  out->print_cr("    -   0%% == occupancy");
+  for (int i=0; i<=9; i++) {
+    out->print_cr("  %d - %3d%% < occupancy < %3d%%", i, 10*i, 10*(i+1));
+  }
+  out->print_cr("  * - 100%% == occupancy");
+  out->print_cr("  ----------------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_age_legend(outputStream* out) {
+  unsigned int indicator = 0;
+  unsigned int age_range = 256;
+  unsigned int range_beg = latest_compilation_id;
+  out->cr();
+  printBox(out, '-', "Age ranges, based on compilation id", NULL);
+  while (age_range > 0) {
+    out->print_cr("  %d - %6d to %6d", indicator, range_beg, latest_compilation_id - latest_compilation_id/age_range);
+    range_beg = latest_compilation_id - latest_compilation_id/age_range;
+    age_range /= 2;
+    indicator += 1;
+  }
+  out->print_cr("  -----------------------------------------");
+  out->cr();
+}
+
+void CodeHeapState::print_blobType_single(outputStream* out, u2 /* blobType */ type) {
+  out->print("%c", blobTypeChar[type]);
+}
+
+void CodeHeapState::print_count_single(outputStream* out, unsigned short count) {
+  if (count >= 16)    out->print("*");
+  else if (count > 0) out->print("%1.1x", count);
+  else                out->print(" ");
+}
+
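+// Map granule occupancy to one character: ' ' for an empty granule, '*' for
+// an (almost) full granule, and the decile digit of the fill ratio otherwise.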
+void CodeHeapState::print_space_single(outputStream* out, unsigned short space) {
+  size_t  space_in_bytes = ((unsigned int)space)<<log2_seg_size;
+  char    fraction       = (space == 0) ? ' ' : (space_in_bytes >= granule_size-1) ? '*' : char('0'+10*space_in_bytes/granule_size);
+  out->print("%c", fraction);
+}
+
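+// Map a compilation id to its age digit. The distance to the youngest
+// (latest) compilation id is compared against exponentially growing
+// fractions of the id space: '0' covers the youngest 1/256 of all ids,
+// each following digit doubles that range, and '8' covers the oldest half.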
+void CodeHeapState::print_age_single(outputStream* out, unsigned int age) {
+  unsigned int indicator = 0;
+  unsigned int age_range = 256;
+  if (age > 0) {
+    while ((age_range > 0) && (latest_compilation_id-age > latest_compilation_id/age_range)) {
+      age_range /= 2;
+      indicator += 1;
+    }
+    out->print("%c", char('0'+indicator));
+  } else {
+    out->print(" ");
+  }
+}
+
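+// The two print_line_delim() variants differ in their buffering contract:
+// the outputStream variant requires out and ast to be the same (unbuffered)
+// stream, while the bufferedStream variant flushes the buffer to out under
+// the tty lock whenever a new output line starts.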
+void CodeHeapState::print_line_delim(outputStream* out, outputStream* ast, char* low_bound, unsigned int ix, unsigned int gpl) {
+  if (ix % gpl == 0) {
+    if (ix > 0) {
+      ast->print("|");
+    }
+    ast->cr();
+    assert(out == ast, "must use the same stream!");
+
+    ast->print("%p", low_bound + ix*granule_size);
+    ast->fill_to(19);
+    ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
+  }
+}
+
+void CodeHeapState::print_line_delim(outputStream* out, bufferedStream* ast, char* low_bound, unsigned int ix, unsigned int gpl) {
+  assert(out != ast, "must not use the same stream!");
+  if (ix % gpl == 0) {
+    if (ix > 0) {
+      ast->print("|");
+    }
+    ast->cr();
+
+    { // can't use STRINGSTREAM_FLUSH_LOCKED("") here.
+      ttyLocker ttyl;
+      out->print("%s", ast->as_string());
+      ast->reset();
+    }
+
+    ast->print("%p", low_bound + ix*granule_size);
+    ast->fill_to(19);
+    ast->print("(+" PTR32_FORMAT "): |", (unsigned int)(ix*granule_size));
+  }
+}
+
+CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
+  if (cb != NULL) {
+    if (cb->is_runtime_stub())                return runtimeStub;
+    if (cb->is_deoptimization_stub())         return deoptimizationStub;
+    if (cb->is_uncommon_trap_stub())          return uncommonTrapStub;
+    if (cb->is_exception_stub())              return exceptionStub;
+    if (cb->is_safepoint_stub())              return safepointStub;
+    if (cb->is_adapter_blob())                return adapterBlob;
+    if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
+    if (cb->is_buffer_blob())                 return bufferBlob;
+
+    if (cb->is_nmethod()) {
+      if (((nmethod*)cb)->is_in_use())        return nMethod_inuse;
+      if (((nmethod*)cb)->is_alive() && !(((nmethod*)cb)->is_not_entrant()))   return nMethod_notused;
+      if (((nmethod*)cb)->is_alive())         return nMethod_alive;
+      if (((nmethod*)cb)->is_unloaded())      return nMethod_unloaded;
+      if (((nmethod*)cb)->is_zombie())        return nMethod_dead;
+      tty->print_cr("unhandled nmethod state");
+      return nMethod_dead;
+    }
+  }
+  return noType;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/code/codeHeapState.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CODE_CODEHEAPSTATE_HPP
+#define SHARE_CODE_CODEHEAPSTATE_HPP
+
+#include "memory/heap.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
+
+class CodeHeapState : public CHeapObj<mtCode> {
+
+ public:
+  enum compType {
+    noComp = 0,     // must be! due to initialization by memset to zero
+    c1,
+    c2,
+    jvmci,
+    lastComp
+  };
+
+  enum blobType {
+     noType = 0,         // must be! due to initialization by memset to zero
+     // The nMethod_* values correspond 1:1 to the CompiledMethod enum values.
+     nMethod_inuse,       // executable. This is the "normal" state for a nmethod.
+     nMethod_notused,     // assumed inactive, marked not entrant. Could be revived if necessary.
+     nMethod_notentrant,  // No new activations allowed, marked for deoptimization. Old activations may still exist.
+                          // Will transition to "zombie" after all activations are gone.
+     nMethod_zombie,      // No more activations exist, ready for purge (remove from code cache).
+     nMethod_unloaded,    // No activations exist, should not be called. Transient state on the way to "zombie".
+     nMethod_alive = nMethod_notentrant, // Combined state: nmethod may have activations, thus can't be purged.
+     nMethod_dead  = nMethod_zombie,     // Combined state: nmethod does not have any activations.
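+     // Note: nMethod_alive and nMethod_dead are aliases of existing values,
+     //       not distinct enumerators. Numbering of the stub and blob types
+     //       therefore resumes explicitly at nMethod_unloaded + 1.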
+     runtimeStub   = nMethod_unloaded + 1,
+     ricochetStub,
+     deoptimizationStub,
+     uncommonTrapStub,
+     exceptionStub,
+     safepointStub,
+     adapterBlob,
+     mh_adapterBlob,
+     bufferBlob,
+     lastType
+  };
+
+ private:
+  static void prepare_StatArray(outputStream* out, size_t nElem, size_t granularity, const char* heapName);
+  static void prepare_FreeArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void prepare_TopSizeArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void prepare_SizeDistArray(outputStream* out, unsigned int nElem, const char* heapName);
+  static void discard_StatArray(outputStream* out);
+  static void discard_FreeArray(outputStream* out);
+  static void discard_TopSizeArray(outputStream* out);
+  static void discard_SizeDistArray(outputStream* out);
+
+  static void update_SizeDistArray(outputStream* out, unsigned int len);
+
+  static const char* get_heapName(CodeHeap* heap);
+  static unsigned int findHeapIndex(outputStream* out, const char* heapName);
+  static void get_HeapStatGlobals(outputStream* out, const char* heapName);
+  static void set_HeapStatGlobals(outputStream* out, const char* heapName);
+
+  static void printBox(outputStream* out, const char border, const char* text1, const char* text2);
+  static void print_blobType_legend(outputStream* out);
+  static void print_space_legend(outputStream* out);
+  static void print_age_legend(outputStream* out);
+  static void print_blobType_single(outputStream *ast, u2 /* blobType */ type);
+  static void print_count_single(outputStream *ast, unsigned short count);
+  static void print_space_single(outputStream *ast, unsigned short space);
+  static void print_age_single(outputStream *ast, unsigned int age);
+  static void print_line_delim(outputStream* out, bufferedStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
+  static void print_line_delim(outputStream* out, outputStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
+  static blobType get_cbType(CodeBlob* cb);
+
+ public:
+  static void discard(outputStream* out, CodeHeap* heap);
+  static void aggregate(outputStream* out, CodeHeap* heap, const char* granularity);
+  static void print_usedSpace(outputStream* out, CodeHeap* heap);
+  static void print_freeSpace(outputStream* out, CodeHeap* heap);
+  static void print_count(outputStream* out, CodeHeap* heap);
+  static void print_space(outputStream* out, CodeHeap* heap);
+  static void print_age(outputStream* out, CodeHeap* heap);
+  static void print_names(outputStream* out, CodeHeap* heap);
+};
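+// Typical call sequence (a sketch; "out", "heap", and "granularity" are
+// caller-provided):
+//   CodeHeapState::aggregate(out, heap, granularity);  // collect statistics
+//   CodeHeapState::print_usedSpace(out, heap);         // render one view
+//   CodeHeapState::discard(out, heap);                 // release the arrays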
+
+//----------------
+//  StatElement
+//----------------
+//  Each analysis granule is represented by an instance of
+//  this StatElement struct. It collects and aggregates all
+//  information describing the allocated contents of the granule.
+//  Free (unallocated) content is not considered (see FreeBlk for that).
+//  All StatElements of a heap segment are stored in the related StatArray.
+//  Current size: 40 bytes + 8 bytes class header.
+class StatElement : public CHeapObj<mtCode> {
+  public:
+    // A note on ages: The compilation_id easily overflows unsigned short in large systems
+    unsigned int       t1_age;      // oldest compilation_id of tier1 nMethods.
+    unsigned int       t2_age;      // oldest compilation_id of tier2 nMethods.
+    unsigned int       tx_age;      // oldest compilation_id of inactive/not entrant nMethods.
+    unsigned short     t1_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     t2_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     tx_space;    // in units of _segment_size to "prevent" overflow
+    unsigned short     dead_space;  // in units of _segment_size to "prevent" overflow
+    unsigned short     stub_space;  // in units of _segment_size to "prevent" overflow
+    unsigned short     t1_count;
+    unsigned short     t2_count;
+    unsigned short     tx_count;
+    unsigned short     dead_count;
+    unsigned short     stub_count;
+    CompLevel          level;       // optimization level (see globalDefinitions.hpp)
+    //---<  replaced the correct enum typing with u2 to save space  >---
+    u2                 compiler;    // compiler which generated this blob. Type is CodeHeapState::compType
+    u2                 type;        // used only if granularity == segment_size. Type is CodeHeapState::blobType
+};
+
+//-----------
+//  FreeBlk
+//-----------
+//  Each free block in the code heap is represented by an instance
+//  of this FreeBlk struct. It collects all information we need to
+//  know about each free block.
+//  All FreeBlks of a heap segment are stored in the related FreeArray.
+struct FreeBlk : public CHeapObj<mtCode> {
+  HeapBlock*     start;        // address of free block
+  unsigned int   len;          // length of free block
+
+  unsigned int   gap;          // gap to next free block
+  unsigned int   index;        // sequential number of free block
+  unsigned short n_gapBlocks;  // # used blocks in gap
+  bool           stubs_in_gap; // The occupied space between this and the next free block contains (unmovable) stubs or blobs.
+};
+
+//--------------
+//  TopSizeBlk
+//--------------
+//  The n largest blocks in the code heap are represented in an instance
+//  of this TopSizeBlk struct. It collects all information we need to
+//  know about those largest blocks.
+//  All TopSizeBlks of a heap segment are stored in the related TopSizeArray.
+struct TopSizeBlk : public CHeapObj<mtCode> {
+  HeapBlock*     start;        // address of block
+  unsigned int   len;          // length of block, in _segment_size units. Will never overflow int.
+
+  unsigned int   index;        // ordering index, 0 is largest block
+                               // contains array index of next smaller block
+                               // -1 indicates end of list
+  CompLevel      level;        // optimization level (see globalDefinitions.hpp)
+  u2             compiler;     // compiler which generated this blob
+  u2             type;         // blob type
+};
+
+//---------------------------
+//  SizeDistributionElement
+//---------------------------
+//  During CodeHeap analysis, each allocated code block is associated with a
+//  SizeDistributionElement according to its size. Later on, the array of
+//  SizeDistributionElements is used to print a size distribution bar graph.
+//  All SizeDistributionElements of a heap segment are stored in the related SizeDistributionArray.
+struct SizeDistributionElement : public CHeapObj<mtCode> {
+                               // Range is [rangeStart..rangeEnd).
+  unsigned int   rangeStart;   // start of length range, in _segment_size units.
+  unsigned int   rangeEnd;     // end   of length range, in _segment_size units.
+  unsigned int   lenSum;       // accumulated length of all blocks in this range, in _segment_size units.
+
+  unsigned int   count;        // number of blocks assigned to this range.
+};
+
+//----------------
+//  CodeHeapStat
+//----------------
+//  Because we have to deal with multiple CodeHeaps, we need to
+//  collect "global" information in a segment-specific way as well.
+//  That's what the CodeHeapStat and CodeHeapStatArray are used for.
+//  Before a heap segment is processed, the contents of the CodeHeapStat
+//  element is copied to the global variables (get_HeapStatGlobals).
+//  When processing is done, the possibly modified global variables are
+//  copied back (set_HeapStatGlobals) to the CodeHeapStat element.
+struct CodeHeapStat {
+    StatElement*                     StatArray;
+    struct FreeBlk*                  FreeArray;
+    struct TopSizeBlk*               TopSizeArray;
+    struct SizeDistributionElement*  SizeDistributionArray;
+    const char*                      heapName;
+    size_t                           segment_size;
+    // StatElement data
+    size_t        alloc_granules;
+    size_t        granule_size;
+    bool          segment_granules;
+    unsigned int  nBlocks_t1;
+    unsigned int  nBlocks_t2;
+    unsigned int  nBlocks_alive;
+    unsigned int  nBlocks_dead;
+    unsigned int  nBlocks_unloaded;
+    unsigned int  nBlocks_stub;
+    // FreeBlk data
+    unsigned int  alloc_freeBlocks;
+    // UsedBlk data
+    unsigned int  alloc_topSizeBlocks;
+    unsigned int  used_topSizeBlocks;
+    // method hotness data. Temperature range is [-reset_val..+reset_val]
+    int           avgTemp;
+    int           maxTemp;
+    int           minTemp;
+};
+
+#endif // SHARE_CODE_CODEHEAPSTATE_HPP
--- a/src/hotspot/share/code/compiledIC.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/compiledIC.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -235,7 +235,7 @@
     assert(k->verify_itable_index(itable_index), "sanity check");
 #endif //ASSERT
     CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
-                                                    call_info->resolved_klass());
+                                                    call_info->resolved_klass(), false);
     holder->claim();
     InlineCacheBuffer::create_transition_stub(this, holder, entry);
   } else {
@@ -273,7 +273,7 @@
   assert(!is_optimized(), "an optimized call cannot be megamorphic");
 
   // Cannot rely on cached_value. It is either an interface or a method.
-  return VtableStubs::is_entry_point(ic_destination());
+  return VtableStubs::entry_point(ic_destination()) != NULL;
 }
 
 bool CompiledIC::is_call_to_compiled() const {
@@ -525,9 +525,11 @@
     return true;
   }
   // itable stubs also use CompiledICHolder
-  if (VtableStubs::is_entry_point(entry) && VtableStubs::stub_containing(entry)->is_itable_stub()) {
-    return true;
+  if (cb != NULL && cb->is_vtable_blob()) {
+    VtableStub* s = VtableStubs::entry_point(entry);
+    return (s != NULL) && s->is_itable_stub();
   }
+
   return false;
 }
 
--- a/src/hotspot/share/code/dependencies.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/dependencies.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1812,18 +1812,18 @@
 }
 
 Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
-  assert(!oopDesc::is_null(call_site), "sanity");
-  assert(!oopDesc::is_null(method_handle), "sanity");
+  assert(call_site != NULL, "sanity");
+  assert(method_handle != NULL, "sanity");
   assert(call_site->is_a(SystemDictionary::CallSite_klass()),     "sanity");
 
   if (changes == NULL) {
     // Validate all CallSites
-    if (java_lang_invoke_CallSite::target(call_site) != method_handle)
+    if (!oopDesc::equals(java_lang_invoke_CallSite::target(call_site), method_handle))
       return call_site->klass();  // assertion failed
   } else {
     // Validate the given CallSite
-    if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) {
-      assert(method_handle != changes->method_handle(), "must be");
+    if (oopDesc::equals(call_site, changes->call_site()) && !oopDesc::equals(java_lang_invoke_CallSite::target(call_site), changes->method_handle())) {
+      assert(!oopDesc::equals(method_handle, changes->method_handle()), "must be");
       return call_site->klass();  // assertion failed
     }
   }
--- a/src/hotspot/share/code/dependencies.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/dependencies.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -32,6 +32,7 @@
 #include "code/compressedStream.hpp"
 #include "code/nmethod.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.hpp"
 
--- a/src/hotspot/share/code/location.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/location.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,6 @@
 
 #include "asm/assembler.hpp"
 #include "code/vmreg.hpp"
-#include "memory/allocation.hpp"
 
 // A Location describes a concrete machine variable location
 // (such as integer or floating point register or a stack-held
--- a/src/hotspot/share/code/nmethod.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/nmethod.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -37,7 +37,6 @@
 #include "compiler/compilerDirectives.hpp"
 #include "compiler/directivesParser.hpp"
 #include "compiler/disassembler.hpp"
-#include "gc/shared/gcLocker.hpp"
 #include "interpreter/bytecode.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -53,6 +52,7 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/code/oopRecorder.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/oopRecorder.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -30,6 +30,7 @@
 #include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
+#include "utilities/copy.hpp"
 
 #ifdef ASSERT
 template <class T> int ValueRecorder<T>::_find_index_calls = 0;
@@ -201,4 +202,3 @@
   }
   return _values.at(location).index();
 }
-
--- a/src/hotspot/share/code/pcDesc.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/pcDesc.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_CODE_PCDESC_HPP
 #define SHARE_VM_CODE_PCDESC_HPP
 
-#include "memory/allocation.hpp"
 
 // PcDescs map a physical PC (given as offset from start of nmethod) to
 // the corresponding source scope and byte code index.
--- a/src/hotspot/share/code/relocInfo.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/relocInfo.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -28,6 +28,7 @@
 #include "code/nmethod.hpp"
 #include "code/relocInfo.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/copy.hpp"
 #include "oops/oop.inline.hpp"
@@ -307,7 +308,7 @@
 void Relocation::const_set_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    *(narrowOop*)addr() = oopDesc::encode_heap_oop((oop) x);
+    *(narrowOop*)addr() = CompressedOops::encode((oop) x);
   } else {
 #endif
     *(address*)addr() = x;
@@ -319,7 +320,7 @@
 void Relocation::const_verify_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    guarantee(*(narrowOop*)addr() == oopDesc::encode_heap_oop((oop) x), "must agree");
+    guarantee(*(narrowOop*)addr() == CompressedOops::encode((oop) x), "must agree");
   } else {
 #endif
     guarantee(*(address*)addr() == x, "must agree");
--- a/src/hotspot/share/code/vmreg.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/vmreg.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define SHARE_VM_CODE_VMREG_HPP
 
 #include "asm/register.hpp"
-#include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
--- a/src/hotspot/share/code/vtableStubs.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/vtableStubs.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@
 
    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
-    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
+    VtableBlob* blob = VtableBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
       return NULL;
     }
@@ -167,17 +167,18 @@
   _number_of_vtable_stubs++;
 }
 
-
-bool VtableStubs::is_entry_point(address pc) {
+VtableStub* VtableStubs::entry_point(address pc) {
   MutexLocker ml(VtableStubs_lock);
   VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
   uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
   VtableStub* s;
   for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
-  return s == stub;
+  if (s == stub) {
+    return s;
+  }
+  return NULL;
 }
 
-
 bool VtableStubs::contains(address pc) {
   // simple solution for now - we may want to use
   // a faster way if this function is called often
--- a/src/hotspot/share/code/vtableStubs.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/code/vtableStubs.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,7 @@
  public:
   static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
   static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
-  static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
+  static VtableStub* entry_point(address pc);                        // vtable stub entry point for a pc
   static bool        contains(address pc);                           // is pc within any stub?
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
   static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
--- a/src/hotspot/share/compiler/compileBroker.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -28,6 +28,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
+#include "code/codeHeapState.hpp"
 #include "code/dependencyContext.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/compileLog.hpp"
@@ -50,6 +51,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/timerTrace.hpp"
@@ -522,7 +524,7 @@
 // CompileBroker::compilation_init
 //
 // Initialize the Compilation object
-void CompileBroker::compilation_init(TRAPS) {
+void CompileBroker::compilation_init_phase1(TRAPS) {
   _last_method_compiled[0] = '\0';
 
   // No need to initialize compilation system if we do not use it.
@@ -669,11 +671,14 @@
                                           (jlong)CompileBroker::no_compile,
                                           CHECK);
   }
+}
 
+// Completes compiler initialization. Compilation requests submitted
+// prior to this will be silently ignored.
+void CompileBroker::compilation_init_phase2() {
   _initialized = true;
 }
 
-
 JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
                                        AbstractCompiler* comp, bool compiler_thread, TRAPS) {
   JavaThread* thread = NULL;
@@ -2423,3 +2428,111 @@
     }
   }
 }
+
+// Print general/accumulated JIT information.
+void CompileBroker::print_info(outputStream *out) {
+  if (out == NULL) out = tty;
+  out->cr();
+  out->print_cr("======================");
+  out->print_cr("   General JIT info   ");
+  out->print_cr("======================");
+  out->cr();
+  out->print_cr("            JIT is : %7s",     should_compile_new_jobs() ? "on" : "off");
+  out->print_cr("  Compiler threads : %7d",     (int)CICompilerCount);
+  out->cr();
+  out->print_cr("CodeCache overview");
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+  out->print_cr("         Reserved size : " SIZE_FORMAT_W(7) " KB", CodeCache::max_capacity() / K);
+  out->print_cr("        Committed size : " SIZE_FORMAT_W(7) " KB", CodeCache::capacity() / K);
+  out->print_cr("  Unallocated capacity : " SIZE_FORMAT_W(7) " KB", CodeCache::unallocated_capacity() / K);
+  out->cr();
+
+  out->cr();
+  out->print_cr("CodeCache cleaning overview");
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+  NMethodSweeper::print(out);
+  out->print_cr("--------------------------------------------------------");
+  out->cr();
+}
+
+// Note: tty_lock must not be held upon entry to this function.
+//       Print functions called from here do "micro-locking" on tty_lock.
+//       That is a tradeoff which keeps important blocks of output together.
+//       At the same time, continuous tty_lock hold time is kept in check,
+//       preventing concurrently printing threads from stalling for a long time.
+void CompileBroker::print_heapinfo(outputStream* out, const char* function, const char* granularity) {
+  TimeStamp ts_total;
+  TimeStamp ts;
+
+  bool allFun = !strcmp(function, "all");
+  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
+  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
+  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
+  bool methodCount = !strcmp(function, "MethodCount") || allFun;
+  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
+  bool methodAge = !strcmp(function, "MethodAge") || allFun;
+  bool methodNames = !strcmp(function, "MethodNames") || allFun;
+  bool discard = !strcmp(function, "discard") || allFun;
+
+  if (out == NULL) {
+    out = tty;
+  }
+
+  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
+    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
+    out->cr();
+    return;
+  }
+
+  ts_total.update(); // record starting point
+
+  if (aggregate) {
+    print_info(out);
+  }
+
+  // We hold the CodeHeapStateAnalytics_lock for the entire time, from here until we leave this function.
+  // That helps us get a consistent view of the CodeHeap, at least for the "all" function.
+  // When we request individual parts of the analysis via the jcmd interface, it is possible
+  // that, in between, another thread (another jcmd user or the VM running into a CodeCache OOM)
+  // has updated the aggregated data. That is a tolerable tradeoff because we cannot hold a lock
+  // across user interaction.
+  ts.update(); // record starting point
+  MutexLockerEx mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
+  out->cr();
+  out->print_cr("__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________", ts.seconds());
+  out->cr();
+
+  if (aggregate) {
+    // It is sufficient to hold the CodeCache_lock only for the aggregate step.
+    // All other functions operate on aggregated data, except MethodNames, but that should be safe.
+    // The separate CodeHeapStateAnalytics_lock protects the printing functions against
+    // concurrent aggregate steps. It must be acquired before the CodeCache_lock because
+    // a concurrent thread may hold the CodeHeapStateAnalytics_lock for a long time,
+    // which would otherwise keep the CodeCache_lock held unnecessarily long.
+    ts.update(); // record starting point
+    MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    out->cr();
+    out->print_cr("__ CodeCache lock wait took %10.3f seconds _________", ts.seconds());
+    out->cr();
+
+    ts.update(); // record starting point
+    CodeCache::aggregate(out, granularity);
+    out->cr();
+    out->print_cr("__ CodeCache lock hold took %10.3f seconds _________", ts.seconds());
+    out->cr();
+  }
+
+  if (usedSpace) CodeCache::print_usedSpace(out);
+  if (freeSpace) CodeCache::print_freeSpace(out);
+  if (methodCount) CodeCache::print_count(out);
+  if (methodSpace) CodeCache::print_space(out);
+  if (methodAge) CodeCache::print_age(out);
+  if (methodNames) CodeCache::print_names(out);
+  if (discard) CodeCache::discard(out);
+
+  out->cr();
+  out->print_cr("__ CodeHeapStateAnalytics total duration %10.3f seconds _________", ts_total.seconds());
+  out->cr();
+}
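As a usage sketch of the dispatch above: the function argument selects which analytics steps run, with "all" covering every step and a single step name reusing data from the most recent aggregate run. The granularity value below is an assumed example, not taken from this change:

    // Hypothetical diagnostic calls (e.g. behind a jcmd handler):
    CompileBroker::print_heapinfo(tty, "all", "4096");        // aggregate, then print everything
    CompileBroker::print_heapinfo(tty, "UsedSpace", "4096");  // print only; no re-aggregation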
--- a/src/hotspot/share/compiler/compileBroker.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -282,7 +282,8 @@
     CompileQueue *q = compile_queue(comp_level);
     return q != NULL ? q->size() : 0;
   }
-  static void compilation_init(TRAPS);
+  static void compilation_init_phase1(TRAPS);
+  static void compilation_init_phase2();
   static void init_compiler_thread_log();
   static nmethod* compile_method(const methodHandle& method,
                                  int osr_bci,
@@ -381,6 +382,10 @@
 
   // Log that compilation profiling is skipped because metaspace is full.
   static void log_metaspace_failure();
+
+  // CodeHeap State Analytics.
+  static void print_info(outputStream *out);
+  static void print_heapinfo(outputStream *out, const char* function, const char* granularity);
 };
 
 #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
--- a/src/hotspot/share/gc/cms/cmsArguments.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsArguments.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -80,8 +80,8 @@
 // sparc/solaris for certain applications, but would gain from
 // further optimization and tuning efforts, and would almost
 // certainly gain from analysis of platform and environment.
-void CMSArguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void CMSArguments::initialize() {
+  GCArguments::initialize();
   assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
 
--- a/src/hotspot/share/gc/cms/cmsArguments.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsArguments.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -34,7 +34,7 @@
   void disable_adaptive_size_policy(const char* collector_name);
   void set_parnew_gc_flags();
 public:
-  virtual void initialize_flags();
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -28,7 +28,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generationSpec.hpp"
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -132,7 +132,7 @@
 CMSHeap* CMSHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
-  assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
+  assert(heap->kind() == CollectedHeap::CMS, "Invalid name");
   return (CMSHeap*) heap;
 }
 
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -57,7 +57,7 @@
   static CMSHeap* heap();
 
   virtual Name kind() const {
-    return CollectedHeap::CMSHeap;
+    return CollectedHeap::CMS;
   }
 
   virtual const char* name() const {
--- a/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -28,6 +28,8 @@
 #include "gc/cms/cmsOopClosures.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 // MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
@@ -45,13 +47,13 @@
 }
 
 // Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls)                                 \
-  template <class T> void cls::do_oop_work(T* p) {            \
-    T heap_oop = oopDesc::load_heap_oop(p);                   \
-    if (!oopDesc::is_null(heap_oop)) {                        \
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);  \
-      do_oop(obj);                                            \
-    }                                                         \
+#define DO_OOP_WORK_IMPL(cls)                               \
+  template <class T> void cls::do_oop_work(T* p) {          \
+    T heap_oop = RawAccess<>::oop_load(p);                  \
+    if (!CompressedOops::is_null(heap_oop)) {               \
+      oop obj = CompressedOops::decode_not_null(heap_oop);  \
+      do_oop(obj);                                          \
+    }                                                       \
   }
 
 #define DO_OOP_WORK_NV_IMPL(cls)                              \
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -37,6 +37,8 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
@@ -2250,9 +2252,9 @@
   }
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       do_oop(p, obj);
     }
   }
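The hunk above shows the load/null-check/decode idiom this changeset converges on throughout the GC code: RawAccess<>::oop_load replaces oopDesc::load_heap_oop, and CompressedOops takes over null checking and decoding. A generic sketch of the idiom (the helper and closure shape are illustrative, not part of the patch):

    // Illustrative helper only: works for both narrowOop* and oop* fields,
    // assuming a closure type exposing do_oop(oop).
    template <class T, class Closure>
    inline void apply_if_not_null(T* p, Closure& cl) {
      T heap_oop = RawAccess<>::oop_load(p);            // raw load, no GC barriers
      if (!CompressedOops::is_null(heap_oop)) {
        cl.do_oop(CompressedOops::decode_not_null(heap_oop));
      }
    }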
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -44,7 +44,7 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -62,6 +62,7 @@
 #include "memory/iterator.inline.hpp"
 #include "memory/padded.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/atomic.hpp"
@@ -6638,6 +6639,11 @@
   _mark_stack(mark_stack)
 { }
 
+template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) {
+  oop obj = RawAccess<>::oop_load(p);
+  do_oop(obj);
+}
+
 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
 
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1319,10 +1319,8 @@
   CMSMarkStack*    _mark_stack;
  protected:
   void do_oop(oop p);
-  template <class T> inline void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
-    do_oop(obj);
-  }
+  template <class T> void do_oop_work(T *p);
+
  public:
   PushAndMarkVerifyClosure(CMSCollector* cms_collector,
                            MemRegion span,
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -51,6 +51,8 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -679,8 +681,7 @@
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -690,7 +691,7 @@
   _par_cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -706,8 +707,7 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -717,7 +717,7 @@
   _cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -726,15 +726,15 @@
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
 
 template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded()
                       ? obj->forwardee()
                       : _g->DefNewGeneration::copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
     if (_gc_barrier) {
       // If p points to a younger generation, mark the card.
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -32,10 +32,11 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
-  assert (!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -51,7 +52,7 @@
       new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
                                                                 obj, obj_sz, m);
     }
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
   }
 }
 
@@ -60,8 +61,7 @@
 
 template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  assert(!oopDesc::is_null(*p), "expected non-null object");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -77,9 +77,9 @@
          && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
 #ifndef PRODUCT
       if (_g->to()->is_in_reserved(obj)) {
@@ -111,14 +111,14 @@
       oop new_obj;
       if (m->is_marked()) { // Contains forwarding pointer.
         new_obj = ParNewGeneration::real_forwardee(obj);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                         "forwarded ",
                                         new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
       } else {
         size_t obj_sz = obj->size_given_klass(objK);
         new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
-        oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+        RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
         if (root_scan) {
           // This may have pushed an object.  If we have a root
           // category with a lot of roots, can't let the queue get too
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/promotionInfo.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -26,8 +26,9 @@
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/promotionInfo.hpp"
 #include "gc/shared/genOopClosures.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 
 /////////////////////////////////////////////////////////////////////////
 //// PromotionInfo
@@ -39,7 +40,7 @@
   PromotedObject* res;
   if (UseCompressedOops) {
     // The next pointer is a compressed oop stored in the top 32 bits
-    res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
+    res = (PromotedObject*)CompressedOops::decode(_data._narrow_next);
   } else {
     res = (PromotedObject*)(_next & next_mask);
   }
@@ -52,7 +53,7 @@
          "or insufficient alignment of objects");
   if (UseCompressedOops) {
     assert(_data._narrow_next == 0, "Overwrite?");
-    _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
+    _data._narrow_next = CompressedOops::encode(oop(x));
   } else {
     _next |= (intptr_t)x;
   }
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,7 +27,7 @@
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/vmCMSOperations.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/collectionSetChooser.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "runtime/atomic.hpp"
 
@@ -83,8 +84,7 @@
                   100), true /* C_Heap */),
     _front(0), _end(0), _first_par_unreserved_idx(0),
     _region_live_threshold_bytes(0), _remaining_reclaimable_bytes(0) {
-  _region_live_threshold_bytes =
-    HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
+  _region_live_threshold_bytes = mixed_gc_live_threshold_bytes();
 }
 
 #ifndef PRODUCT
@@ -148,6 +148,8 @@
   assert(!hr->is_pinned(),
          "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index());
   assert(!hr->is_young(), "should not be young!");
+  assert(hr->rem_set()->is_complete(),
+         "Trying to add region %u to the collection set with incomplete remembered set", hr->hrm_index());
   _regions.append(hr);
   _end++;
   _remaining_reclaimable_bytes += hr->reclaimable_bytes();
@@ -203,6 +205,16 @@
   }
 }
 
+void CollectionSetChooser::iterate(HeapRegionClosure* cl) {
+  for (uint i = _front; i < _end; i++) {
+    HeapRegion* r = regions_at(i);
+    if (cl->do_heap_region(r)) {
+      cl->set_incomplete();
+      break;
+    }
+  }
+}
+
 void CollectionSetChooser::clear() {
   _regions.clear();
   _front = 0;
@@ -228,6 +240,10 @@
       // before we fill them up).
       if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
         _cset_updater.add_region(r);
+      } else if (r->is_old()) {
+        // We can clean out the remembered sets of all regions that we did not
+        // choose as candidates but for which we created a remembered set.
+        r->rem_set()->clear(true);
       }
     }
     return false;
@@ -259,6 +275,18 @@
   return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
 }
 
+bool CollectionSetChooser::region_occupancy_low_enough_for_evac(size_t live_bytes) {
+  return live_bytes < mixed_gc_live_threshold_bytes();
+}
+
+bool CollectionSetChooser::should_add(HeapRegion* hr) const {
+  assert(hr->is_marked(), "pre-condition");
+  assert(!hr->is_young(), "should never consider young regions");
+  return !hr->is_pinned() &&
+          region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
+          hr->rem_set()->is_complete();
+}
+
 void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) {
   clear();
 
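The new iterate() hands each candidate region to a HeapRegionClosure and stops as soon as the closure returns true, marking the walk incomplete. A sketch of a closure a caller might pass (hypothetical, for illustration only):

    // Hypothetical closure: accept candidates until a live-byte budget runs out;
    // returning true triggers set_incomplete() and ends the iteration early.
    class BudgetedCandidateClosure : public HeapRegionClosure {
      size_t _budget;
     public:
      BudgetedCandidateClosure(size_t budget) : _budget(budget) { }
      bool do_heap_region(HeapRegion* r) {
        if (r->live_bytes() > _budget) {
          return true;                 // abort: budget exhausted
        }
        _budget -= r->live_bytes();
        return false;                  // continue iterating
      }
    };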
--- a/src/hotspot/share/gc/g1/collectionSetChooser.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,17 +101,19 @@
 
   CollectionSetChooser();
 
+  static size_t mixed_gc_live_threshold_bytes() {
+    return HeapRegion::GrainBytes * (size_t) G1MixedGCLiveThresholdPercent / 100;
+  }
+
+  static bool region_occupancy_low_enough_for_evac(size_t live_bytes);
+
   void sort_regions();
 
   // Determine whether to add the given region to the CSet chooser or
   // not. Currently, we skip pinned regions and regions whose live
   // bytes are over the threshold. Humongous regions may be reclaimed during cleanup.
-  bool should_add(HeapRegion* hr) {
-    assert(hr->is_marked(), "pre-condition");
-    assert(!hr->is_young(), "should never consider young regions");
-    return !hr->is_pinned() &&
-            hr->live_bytes() < _region_live_threshold_bytes;
-  }
+  // Regions also need a complete remembered set to be a candidate.
+  bool should_add(HeapRegion* hr) const;
 
   // Returns the number of candidate old regions added
   uint length() { return _end; }
@@ -133,6 +135,9 @@
   // and the amount of reclaimable bytes by reclaimable_bytes.
   void update_totals(uint region_num, size_t reclaimable_bytes);
 
+  // Iterate over all collection set candidate regions.
+  void iterate(HeapRegionClosure* cl);
+
   void clear();
 
   void rebuild(WorkGang* workers, uint n_regions);
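As a worked example of the hoisted threshold formula, assuming 1 MiB regions (HeapRegion::GrainBytes = 1048576) and the default G1MixedGCLiveThresholdPercent of 85 (an assumed default, not stated in this patch):

    // mixed_gc_live_threshold_bytes() = 1048576 * 85 / 100 = 891289 bytes
    // so an old region with roughly 870 KiB or more of live data is never
    // selected as a mixed-GC evacuation candidate.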
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,486 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderData.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
-#include "gc/g1/g1Analytics.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1MMUTracker.hpp"
-#include "gc/g1/g1Policy.hpp"
-#include "gc/g1/vm_operations_g1.hpp"
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "logging/log.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/debug.hpp"
-
-// ======= Concurrent Mark Thread ========
-
-// Check order in EXPAND_CURRENT_PHASES
-STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
-              ConcurrentGCPhaseManager::IDLE_PHASE);
-
-#define EXPAND_CONCURRENT_PHASES(expander)                              \
-  expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)  \
-  expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)          \
-  expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                       \
-  expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")      \
-  expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")          \
-  expander(CONCURRENT_MARK,, "Concurrent Mark")                         \
-  expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")              \
-  expander(BEFORE_REMARK,, NULL)                                        \
-  expander(REMARK,, NULL)                                               \
-  expander(CREATE_LIVE_DATA,, "Concurrent Create Live Data")            \
-  expander(COMPLETE_CLEANUP,, "Concurrent Complete Cleanup")            \
-  expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")  \
-  /* */
-
-class G1ConcurrentPhase : public AllStatic {
-public:
-  enum {
-#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
-    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
-#undef CONCURRENT_PHASE_ENUM
-    PHASE_ID_LIMIT
-  };
-};
-
-// The CM thread is created when the G1 garbage collector is used
-
-ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
-  ConcurrentGCThread(),
-  _cm(cm),
-  _state(Idle),
-  _phase_manager_stack(),
-  _vtime_accum(0.0),
-  _vtime_mark_accum(0.0) {
-
-  set_name("G1 Main Marker");
-  create_and_start();
-}
-
-class CMCheckpointRootsFinalClosure: public VoidClosure {
-
-  G1ConcurrentMark* _cm;
-public:
-
-  CMCheckpointRootsFinalClosure(G1ConcurrentMark* cm) :
-    _cm(cm) {}
-
-  void do_void(){
-    _cm->checkpoint_roots_final(false); // !clear_all_soft_refs
-  }
-};
-
-class CMCleanUp: public VoidClosure {
-  G1ConcurrentMark* _cm;
-public:
-
-  CMCleanUp(G1ConcurrentMark* cm) :
-    _cm(cm) {}
-
-  void do_void(){
-    _cm->cleanup();
-  }
-};
-
-double ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
-  // There are 3 reasons to use SuspendibleThreadSetJoiner.
-  // 1. To avoid concurrency problem.
-  //    - G1MMUTracker::add_pause(), when_sec() and its variation(when_ms() etc..) can be called
-  //      concurrently from ConcurrentMarkThread and VMThread.
-  // 2. If currently a gc is running, but it has not yet updated the MMU,
-  //    we will not forget to consider that pause in the MMU calculation.
-  // 3. If currently a gc is running, ConcurrentMarkThread will wait it to be finished.
-  //    And then sleep for predicted amount of time by delay_to_keep_mmu().
-  SuspendibleThreadSetJoiner sts_join;
-
-  const G1Analytics* analytics = g1_policy->analytics();
-  double now = os::elapsedTime();
-  double prediction_ms = remark ? analytics->predict_remark_time_ms()
-                                : analytics->predict_cleanup_time_ms();
-  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
-  return mmu_tracker->when_ms(now, prediction_ms);
-}
-
-void ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
-  if (g1_policy->adaptive_young_list_length()) {
-    jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
-    if (!cm()->has_aborted() && sleep_time_ms > 0) {
-      os::sleep(this, sleep_time_ms, false);
-    }
-  }
-}
-
-class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
-  G1ConcurrentMark* _cm;
-
- public:
-  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
-    GCTraceConcTimeImpl<LogLevel::Info,  LogTag::_gc, LogTag::_marking>(title),
-    _cm(cm)
-  {
-    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
-  }
-
-  ~G1ConcPhaseTimer() {
-    _cm->gc_timer_cm()->register_gc_concurrent_end();
-  }
-};
-
-static const char* const concurrent_phase_names[] = {
-#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
-  EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
-#undef CONCURRENT_PHASE_NAME
-  NULL                          // terminator
-};
-// Verify dense enum assumption.  +1 for terminator.
-STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
-              ARRAY_SIZE(concurrent_phase_names));
-
-// Returns the phase number for name, or a negative value if unknown.
-static int lookup_concurrent_phase(const char* name) {
-  const char* const* names = concurrent_phase_names;
-  for (uint i = 0; names[i] != NULL; ++i) {
-    if (strcmp(name, names[i]) == 0) {
-      return static_cast<int>(i);
-    }
-  }
-  return -1;
-}
-
-// The phase must be valid and must have a title.
-static const char* lookup_concurrent_phase_title(int phase) {
-  static const char* const titles[] = {
-#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
-    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
-#undef CONCURRENT_PHASE_TITLE
-  };
-  // Verify dense enum assumption.
-  STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));
-
-  assert(0 <= phase, "precondition");
-  assert((uint)phase < ARRAY_SIZE(titles), "precondition");
-  const char* title = titles[phase];
-  assert(title != NULL, "precondition");
-  return title;
-}
-
-class G1ConcPhaseManager : public StackObj {
-  G1ConcurrentMark* _cm;
-  ConcurrentGCPhaseManager _manager;
-
-public:
-  G1ConcPhaseManager(int phase, ConcurrentMarkThread* thread) :
-    _cm(thread->cm()),
-    _manager(phase, thread->phase_manager_stack())
-  { }
-
-  ~G1ConcPhaseManager() {
-    // Deactivate the manager if marking aborted, to avoid blocking on
-    // phase exit when the phase has been requested.
-    if (_cm->has_aborted()) {
-      _manager.deactivate();
-    }
-  }
-
-  void set_phase(int phase, bool force) {
-    _manager.set_phase(phase, force);
-  }
-};
-
-// Combine phase management and timing into one convenient utility.
-class G1ConcPhase : public StackObj {
-  G1ConcPhaseTimer _timer;
-  G1ConcPhaseManager _manager;
-
-public:
-  G1ConcPhase(int phase, ConcurrentMarkThread* thread) :
-    _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
-    _manager(phase, thread)
-  { }
-};
-
-const char* const* ConcurrentMarkThread::concurrent_phases() const {
-  return concurrent_phase_names;
-}
-
-bool ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
-  int phase = lookup_concurrent_phase(phase_name);
-  if (phase < 0) return false;
-
-  while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
-                                                   phase_manager_stack())) {
-    assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
-    if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
-      // If idle and the goal is !idle, start a collection.
-      G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
-    }
-  }
-  return true;
-}
-
-void ConcurrentMarkThread::run_service() {
-  _vtime_start = os::elapsedVTime();
-
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1Policy* g1_policy = g1h->g1_policy();
-
-  G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);
-
-  while (!should_terminate()) {
-    // wait until started is set.
-    sleepBeforeNextCycle();
-    if (should_terminate()) {
-      break;
-    }
-
-    cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);
-
-    GCIdMark gc_id_mark;
-
-    cm()->concurrent_cycle_start();
-
-    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
-    {
-      ResourceMark rm;
-      HandleMark   hm;
-      double cycle_start = os::elapsedVTime();
-
-      {
-        G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
-        ClassLoaderDataGraph::clear_claimed_marks();
-      }
-
-      // We have to ensure that we finish scanning the root regions
-      // before the next GC takes place. To ensure this we have to
-      // make sure that we do not join the STS until the root regions
-      // have been scanned. If we did then it's possible that a
-      // subsequent GC could block us from joining the STS and proceed
-      // without the root regions have been scanned which would be a
-      // correctness issue.
-
-      {
-        G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
-        _cm->scan_root_regions();
-      }
-
-      // It would be nice to use the G1ConcPhase class here but
-      // the "end" logging is inside the loop and not at the end of
-      // a scope. Also, the timer doesn't support nesting.
-      // Mimicking the same log output instead.
-      {
-        G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
-        jlong mark_start = os::elapsed_counter();
-        const char* cm_title =
-          lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
-        log_info(gc, marking)("%s (%.3fs)",
-                              cm_title,
-                              TimeHelper::counter_to_seconds(mark_start));
-        for (uint iter = 1; !cm()->has_aborted(); ++iter) {
-          // Concurrent marking.
-          {
-            G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
-            _cm->mark_from_roots();
-          }
-          if (cm()->has_aborted()) break;
-
-          // Provide a control point after mark_from_roots.
-          {
-            G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
-          }
-          if (cm()->has_aborted()) break;
-
-          // Delay remark pause for MMU.
-          double mark_end_time = os::elapsedVTime();
-          jlong mark_end = os::elapsed_counter();
-          _vtime_mark_accum += (mark_end_time - cycle_start);
-          delay_to_keep_mmu(g1_policy, true /* remark */);
-          if (cm()->has_aborted()) break;
-
-          // Pause Remark.
-          log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
-                                cm_title,
-                                TimeHelper::counter_to_seconds(mark_start),
-                                TimeHelper::counter_to_seconds(mark_end),
-                                TimeHelper::counter_to_millis(mark_end - mark_start));
-          mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
-          CMCheckpointRootsFinalClosure final_cl(_cm);
-          VM_CGC_Operation op(&final_cl, "Pause Remark");
-          VMThread::execute(&op);
-          if (cm()->has_aborted()) {
-            break;
-          } else if (!cm()->restart_for_overflow()) {
-            break;              // Exit loop if no restart requested.
-          } else {
-            // Loop to restart for overflow.
-            mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
-            log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
-                                  cm_title, iter);
-          }
-        }
-      }
-
-      if (!cm()->has_aborted()) {
-        G1ConcPhase p(G1ConcurrentPhase::CREATE_LIVE_DATA, this);
-        cm()->create_live_data();
-      }
-
-      double end_time = os::elapsedVTime();
-      // Update the total virtual time before doing this, since it will try
-      // to measure it to get the vtime for this marking.  We purposely
-      // neglect the presumably-short "completeCleanup" phase here.
-      _vtime_accum = (end_time - _vtime_start);
-
-      if (!cm()->has_aborted()) {
-        delay_to_keep_mmu(g1_policy, false /* cleanup */);
-
-        if (!cm()->has_aborted()) {
-          CMCleanUp cl_cl(_cm);
-          VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
-          VMThread::execute(&op);
-        }
-      } else {
-        // We don't want to update the marking status if a GC pause
-        // is already underway.
-        SuspendibleThreadSetJoiner sts_join;
-        g1h->collector_state()->set_mark_in_progress(false);
-      }
-
-      // Check if cleanup set the free_regions_coming flag. If it
-      // hasn't, we can just skip the next step.
-      if (g1h->free_regions_coming()) {
-        // The following will finish freeing up any regions that we
-        // found to be empty during cleanup. We'll do this part
-        // without joining the suspendible set. If an evacuation pause
-        // takes place, then we would carry on freeing regions in
-        // case they are needed by the pause. If a Full GC takes
-        // place, it would wait for us to process the regions
-        // reclaimed by cleanup.
-
-        // Now do the concurrent cleanup operation.
-        G1ConcPhase p(G1ConcurrentPhase::COMPLETE_CLEANUP, this);
-        _cm->complete_cleanup();
-
-        // Notify anyone who's waiting that there are no more free
-        // regions coming. We have to do this before we join the STS
-        // (in fact, we should not attempt to join the STS in the
-        // interval between finishing the cleanup pause and clearing
-        // the free_regions_coming flag) otherwise we might deadlock:
-        // a GC worker could be blocked waiting for the notification
-        // whereas this thread will be blocked for the pause to finish
-        // while it's trying to join the STS, which is conditional on
-        // the GC workers finishing.
-        g1h->reset_free_regions_coming();
-      }
-      guarantee(cm()->cleanup_list_is_empty(),
-                "at this point there should be no regions on the cleanup list");
-
-      // There is a tricky race before recording that the concurrent
-      // cleanup has completed and a potential Full GC starting around
-      // the same time. We want to make sure that the Full GC calls
-      // abort() on concurrent mark after
-      // record_concurrent_mark_cleanup_completed(), since abort() is
-      // the method that will reset the concurrent mark state. If we
-      // end up calling record_concurrent_mark_cleanup_completed()
-      // after abort() then we might incorrectly undo some of the work
-      // abort() did. Checking the has_aborted() flag after joining
-      // the STS allows the correct ordering of the two methods. There
-      // are two scenarios:
-      //
-      // a) If we reach here before the Full GC, the fact that we have
-      // joined the STS means that the Full GC cannot start until we
-      // leave the STS, so record_concurrent_mark_cleanup_completed()
-      // will complete before abort() is called.
-      //
-      // b) If we reach here during the Full GC, we'll be held up from
-      // joining the STS until the Full GC is done, which means that
-      // abort() will have completed and has_aborted() will return
-      // true to prevent us from calling
-      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
-      // not needed any more as the concurrent mark state has been
-      // already reset).
-      {
-        SuspendibleThreadSetJoiner sts_join;
-        if (!cm()->has_aborted()) {
-          g1_policy->record_concurrent_mark_cleanup_completed();
-        } else {
-          log_info(gc, marking)("Concurrent Mark Abort");
-        }
-      }
-
-      // We now want to allow clearing of the marking bitmap to be
-      // suspended by a collection pause.
-      // We may have aborted just before the remark. Do not bother clearing the
-      // bitmap then, as it has been done during mark abort.
-      if (!cm()->has_aborted()) {
-        G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
-        _cm->cleanup_for_next_mark();
-      } else {
-        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
-      }
-    }
-
-    // Update the number of full collections that have been
-    // completed. This will also notify the FullGCCount_lock in case a
-    // Java thread is waiting for a full GC to happen (e.g., it
-    // called System.gc() with +ExplicitGCInvokesConcurrent).
-    {
-      SuspendibleThreadSetJoiner sts_join;
-      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
-
-      cm()->concurrent_cycle_end();
-    }
-
-    cpmanager.set_phase(G1ConcurrentPhase::IDLE, cm()->has_aborted() /* force */);
-  }
-  _cm->root_regions()->cancel_scan();
-}
-
-void ConcurrentMarkThread::stop_service() {
-  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
-  CGC_lock->notify_all();
-}
-
-void ConcurrentMarkThread::sleepBeforeNextCycle() {
-  // We join here because we don't want to do the "shouldConcurrentMark()"
-  // below while the world is otherwise stopped.
-  assert(!in_progress(), "should have been cleared");
-
-  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  while (!started() && !should_terminate()) {
-    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
-  }
-
-  if (started()) {
-    set_in_progress();
-  }
-}
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
-
-#include "gc/shared/concurrentGCPhaseManager.hpp"
-#include "gc/shared/concurrentGCThread.hpp"
-
-// The Concurrent Mark GC Thread triggers the parallel G1CMConcurrentMarkingTasks
-// as well as handling various marking cleanup.
-
-class G1ConcurrentMark;
-class G1Policy;
-
-class ConcurrentMarkThread: public ConcurrentGCThread {
-  friend class VMStructs;
-
-  double _vtime_start;  // Initial virtual time.
-  double _vtime_accum;  // Accumulated virtual time.
-  double _vtime_mark_accum;
-
-  G1ConcurrentMark*                _cm;
-
-  enum State {
-    Idle,
-    Started,
-    InProgress
-  };
-
-  volatile State _state;
-
-  // WhiteBox testing support.
-  ConcurrentGCPhaseManager::Stack _phase_manager_stack;
-
-  void sleepBeforeNextCycle();
-  // Delay marking to meet MMU.
-  void delay_to_keep_mmu(G1Policy* g1_policy, bool remark);
-  double mmu_sleep_time(G1Policy* g1_policy, bool remark);
-
-  void run_service();
-  void stop_service();
-
- public:
-  // Constructor
-  ConcurrentMarkThread(G1ConcurrentMark* cm);
-
-  // Total virtual time so far for this thread and concurrent marking tasks.
-  double vtime_accum();
-  // Marking virtual time so far this thread and concurrent marking tasks.
-  double vtime_mark_accum();
-
-  G1ConcurrentMark* cm()   { return _cm; }
-
-  void set_idle()          { assert(_state != Started, "must not be starting a new cycle"); _state = Idle; }
-  bool idle()              { return _state == Idle; }
-  void set_started()       { assert(_state == Idle, "cycle in progress"); _state = Started; }
-  bool started()           { return _state == Started; }
-  void set_in_progress()   { assert(_state == Started, "must be starting a cycle"); _state = InProgress; }
-  bool in_progress()       { return _state == InProgress; }
-
-  // Returns true from the moment a marking cycle is
-  // initiated (during the initial-mark pause when started() is set)
-  // to the moment when the cycle completes (just after the next
-  // marking bitmap has been cleared and in_progress() is
-  // cleared). While during_cycle() is true we will not start another cycle
-  // so that cycles do not overlap. We cannot use just in_progress()
-  // as the CM thread might take some time to wake up before noticing
-  // that started() is set and set in_progress().
-  bool during_cycle()      { return !idle(); }
-
-  // WhiteBox testing support.
-  const char* const* concurrent_phases() const;
-  bool request_concurrent_phase(const char* phase);
-
-  ConcurrentGCPhaseManager::Stack* phase_manager_stack() {
-    return &_phase_manager_stack;
-  }
-};
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_HPP
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.inline.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-#define SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
-
-#include "gc/g1/concurrentMarkThread.hpp"
-#include "gc/g1/g1ConcurrentMark.hpp"
-
-  // Total virtual time so far.
-inline double ConcurrentMarkThread::vtime_accum() {
-  return _vtime_accum + _cm->all_task_accum_vtime();
-}
-
-// Marking virtual time so far
-inline double ConcurrentMarkThread::vtime_mark_accum() {
-  return _vtime_mark_accum + _cm->all_task_accum_vtime();
-}
-
-#endif // SHARE_VM_GC_G1_CONCURRENTMARKTHREAD_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -27,8 +27,10 @@
 #include "gc/g1/g1AllocRegion.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/g1/heapRegionType.hpp"
 #include "utilities/align.hpp"
 
 G1DefaultAllocator::G1DefaultAllocator(G1CollectedHeap* heap) :
@@ -72,13 +74,12 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->is_humongous()) {
-    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
     // it's retired again.
     _g1h->old_set_remove(retained_region);
-    bool during_im = _g1h->collector_state()->during_initial_mark_pause();
+    bool during_im = _g1h->collector_state()->in_initial_mark_gc();
     retained_region->note_start_of_copying(during_im);
     old->set(retained_region);
     _g1h->hr_printer()->reuse(retained_region);
@@ -342,6 +343,7 @@
   } else {
     hr->set_closed_archive();
   }
+  _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
   _g1h->old_set_add(hr);
   _g1h->hr_printer()->alloc(hr);
   _allocated_regions.append(hr);
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,16 +166,16 @@
   _cost_scan_hcc_seq->add(cost_scan_hcc);
 }
 
-void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young) {
-  if (last_gc_was_young) {
+void G1Analytics::report_cost_per_entry_ms(double cost_per_entry_ms, bool for_young_gc) {
+  if (for_young_gc) {
     _cost_per_entry_ms_seq->add(cost_per_entry_ms);
   } else {
     _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
   }
 }
 
-void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young) {
-  if (last_gc_was_young) {
+void G1Analytics::report_cards_per_entry_ratio(double cards_per_entry_ratio, bool for_young_gc) {
+  if (for_young_gc) {
     _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
   } else {
     _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
@@ -186,8 +186,8 @@
   _rs_length_diff_seq->add(rs_length_diff);
 }
 
-void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window) {
-  if (in_marking_window) {
+void G1Analytics::report_cost_per_byte_ms(double cost_per_byte_ms, bool mark_or_rebuild_in_progress) {
+  if (mark_or_rebuild_in_progress) {
     _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
   } else {
     _cost_per_byte_ms_seq->add(cost_per_byte_ms);
@@ -246,16 +246,16 @@
   }
 }
 
-size_t G1Analytics::predict_card_num(size_t rs_length, bool gcs_are_young) const {
-  if (gcs_are_young) {
+size_t G1Analytics::predict_card_num(size_t rs_length, bool for_young_gc) const {
+  if (for_young_gc) {
     return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
   } else {
     return (size_t) (rs_length * predict_mixed_cards_per_entry_ratio());
   }
 }
 
-double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const {
-  if (gcs_are_young) {
+double G1Analytics::predict_rs_scan_time_ms(size_t card_num, bool for_young_gc) const {
+  if (for_young_gc) {
     return card_num * get_new_prediction(_cost_per_entry_ms_seq);
   } else {
     return predict_mixed_rs_scan_time_ms(card_num);
--- a/src/hotspot/share/gc/g1/g1Analytics.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Analytics.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,10 +101,10 @@
   void report_alloc_rate_ms(double alloc_rate);
   void report_cost_per_card_ms(double cost_per_card_ms);
   void report_cost_scan_hcc(double cost_scan_hcc);
-  void report_cost_per_entry_ms(double cost_per_entry_ms, bool last_gc_was_young);
-  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool last_gc_was_young);
+  void report_cost_per_entry_ms(double cost_per_entry_ms, bool for_young_gc);
+  void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool for_young_gc);
   void report_rs_length_diff(double rs_length_diff);
-  void report_cost_per_byte_ms(double cost_per_byte_ms, bool in_marking_window);
+  void report_cost_per_byte_ms(double cost_per_byte_ms, bool mark_or_rebuild_in_progress);
   void report_young_other_cost_per_region_ms(double other_cost_per_region_ms);
   void report_non_young_other_cost_per_region_ms(double other_cost_per_region_ms);
   void report_constant_other_time_ms(double constant_other_time_ms);
@@ -126,9 +126,9 @@
 
   double predict_mixed_cards_per_entry_ratio() const;
 
-  size_t predict_card_num(size_t rs_length, bool gcs_are_young) const;
+  size_t predict_card_num(size_t rs_length, bool for_young_gc) const;
 
-  double predict_rs_scan_time_ms(size_t card_num, bool gcs_are_young) const;
+  double predict_rs_scan_time_ms(size_t card_num, bool for_young_gc) const;
 
   double predict_mixed_rs_scan_time_ms(size_t card_num) const;
 
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -37,8 +38,42 @@
   return HeapRegion::max_region_size();
 }
 
-void G1Arguments::initialize_flags() {
-  GCArguments::initialize_flags();
+void G1Arguments::initialize_verification_types() {
+  if (strlen(VerifyGCType) > 0) {
+    const char delimiter[] = " ,\n";
+    size_t length = strlen(VerifyGCType);
+    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
+    strncpy(type_list, VerifyGCType, length + 1);
+    char* token = strtok(type_list, delimiter);
+    while (token != NULL) {
+      parse_verification_type(token);
+      token = strtok(NULL, delimiter);
+    }
+    FREE_C_HEAP_ARRAY(char, type_list);
+  }
+}
+
+void G1Arguments::parse_verification_type(const char* type) {
+  if (strcmp(type, "young-only") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungOnly);
+  } else if (strcmp(type, "initial-mark") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyInitialMark);
+  } else if (strcmp(type, "mixed") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
+  } else if (strcmp(type, "remark") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
+  } else if (strcmp(type, "cleanup") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
+  } else if (strcmp(type, "full") == 0) {
+    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
+  } else {
+    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
+                            "young-only, initial-mark, mixed, remark, cleanup and full", type);
+  }
+}
+
+void G1Arguments::initialize() {
+  GCArguments::initialize();
   assert(UseG1GC, "Error");
   FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
   if (ParallelGCThreads == 0) {
@@ -100,12 +135,8 @@
     }
   }
 #endif
-}
 
-bool G1Arguments::parse_verification_type(const char* type) {
-  G1CollectedHeap::heap()->verifier()->parse_verification_type(type);
-  // Always return true because we want to parse all values.
-  return true;
+  initialize_verification_types();
 }
 
 CollectedHeap* G1Arguments::create_heap() {
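
The new initialize_verification_types()/parse_verification_type() pair above tokenizes the VerifyGCType option on spaces, commas and newlines. A self-contained sketch of the same strtok pattern, with plain malloc/free standing in for NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY and a stub parser:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // strtok mutates its input, so the option string is copied first,
    // exactly as the hunk above copies VerifyGCType.
    static void parse_type(const char* type) {
      printf("verification type: %s\n", type);
    }

    static void parse_verification_types(const char* option) {
      if (option == NULL || strlen(option) == 0) return;
      size_t length = strlen(option);
      char* copy = (char*)malloc(length + 1);
      memcpy(copy, option, length + 1);
      for (char* tok = strtok(copy, " ,\n"); tok != NULL; tok = strtok(NULL, " ,\n")) {
        parse_type(tok);
      }
      free(copy);
    }

    int main() {
      parse_verification_types("young-only, remark full"); // prints three types
      return 0;
    }
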
--- a/src/hotspot/share/gc/g1/g1Arguments.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1Arguments.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,9 +31,14 @@
 class CollectedHeap;
 
 class G1Arguments : public GCArguments {
+  friend class G1HeapVerifierTest_parse_Test;
+
+private:
+  static void initialize_verification_types();
+  static void parse_verification_type(const char* type);
+
 public:
-  virtual void initialize_flags();
-  virtual bool parse_verification_type(const char* type);
+  virtual void initialize();
   virtual size_t conservative_max_heap_alignment();
   virtual CollectedHeap* create_heap();
 };
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -30,6 +30,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -77,9 +79,9 @@
   if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (size_t i = 0; i < count; i++, elem_ptr++) {
-    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
-    if (!oopDesc::is_null(heap_oop)) {
-      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+    T heap_oop = RawAccess<>::oop_load(elem_ptr);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode_not_null(heap_oop));
     }
   }
 }
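
The loop now goes through RawAccess<>::oop_load(), CompressedOops::is_null() and CompressedOops::decode_not_null() instead of the oopDesc helpers. As a rough mental model of the decode step (values illustrative; HotSpot computes base and shift from the actual heap geometry):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t narrowOop;             // compressed reference

    const uintptr_t kHeapBase = 0x10000000; // assumed heap base
    const int       kShift    = 3;          // assumed 8-byte alignment

    inline bool is_null(narrowOop v) { return v == 0; }

    // Decoding is only legal after a null check, which is why the loop
    // above filters with is_null() before calling decode_not_null().
    inline void* decode_not_null(narrowOop v) {
      assert(!is_null(v));
      return (void*)(kHeapBase + ((uintptr_t)v << kShift));
    }
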
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Fri Apr 13 09:04:18 2018 -0700
@@ -28,7 +28,9 @@
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void G1BarrierSet::write_ref_field_pre(T* field) {
@@ -38,8 +40,8 @@
   }
 
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(field);
-  if (!oopDesc::is_null(heap_oop)) {
-    enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+  if (!CompressedOops::is_null(heap_oop)) {
+    enqueue(CompressedOops::decode_not_null(heap_oop));
   }
 }
 
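
write_ref_field_pre() is G1's SATB (snapshot-at-the-beginning) pre-write barrier: while marking is active, the value about to be overwritten is enqueued so concurrent marking still sees the snapshot. A conceptual sketch, with a plain vector standing in for HotSpot's per-thread SATB queues:

    #include <vector>

    struct ToySatbBarrier {
      bool marking_active = false;
      std::vector<void*> queue;

      void write_ref_field_pre(void** field) {
        if (!marking_active) return;
        void* old_val = *field;      // RawAccess<MO_VOLATILE>::oop_load analogue
        if (old_val != nullptr) {    // CompressedOops::is_null analogue
          queue.push_back(old_val);  // enqueue() analogue
        }
      }
    };
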
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,587 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1CardLiveData.inline.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "logging/log.hpp"
-#include "memory/universe.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/bitMap.inline.hpp"
-#include "utilities/debug.hpp"
-
-G1CardLiveData::G1CardLiveData() :
-  _max_capacity(0),
-  _cards_per_region(0),
-  _gc_timestamp_at_create(0),
-  _live_regions(NULL),
-  _live_regions_size_in_bits(0),
-  _live_cards(NULL),
-  _live_cards_size_in_bits(0) {
-}
-
-G1CardLiveData::~G1CardLiveData()  {
-  free_large_bitmap(_live_cards, _live_cards_size_in_bits);
-  free_large_bitmap(_live_regions, _live_regions_size_in_bits);
-}
-
-G1CardLiveData::bm_word_t* G1CardLiveData::allocate_large_bitmap(size_t size_in_bits) {
-  size_t size_in_words = BitMap::calc_size_in_words(size_in_bits);
-
-  bm_word_t* map = MmapArrayAllocator<bm_word_t>::allocate(size_in_words, mtGC);
-
-  return map;
-}
-
-void G1CardLiveData::free_large_bitmap(bm_word_t* bitmap, size_t size_in_bits) {
-  MmapArrayAllocator<bm_word_t>::free(bitmap, BitMap::calc_size_in_words(size_in_bits));
-}
-
-void G1CardLiveData::initialize(size_t max_capacity, uint num_max_regions) {
-  assert(max_capacity % num_max_regions == 0,
-         "Given capacity must be evenly divisible by the number of regions.");
-  size_t region_size = max_capacity / num_max_regions;
-  assert(region_size % (G1CardTable::card_size * BitsPerWord) == 0,
-         "Region size must be evenly divisible by area covered by a single word.");
-  _max_capacity = max_capacity;
-  _cards_per_region = region_size / G1CardTable::card_size;
-
-  _live_regions_size_in_bits = live_region_bitmap_size_in_bits();
-  _live_regions = allocate_large_bitmap(_live_regions_size_in_bits);
-  _live_cards_size_in_bits = live_card_bitmap_size_in_bits();
-  _live_cards = allocate_large_bitmap(_live_cards_size_in_bits);
-}
-
-void G1CardLiveData::pretouch() {
-  live_cards_bm().pretouch();
-  live_regions_bm().pretouch();
-}
-
-size_t G1CardLiveData::live_region_bitmap_size_in_bits() const {
-  return _max_capacity / (_cards_per_region << G1CardTable::card_shift);
-}
-
-size_t G1CardLiveData::live_card_bitmap_size_in_bits() const {
-  return _max_capacity >> G1CardTable::card_shift;
-}
-
-// Helper class that provides functionality to generate the Live Data Count
-// information.
-class G1CardLiveDataHelper {
-private:
-  BitMapView _region_bm;
-  BitMapView _card_bm;
-
-  // The card number of the bottom of the G1 heap.
-  // Used in biasing indices into accounting card bitmaps.
-  BitMap::idx_t _heap_card_bias;
-
-  // Utility routine to set an exclusive range of bits on the given
-  // bitmap, optimized for very small ranges.
-  // There must be at least one bit to set.
-  void set_card_bitmap_range(BitMap::idx_t start_idx,
-                             BitMap::idx_t end_idx) {
-
-    // Set the exclusive bit range [start_idx, end_idx).
-    assert((end_idx - start_idx) > 0, "at least one bit");
-
-    // For small ranges use a simple loop; otherwise use set_range.
-    // The range is made up of the cards that are spanned by an object/mem
-    // region, so 8 cards will allow object sizes up to 4K to be handled
-    // using the loop.
-    if ((end_idx - start_idx) <= 8) {
-      for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
-        _card_bm.set_bit(i);
-      }
-    } else {
-      _card_bm.set_range(start_idx, end_idx);
-    }
-  }
-
-  // We cache the last mark set. This avoids setting the same bit multiple times.
-  // This is particularly interesting for dense bitmaps, as this avoids doing
-  // lots of work most of the time.
-  BitMap::idx_t _last_marked_bit_idx;
-
-  void clear_card_bitmap_range(HeapWord* start, HeapWord* end) {
-    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
-
-    _card_bm.clear_range(start_idx, end_idx);
-  }
-
-  // Mark the card liveness bitmap for the object spanning from start to end.
-  void mark_card_bitmap_range(HeapWord* start, HeapWord* end) {
-    BitMap::idx_t start_idx = card_live_bitmap_index_for(start);
-    BitMap::idx_t end_idx = card_live_bitmap_index_for(align_up(end, CardTable::card_size));
-
-    assert((end_idx - start_idx) > 0, "Trying to mark zero sized range.");
-
-    if (start_idx == _last_marked_bit_idx) {
-      start_idx++;
-    }
-    if (start_idx == end_idx) {
-      return;
-    }
-
-    // Set the bits in the card bitmap for the cards spanned by this object.
-    set_card_bitmap_range(start_idx, end_idx);
-    _last_marked_bit_idx = end_idx - 1;
-  }
-
-  void reset_mark_cache() {
-    _last_marked_bit_idx = (BitMap::idx_t)-1;
-  }
-
-public:
-  // Returns the index in the per-card liveness count bitmap
-  // for the given address
-  inline BitMap::idx_t card_live_bitmap_index_for(HeapWord* addr) {
-    // Below, the term "card num" means the result of shifting an address
-    // by the card shift -- address 0 corresponds to card number 0.  One
-    // must subtract the card num of the bottom of the heap to obtain a
-    // card table index.
-    BitMap::idx_t card_num = uintptr_t(addr) >> G1CardTable::card_shift;
-    return card_num - _heap_card_bias;
-  }
-
-  // Takes a region that's not empty (i.e., it has at least one
-  // live object in it) and sets its corresponding bit in the region
-  // bitmap to 1.
-  void set_bit_for_region(HeapRegion* hr) {
-    _region_bm.par_set_bit(hr->hrm_index());
-  }
-
-  void reset_live_data(HeapRegion* hr) {
-    clear_card_bitmap_range(hr->next_top_at_mark_start(), hr->end());
-  }
-
-  // Mark the range of bits covered by allocations done since the last marking
-  // in the given heap region, i.e. from NTAMS to top of the given region.
-  // Returns whether there has been any allocation in this region since the last marking.
-  bool mark_allocated_since_marking(HeapRegion* hr) {
-    reset_mark_cache();
-
-    HeapWord* ntams = hr->next_top_at_mark_start();
-    HeapWord* top   = hr->top();
-
-    assert(hr->bottom() <= ntams && ntams <= hr->end(), "Preconditions.");
-
-    // Mark the allocated-since-marking portion...
-    if (ntams < top) {
-      mark_card_bitmap_range(ntams, top);
-      return true;
-    } else {
-      return false;
-    }
-  }
-
-  // Mark the range of bits covered by live objects on the mark bitmap between
-  // bottom and NTAMS of the given region.
-  // Returns the number of live bytes marked within that area for the given
-  // heap region.
-  size_t mark_marked_during_marking(G1CMBitMap* mark_bitmap, HeapRegion* hr) {
-    reset_mark_cache();
-
-    size_t marked_bytes = 0;
-
-    HeapWord* ntams = hr->next_top_at_mark_start();
-    HeapWord* start = hr->bottom();
-
-    if (ntams <= start) {
-      // Skip empty regions.
-      return 0;
-    }
-    if (hr->is_humongous()) {
-      HeapRegion* start_region = hr->humongous_start_region();
-      if (mark_bitmap->is_marked(start_region->bottom())) {
-        mark_card_bitmap_range(start, hr->top());
-        return pointer_delta(hr->top(), start, 1);
-      } else {
-        // Humongous start object was actually dead.
-        return 0;
-      }
-    }
-
-    assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
-           "Preconditions not met - "
-           "start: " PTR_FORMAT ", ntams: " PTR_FORMAT ", end: " PTR_FORMAT,
-           p2i(start), p2i(ntams), p2i(hr->end()));
-
-    // Find the first marked object at or after "start".
-    start = mark_bitmap->get_next_marked_addr(start, ntams);
-    while (start < ntams) {
-      oop obj = oop(start);
-      size_t obj_size = obj->size();
-      HeapWord* obj_end = start + obj_size;
-
-      assert(obj_end <= hr->end(), "Humongous objects must have been handled elsewhere.");
-
-      mark_card_bitmap_range(start, obj_end);
-
-      // Add the size of this object to the number of marked bytes.
-      marked_bytes += obj_size * HeapWordSize;
-
-      // Find the next marked object after this one.
-      start = mark_bitmap->get_next_marked_addr(obj_end, ntams);
-    }
-
-    return marked_bytes;
-  }
-
-  G1CardLiveDataHelper(G1CardLiveData* live_data, HeapWord* base_address) :
-    _region_bm(live_data->live_regions_bm()),
-    _card_bm(live_data->live_cards_bm()) {
-    // Calculate the card number for the bottom of the heap. Used
-    // in biasing indexes into the accounting card bitmaps.
-    _heap_card_bias =
-      uintptr_t(base_address) >> G1CardTable::card_shift;
-  }
-};
-
-class G1CreateCardLiveDataTask: public AbstractGangTask {
-  // Aggregate the counting data that was constructed concurrently
-  // with marking.
-  class G1CreateLiveDataClosure : public HeapRegionClosure {
-    G1CardLiveDataHelper _helper;
-
-    G1CMBitMap* _mark_bitmap;
-
-    G1ConcurrentMark* _cm;
-  public:
-    G1CreateLiveDataClosure(G1CollectedHeap* g1h,
-                            G1ConcurrentMark* cm,
-                            G1CMBitMap* mark_bitmap,
-                            G1CardLiveData* live_data) :
-      HeapRegionClosure(),
-      _helper(live_data, g1h->reserved_region().start()),
-      _mark_bitmap(mark_bitmap),
-      _cm(cm) { }
-
-    bool do_heap_region(HeapRegion* hr) {
-      size_t marked_bytes = _helper.mark_marked_during_marking(_mark_bitmap, hr);
-      if (marked_bytes > 0) {
-        hr->add_to_marked_bytes(marked_bytes);
-      }
-
-      return (_cm->do_yield_check() && _cm->has_aborted());
-    }
-  };
-
-  G1ConcurrentMark* _cm;
-  G1CardLiveData* _live_data;
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1CreateCardLiveDataTask(G1CMBitMap* bitmap,
-                           G1CardLiveData* live_data,
-                           uint n_workers) :
-      AbstractGangTask("G1 Create Live Data"),
-      _live_data(live_data),
-      _hr_claimer(n_workers) {
-  }
-
-  void work(uint worker_id) {
-    SuspendibleThreadSetJoiner sts_join;
-
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    G1ConcurrentMark* cm = g1h->concurrent_mark();
-    G1CreateLiveDataClosure cl(g1h, cm, cm->next_mark_bitmap(), _live_data);
-    g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-  }
-};
-
-void G1CardLiveData::create(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  _gc_timestamp_at_create = G1CollectedHeap::heap()->get_gc_time_stamp();
-
-  uint n_workers = workers->active_workers();
-
-  G1CreateCardLiveDataTask cl(mark_bitmap,
-                              this,
-                              n_workers);
-  workers->run_task(&cl);
-}
-
-class G1FinalizeCardLiveDataTask: public AbstractGangTask {
-  // Finalizes the liveness counting data.
-  // Sets the bits corresponding to the interval [NTAMS, top]
-  // (which contains the implicitly live objects) in the
-  // card liveness bitmap. Also sets the bit for each region
-  // containing live data, in the region liveness bitmap.
-  class G1FinalizeCardLiveDataClosure: public HeapRegionClosure {
-  private:
-    G1CardLiveDataHelper _helper;
-
-    uint _gc_timestamp_at_create;
-
-    bool has_been_reclaimed(HeapRegion* hr) const {
-      return hr->get_gc_time_stamp() > _gc_timestamp_at_create;
-    }
-  public:
-    G1FinalizeCardLiveDataClosure(G1CollectedHeap* g1h,
-                                  G1CMBitMap* bitmap,
-                                  G1CardLiveData* live_data) :
-      HeapRegionClosure(),
-      _helper(live_data, g1h->reserved_region().start()),
-      _gc_timestamp_at_create(live_data->gc_timestamp_at_create()) { }
-
-    bool do_heap_region(HeapRegion* hr) {
-      if (has_been_reclaimed(hr)) {
-        _helper.reset_live_data(hr);
-      }
-      bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
-      if (allocated_since_marking || hr->next_marked_bytes() > 0) {
-        _helper.set_bit_for_region(hr);
-      }
-      return false;
-    }
-  };
-
-  G1CMBitMap* _bitmap;
-
-  G1CardLiveData* _live_data;
-
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1FinalizeCardLiveDataTask(G1CMBitMap* bitmap, G1CardLiveData* live_data, uint n_workers) :
-    AbstractGangTask("G1 Finalize Card Live Data"),
-    _bitmap(bitmap),
-    _live_data(live_data),
-    _hr_claimer(n_workers) {
-  }
-
-  void work(uint worker_id) {
-    G1FinalizeCardLiveDataClosure cl(G1CollectedHeap::heap(), _bitmap, _live_data);
-
-    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-  }
-};
-
-void G1CardLiveData::finalize(WorkGang* workers, G1CMBitMap* mark_bitmap) {
-  // Finalize the live data.
-  G1FinalizeCardLiveDataTask cl(mark_bitmap,
-                                this,
-                                workers->active_workers());
-  workers->run_task(&cl);
-}
-
-class G1ClearCardLiveDataTask : public AbstractGangTask {
-  BitMapView _bitmap;
-  size_t     _num_chunks;
-  size_t     _cur_chunk;
-public:
-  G1ClearCardLiveDataTask(const BitMapView& bitmap, size_t num_tasks) :
-    AbstractGangTask("G1 Clear Card Live Data"),
-    _bitmap(bitmap),
-    _num_chunks(num_tasks),
-    _cur_chunk(0) {
-  }
-
-  static size_t chunk_size() { return M; }
-
-  virtual void work(uint worker_id) {
-    while (true) {
-      size_t to_process = Atomic::add(1u, &_cur_chunk) - 1;
-      if (to_process >= _num_chunks) {
-        break;
-      }
-
-      BitMap::idx_t start = M * BitsPerByte * to_process;
-      BitMap::idx_t end = MIN2(start + M * BitsPerByte, _bitmap.size());
-      _bitmap.clear_range(start, end);
-    }
-  }
-};
-
-void G1CardLiveData::clear(WorkGang* workers) {
-  guarantee(Universe::is_fully_initialized(), "Should not call this during initialization.");
-
-  size_t const num_chunks = align_up(live_cards_bm().size_in_bytes(), G1ClearCardLiveDataTask::chunk_size()) / G1ClearCardLiveDataTask::chunk_size();
-  uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
-
-  G1ClearCardLiveDataTask cl(live_cards_bm(), num_chunks);
-
-  log_debug(gc, ergo)("Running %s using %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
-  workers->run_task(&cl, num_workers);
-
-  // The region live bitmap is always very small, even for huge heaps. Clear
-  // directly.
-  live_regions_bm().clear();
-}
-
-class G1VerifyCardLiveDataTask: public AbstractGangTask {
-  // Heap region closure used for verifying the live count data
-  // that was created concurrently and finalized during
-  // the remark pause. This closure is applied to the heap
-  // regions during the STW cleanup pause.
-  class G1VerifyCardLiveDataClosure: public HeapRegionClosure {
-  private:
-    G1CollectedHeap* _g1h;
-    G1CMBitMap* _mark_bitmap;
-    G1CardLiveDataHelper _helper;
-
-    G1CardLiveData* _act_live_data;
-
-    G1CardLiveData* _exp_live_data;
-
-    int _failures;
-
-    // Completely recreates the live data count for the given heap region and
-    // returns the number of bytes marked.
-    size_t create_live_data_count(HeapRegion* hr) {
-      size_t bytes_marked = _helper.mark_marked_during_marking(_mark_bitmap, hr);
-      bool allocated_since_marking = _helper.mark_allocated_since_marking(hr);
-      if (allocated_since_marking || bytes_marked > 0) {
-        _helper.set_bit_for_region(hr);
-      }
-      return bytes_marked;
-    }
-  public:
-    G1VerifyCardLiveDataClosure(G1CollectedHeap* g1h,
-                                G1CMBitMap* mark_bitmap,
-                                G1CardLiveData* act_live_data,
-                                G1CardLiveData* exp_live_data) :
-      _g1h(g1h),
-      _mark_bitmap(mark_bitmap),
-      _helper(exp_live_data, g1h->reserved_region().start()),
-      _act_live_data(act_live_data),
-      _exp_live_data(exp_live_data),
-      _failures(0) { }
-
-    int failures() const { return _failures; }
-
-    bool do_heap_region(HeapRegion* hr) {
-      int failures = 0;
-
-      // Walk the marking bitmap for this region and set the corresponding bits
-      // in the expected region and card bitmaps.
-      size_t exp_marked_bytes = create_live_data_count(hr);
-      size_t act_marked_bytes = hr->next_marked_bytes();
-      // Verify the marked bytes for this region.
-
-      if (exp_marked_bytes != act_marked_bytes) {
-        log_error(gc)("Expected marked bytes " SIZE_FORMAT " != actual marked bytes " SIZE_FORMAT " in region %u", exp_marked_bytes, act_marked_bytes, hr->hrm_index());
-        failures += 1;
-      } else if (exp_marked_bytes > HeapRegion::GrainBytes) {
-        log_error(gc)("Expected marked bytes " SIZE_FORMAT " larger than possible " SIZE_FORMAT " in region %u", exp_marked_bytes, HeapRegion::GrainBytes, hr->hrm_index());
-        failures += 1;
-      }
-
-      // Verify the bit, for this region, in the actual and expected
-      // (which was just calculated) region bit maps.
-      uint index = hr->hrm_index();
-
-      bool expected = _exp_live_data->is_region_live(index);
-      bool actual = _act_live_data->is_region_live(index);
-      if (expected != actual) {
-        log_error(gc)("Expected liveness %d not equal actual %d in region %u", expected, actual, hr->hrm_index());
-        failures += 1;
-      }
-
-      // Verify that the card bit maps for the cards spanned by the current
-      // region match.
-      BitMap::idx_t start_idx = _helper.card_live_bitmap_index_for(hr->bottom());
-      BitMap::idx_t end_idx = _helper.card_live_bitmap_index_for(hr->top());
-
-      for (BitMap::idx_t i = start_idx; i < end_idx; i+=1) {
-        expected = _exp_live_data->is_card_live_at(i);
-        actual = _act_live_data->is_card_live_at(i);
-
-        if (expected != actual) {
-          log_error(gc)("Expected card liveness %d not equal actual card liveness %d at card " SIZE_FORMAT " in region %u", expected, actual, i, hr->hrm_index());
-          failures += 1;
-        }
-      }
-
-      _failures += failures;
-
-      // We could stop iteration over the heap when we
-      // find the first violating region by returning true.
-      return false;
-    }
-  };
-protected:
-  G1CollectedHeap* _g1h;
-  G1CMBitMap* _mark_bitmap;
-
-  G1CardLiveData* _act_live_data;
-
-  G1CardLiveData _exp_live_data;
-
-  int  _failures;
-
-  HeapRegionClaimer _hr_claimer;
-
-public:
-  G1VerifyCardLiveDataTask(G1CMBitMap* bitmap,
-                           G1CardLiveData* act_live_data,
-                           uint n_workers)
-  : AbstractGangTask("G1 Verify Card Live Data"),
-    _g1h(G1CollectedHeap::heap()),
-    _mark_bitmap(bitmap),
-    _act_live_data(act_live_data),
-    _exp_live_data(),
-    _failures(0),
-    _hr_claimer(n_workers) {
-    assert(VerifyDuringGC, "don't call this otherwise");
-    _exp_live_data.initialize(_g1h->max_capacity(), _g1h->max_regions());
-  }
-
-  void work(uint worker_id) {
-    G1VerifyCardLiveDataClosure cl(_g1h,
-                                   _mark_bitmap,
-                                   _act_live_data,
-                                   &_exp_live_data);
-    _g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hr_claimer, worker_id);
-
-    Atomic::add(cl.failures(), &_failures);
-  }
-
-  int failures() const { return _failures; }
-};
-
-void G1CardLiveData::verify(WorkGang* workers, G1CMBitMap* actual_bitmap) {
-    ResourceMark rm;
-
-    G1VerifyCardLiveDataTask cl(actual_bitmap,
-                                this,
-                                workers->active_workers());
-    workers->run_task(&cl);
-
-    guarantee(cl.failures() == 0, "Unexpected accounting failures");
-}
-
-#ifndef PRODUCT
-void G1CardLiveData::verify_is_clear() {
-  assert(live_cards_bm().count_one_bits() == 0, "Live cards bitmap must be clear.");
-  assert(live_regions_bm().count_one_bits() == 0, "Live regions bitmap must be clear.");
-}
-#endif
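
Among the deleted machinery, G1ClearCardLiveDataTask's work() shows a reusable pattern: workers claim fixed-size chunks of the bitmap by atomically bumping a shared counter. A standalone sketch with std::atomic in place of HotSpot's Atomic::add (fetch_add returns the old value, so the "- 1" from the original is not needed):

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <vector>

    void clear_in_chunks(std::vector<char>& bitmap,
                         std::atomic<size_t>& cur_chunk,
                         size_t chunk_bytes, size_t num_chunks) {
      while (true) {
        size_t to_process = cur_chunk.fetch_add(1); // claim next chunk
        if (to_process >= num_chunks) {
          break;                                    // all chunks claimed
        }
        size_t start = to_process * chunk_bytes;
        size_t end   = std::min(start + chunk_bytes, bitmap.size());
        std::fill(bitmap.begin() + start, bitmap.begin() + end, 0);
      }
    }
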
--- a/src/hotspot/share/gc/g1/g1CardLiveData.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP
-#define SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP
-
-#include "gc/g1/g1CollectedHeap.hpp"
-#include "utilities/bitMap.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class G1CollectedHeap;
-class G1CMBitMap;
-class WorkGang;
-
-// Information about object liveness on the Java heap on a "card" basis.
-// Can be used for various purposes, like as remembered set for completely
-// coarsened remembered sets, scrubbing remembered sets or estimating liveness.
-// This information is created as part of the concurrent marking cycle.
-class G1CardLiveData {
-  friend class G1CardLiveDataHelper;
-  friend class G1VerifyCardLiveDataTask;
-private:
-  typedef BitMap::bm_word_t bm_word_t;
-  // Store some additional information about the covered area to be able to test.
-  size_t _max_capacity;
-  size_t _cards_per_region;
-
-  // Regions may be reclaimed while concurrently creating live data (e.g. due to humongous
-  // eager reclaim). This results in wrong live data for these regions at the end.
-  // So we need to somehow detect these regions, and during live data finalization completely
-  // recreate their information.
-  // This _gc_timestamp_at_create tracks the global timestamp when live data creation
-  // has started. Any regions with a higher time stamp have been cleared after that
-  // point in time, and need re-finalization.
-  // Unsynchronized access to this variable is okay, since this value is only set during a
-  // concurrent phase, and read only at the Cleanup safepoint. I.e. there is always
-  // full memory synchronization in between.
-  uint _gc_timestamp_at_create;
-  // The per-card liveness bitmap.
-  bm_word_t* _live_cards;
-  size_t _live_cards_size_in_bits;
-  // The per-region liveness bitmap.
-  bm_word_t* _live_regions;
-  size_t _live_regions_size_in_bits;
-  // Each bit in this bitmap indicates whether the corresponding card contains
-  // at least part of at least one live object.
-  BitMapView live_cards_bm() const { return BitMapView(_live_cards, _live_cards_size_in_bits); }
-  // The bits in this bitmap indicate that a given region contains some live objects.
-  BitMapView live_regions_bm() const { return BitMapView(_live_regions, _live_regions_size_in_bits); }
-
-  // Allocate a "large" bitmap from virtual memory with the given size in bits.
-  bm_word_t* allocate_large_bitmap(size_t size_in_bits);
-  void free_large_bitmap(bm_word_t* map, size_t size_in_bits);
-
-  inline BitMapView live_card_bitmap(uint region);
-
-  inline bool is_card_live_at(BitMap::idx_t idx) const;
-
-  size_t live_region_bitmap_size_in_bits() const;
-  size_t live_card_bitmap_size_in_bits() const;
-public:
-  uint gc_timestamp_at_create() const { return _gc_timestamp_at_create; }
-
-  inline bool is_region_live(uint region) const;
-
-  inline void remove_nonlive_cards(uint region, BitMap* bm);
-  inline void remove_nonlive_regions(BitMap* bm);
-
-  G1CardLiveData();
-  ~G1CardLiveData();
-
-  void initialize(size_t max_capacity, uint num_max_regions);
-  void pretouch();
-
-  // Create the initial liveness data based on the marking result from the bottom
-  // to the ntams of every region in the heap and the marks in the given bitmap.
-  void create(WorkGang* workers, G1CMBitMap* mark_bitmap);
-  // Finalize the liveness data.
-  void finalize(WorkGang* workers, G1CMBitMap* mark_bitmap);
-
-  // Verify that the liveness count data created concurrently matches one created
-  // during this safepoint.
-  void verify(WorkGang* workers, G1CMBitMap* actual_bitmap);
-  // Clear all data structures, prepare for next processing.
-  void clear(WorkGang* workers);
-
-  void verify_is_clear() PRODUCT_RETURN;
-};
-
-#endif /* SHARE_VM_GC_G1_G1CARDLIVEDATA_HPP */
-
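
For a sense of scale of the two bitmaps this header declared: the card bitmap needed max_capacity >> card_shift bits and the region bitmap one bit per region. A worked example with assumed (not HotSpot-mandated) sizes:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t max_capacity = 1024UL * 1024 * 1024; // assumed 1 GB heap
      const size_t card_shift   = 9;                    // 512-byte cards
      const size_t region_bytes = 1024 * 1024;          // assumed 1 MB regions

      size_t card_bits   = max_capacity >> card_shift;  // 2,097,152 bits
      size_t region_bits = max_capacity / region_bytes; // 1,024 bits

      printf("card bitmap: %zu bits (%zu KB), region bitmap: %zu bits\n",
             card_bits, card_bits / 8 / 1024, region_bits); // 256 KB vs. tiny
      return 0;
    }
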
--- a/src/hotspot/share/gc/g1/g1CardLiveData.inline.hpp	Thu Apr 12 16:25:29 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP
-#define SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP
-
-#include "gc/g1/g1CardLiveData.hpp"
-#include "utilities/bitMap.inline.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-inline BitMapView G1CardLiveData::live_card_bitmap(uint region) {
-  return BitMapView(_live_cards + ((size_t)region * _cards_per_region >> LogBitsPerWord), _cards_per_region);
-}
-
-inline bool G1CardLiveData::is_card_live_at(BitMap::idx_t idx) const {
-  return live_cards_bm().at(idx);
-}
-
-inline bool G1CardLiveData::is_region_live(uint region) const {
-  return live_regions_bm().at(region);
-}
-
-inline void G1CardLiveData::remove_nonlive_cards(uint region, BitMap* bm) {
-  bm->set_intersection(live_card_bitmap(region));
-}
-
-inline void G1CardLiveData::remove_nonlive_regions(BitMap* bm) {
-  bm->set_intersection(live_regions_bm());
-}
-
-#endif /* SHARE_VM_GC_G1_G1CARDLIVEDATA_INLINE_HPP */
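
The deleted live_card_bitmap() computed a per-region view by pure index arithmetic: region r's card bits start at bit r * _cards_per_region, i.e. at word (r * _cards_per_region) >> LogBitsPerWord. A small worked example with assumed sizes:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t cards_per_region  = 2048; // assumed: 1 MB region / 512 B cards
      const size_t log_bits_per_word = 6;    // 64-bit words
      for (size_t region = 0; region < 3; region++) {
        size_t word_offset = (region * cards_per_region) >> log_bits_per_word;
        printf("region %zu -> word offset %zu\n", region, word_offset); // 0, 32, 64
      }
      return 0;
    }
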
--- a/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -28,14 +28,16 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <typename T>
 void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
   _work->do_oop(p);
-  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(oop_or_narrowoop)) {
-    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+  T oop_or_narrowoop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(oop_or_narrowoop)) {
+    oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
     HeapRegion* hr = _g1h->heap_region_containing(o);
     assert(!_g1h->is_in_cset(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
     hr->add_strong_code_root(_nm);
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -30,6 +30,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/stack.inline.hpp"
@@ -274,7 +275,7 @@
 
     template <typename T>
     void do_oop_work(T* p) {
-      if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+      if (_hr->is_in(RawAccess<>::oop_load(p))) {
         _points_into = true;
       }
     }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Apr 12 16:25:29 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Apr 13 09:04:18 2018 -0700
@@ -29,7 +29,6 @@
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
 #include "gc/g1/bufferingOopClosure.hpp"
-#include "gc/g1/concurrentMarkThread.inline.hpp"
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
@@ -37,6 +36,7 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1ConcurrentRefineThread.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1FullCollector.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -62,7 +62,7 @@
 #include "gc/shared/adaptiveSizePolicy.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
@@ -77,6 +77,8 @@
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
@@ -154,63 +156,13 @@
 
 // Private methods.
 
-HeapRegion*
-G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
-  MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
-  while (!_secondary_free_list.is_empty() || free_regions_coming()) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "secondary_free_list has %u entries",
-                                      _secondary_free_list.length());
-      // It looks as if there are free regions available on the
-      // secondary_free_list. Let's move them to the free_list and try
-      // again to allocate from it.
-      append_secondary_free_list();
-
-      assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
-             "empty we should have moved at least one entry to the free_list");
-      HeapRegion* res = _hrm.allocate_free_region(is_old);
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "allocated " HR_FORMAT " from secondary_free_list",
-                                      HR_FORMAT_PARAMS(res));
-      return res;
-    }
-
-    // Wait here until we get notified either when (a) there are no
-    // more free regions coming or (b) some regions have been moved on
-    // the secondary_free_list.
-    SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
-  }
-
-  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                  "could not allocate from secondary_free_list");
-  return NULL;
-}
-
 HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool is_old, bool do_expand) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res;
-  if (G1StressConcRegionFreeing) {
-    if (!_secondary_free_list.is_empty()) {
-      log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                      "forced to look at the secondary_free_list");
-      res = new_region_try_secondary_free_list(is_old);
-      if (res != NULL) {
-        return res;
-      }
-    }
-  }
-
-  res = _hrm.allocate_free_region(is_old);
-
-  if (res == NULL) {
-    log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : "
-                                    "res == NULL, trying the secondary_free_list");
-    res = new_region_try_secondary_free_list(is_old);
-  }
+  HeapRegion* res = _hrm.allocate_free_region(is_old);
+
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
     // do_expand to true. So, we should only reach here during a
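
With the secondary free list gone, allocation reduces to "take from the free list, else (for GC alloc regions only) expand and retry". A toy model of that simplified control flow; all types and the expansion policy here are illustrative:

    #include <vector>

    struct ToyHeap {
      std::vector<int> free_regions;  // ids of committed, free regions
      bool can_expand = true;

      bool expand() {                 // commit one more region
        if (!can_expand) return false;
        free_regions.push_back((int)free_regions.size());
        return true;
      }

      int new_region(bool do_expand) {
        if (free_regions.empty() && do_expand) {
          expand();                   // failure is fine; re-checked below
        }
        if (free_regions.empty()) {
          return -1;                  // allocation failed
        }
        int r = free_regions.back();
        free_regions.pop_back();
        return r;
      }
    };
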
@@ -301,12 +253,14 @@
   // that there is a single object that starts at the bottom of the
   // first region.
   first_hr->set_starts_humongous(obj_top, word_fill_size);
+  _g1_policy->remset_tracker()->update_at_allocate(first_hr);
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
   for (uint i = first + 1; i <= last; ++i) {
     hr = region_at(i);
     hr->set_continues_humongous(first_hr);
+    _g1_policy->remset_tracker()->update_at_allocate(hr);
   }
 
   // Up to this point no concurrent thread would have been able to
@@ -376,17 +330,6 @@
       first = hr->hrm_index();
     }
   } else {
-    // We can't allocate humongous regions spanning more than one region while
-    // cleanupComplete() is running, since some of the regions we find to be
-    // empty might not yet be added to the free list. It is not straightforward
-    // to know in which list they are on so that we can remove them. We only
-    // need to do this if we need to allocate more than one region to satisfy the
-    // current humongous allocation request. If we are only allocating one region,
-    // we use the one-region allocation code (see above), which already
-    // potentially waits for regions from the secondary free list.
-    wait_while_free_regions_coming();
-    append_secondary_free_list_if_not_empty_with_lock();
-
     // Policy: Try only empty regions (i.e. already committed first). Maybe we
     // are lucky enough to find some.
     first = _hrm.find_contiguous_only_empty(obj_regions);
@@ -1022,11 +965,6 @@
 }
 
 void G1CollectedHeap::abort_concurrent_cycle() {
-  // Note: When we have a more flexible GC logging framework that
-  // allows us to add optional attributes to a GC log record we
-  // could consider timing and reporting how long we wait in the
-  // following two methods.
-  wait_while_free_regions_coming();
   // If we start the compaction before the CM threads finish
   // scanning the root regions we might trip them over as we'll
   // be moving objects / updating references. So let's wait until
@@ -1034,7 +972,6 @@
   // early.
   _cm->root_regions()->abort();
   _cm->root_regions()->wait_until_scan_finished();
-  append_secondary_free_list_if_not_empty_with_lock();
 
   // Disable discovery and empty the discovered lists
   // for the CM ref processor.
@@ -1044,7 +981,7 @@
 
   // Abandon current iterations of concurrent marking and concurrent
   // refinement, if any are in progress.
-  concurrent_mark()->abort();
+  concurrent_mark()->concurrent_cycle_abort();
 }
 
 void G1CollectedHeap::prepare_heap_for_full_collection() {
@@ -1060,7 +997,6 @@
   abandon_collection_set(collection_set());
 
   tear_down_region_sets(false /* free_list_only */);
-  collector_state()->set_gcs_are_young(true);
 }
 
 void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
@@ -1105,7 +1041,6 @@
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
-  check_gc_time_stamps();
   _hrm.verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
@@ -1472,14 +1407,11 @@
   _cr(NULL),
   _g1mm(NULL),
   _preserved_marks_set(true /* in_c_heap */),
-  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
   _humongous_reclaim_candidates(),
   _has_humongous_reclaim_candidates(false),
   _archive_allocator(NULL),
-  _free_regions_coming(false),
-  _gc_time_stamp(0),
   _summary_bytes_used(0),
   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
   _old_evac_stats("Old", OldPLABSize, PLABWeight),
@@ -1896,41 +1828,6 @@
   return _hrm.total_free_bytes();
 }
 
-void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
-  hr->reset_gc_time_stamp();
-}
-
-#ifndef PRODUCT
-
-class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
-private:
-  unsigned _gc_time_stamp;
-  bool _failures;
-
-public:
-  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
-    _gc_time_stamp(gc_time_stamp), _failures(false) { }
-
-  virtual bool do_heap_region(HeapRegion* hr) {
-    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
-    if (_gc_time_stamp != region_gc_time_stamp) {
-      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr),
-                            region_gc_time_stamp, _gc_time_stamp);
-      _failures = true;
-    }
-    return false;
-  }
-
-  bool failures() { return _failures; }
-};
-
-void G1CollectedHeap::check_gc_time_stamps() {
-  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
-  heap_region_iterate(&cl);
-  guarantee(!cl.failures(), "all GC time stamps should have been reset");
-}
-#endif // PRODUCT
-
 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
   _hot_card_cache->drain(cl, worker_i);
 }
@@ -2351,7 +2248,7 @@
 void G1CollectedHeap::print_regions_on(outputStream* st) const {
   st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
                "HS=humongous(starts), HC=humongous(continues), "
-               "CS=collection set, F=free, A=archive, TS=gc time stamp, "
+               "CS=collection set, F=free, A=archive, "
                "TAMS=top-at-mark-start (previous, next)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
@@ -2482,7 +2379,7 @@
 G1CollectedHeap* G1CollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to G1CollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::G1CollectedHeap, "Not a G1CollectedHeap");
+  assert(heap->kind() == CollectedHeap::G1, "Invalid name");
   return (G1CollectedHeap*)heap;
 }
 
@@ -2497,9 +2394,6 @@
   increment_total_collections(full /* full gc */);
   if (full) {
     increment_old_marking_cycles_started();
-    reset_gc_time_stamp();
-  } else {
-    increment_gc_time_stamp();
   }
 
   // Fill TLAB's and such
@@ -2559,8 +2453,7 @@
   return result;
 }
 
-void
-G1CollectedHeap::doConcurrentMark() {
+void G1CollectedHeap::do_concurrent_mark() {
   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
   if (!_cmThread->in_progress()) {
     _cmThread->set_started();
@@ -2581,6 +2474,16 @@
   return buffer_size * buffer_num + extra_cards;
 }
 
+bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
+  // We don't nominate objects with many remembered set entries, on
+  // the assumption that such objects are likely still live.
+  HeapRegionRemSet* rem_set = r->rem_set();
+
+  return G1EagerReclaimHumongousObjectsWithStaleRefs ?
+         rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
+         G1EagerReclaimHumongousObjects && rem_set->is_empty();
+}
+
 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
  private:
   size_t _total_humongous;
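
The candidate test centralized above reads: with G1EagerReclaimHumongousObjectsWithStaleRefs, a humongous object may keep a small ("sparse") remembered set and still be reclaimed eagerly; otherwise its remembered set must be empty. A toy restatement, with the flags and threshold passed in explicitly since the HotSpot globals are not available here:

    #include <cstddef>

    struct ToyRemSet {
      size_t occupied;
      bool is_empty() const { return occupied == 0; }
      bool occupancy_less_or_equal_than(size_t limit) const { return occupied <= limit; }
    };

    bool is_potential_candidate(const ToyRemSet& rs,
                                bool reclaim_with_stale_refs, // flag analogue
                                bool reclaim_at_all,          // flag analogue
                                size_t sparse_entries) {      // threshold analogue
      return reclaim_with_stale_refs
                 ? rs.occupancy_less_or_equal_than(sparse_entries)
                 : (reclaim_at_all && rs.is_empty());
    }
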
@@ -2588,26 +2491,22 @@
 
   DirtyCardQueue _dcq;
 
-  // We don't nominate objects with many remembered set entries, on
-  // the assumption that such objects are likely still live.
-  bool is_remset_small(HeapRegion* region) const {
-    HeapRegionRemSet* const rset = region->rem_set();
-    return G1EagerReclaimHumongousObjectsWithStaleRefs
-      ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
-      : rset->is_empty();
-  }
-
-  bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
+  bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
     assert(region->is_starts_humongous(), "Must start a humongous object");
 
     oop obj = oop(region->bottom());
 
     // Dead objects cannot be eager reclaim candidates. Due to class
     // unloading it is unsafe to query their classes so we return early.
-    if (heap->is_obj_dead(obj, region)) {
+    if (g1h->is_obj_dead(obj, region)) {
       return false;
     }
 
+    // If we do not have a complete remembered set for the region, then we
+    // cannot be sure that we have all references to it.
+    if (!region->rem_set()->is_complete()) {
+      return false;
+    }
     // Candidate selection must satisfy the following constraints
     // while concurrent marking is in progress:
     //
@@ -2644,7 +2543,8 @@
     // important use case for eager reclaim, and this special handling
     // may reduce needed headroom.
 
-    return obj->is_typeArray() && is_remset_small(region);
+    return obj->is_typeArray() &&
+           g1h->is_potential_eager_reclaim_candidate(region);
   }
 
  public:
@@ -2692,7 +2592,15 @@
         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
                hrrs.n_yielded(), r->rem_set()->occupied());
-        r->rem_set()->clear_locked();
+        // We should only clear the card-based remembered set here, as we will not
+        // implicitly rebuild anything else during eager reclaim. Note that at the
+        // moment (and probably never) we do not enter this path if there are other
+        // kinds of remembered sets for this region.
+        r->rem_set()->clear_locked(true /* only_cardset */);
+        // clear_locked() above sets the state to Empty. However, we want to continue
+        // collecting remembered set entries for humongous regions that were not
+        // reclaimed.
+        r->rem_set()->set_state_complete();
       }
       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
     }
@@ -2846,28 +2754,28 @@
   // We should not be doing initial mark unless the conc mark thread is running
   if (!_cmThread->should_terminate()) {
     // This call will decide whether this pause is an initial-mark
-    // pause. If it is, during_initial_mark_pause() will return true
+    // pause. If it is, in_initial_mark_gc() will return true
     // for the duration of this pause.
     g1_policy()->decide_on_conc_mark_initiation();
   }
 
   // We do not allow initial-mark to be piggy-backed on a mixed GC.
-  assert(!collector_state()->during_initial_mark_pause() ||
-          collector_state()->gcs_are_young(), "sanity");
+  assert(!collector_state()->in_initial_mark_gc() ||
+          collector_state()->in_young_only_phase(), "sanity");
 
   // We also do not allow mixed GCs during marking.
-  assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
+  assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
 
   // Record whether this pause is an initial mark. When the current
   // thread has completed its logging output and it's safe to signal
   // the CM thread, the flag's value in the policy has been reset.
-  bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
+  bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
 
   // Inner scope for scope based logging, timers, and stats collection
   {
     EvacuationInfo evacuation_info;
 
-    if (collector_state()->during_initial_mark_pause()) {
+    if (collector_state()->in_initial_mark_gc()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
       increment_old_marking_cycles_started();
@@ -2880,10 +2788,10 @@
 
     G1HeapVerifier::G1VerifyType verify_type;
     FormatBuffer<> gc_string("Pause ");
-    if (collector_state()->during_initial_mark_pause()) {
+    if (collector_state()->in_initial_mark_gc()) {
       gc_string.append("Initial Mark");
       verify_type = G1HeapVerifier::G1VerifyInitialMark;
-    } else if (collector_state()->gcs_are_young()) {
+    } else if (collector_state()->in_young_only_phase()) {
       gc_string.append("Young");
       verify_type = G1HeapVerifier::G1VerifyYoungOnly;
     } else {
@@ -2895,22 +2803,12 @@
     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                                                   workers()->active_workers(),
                                                                   Threads::number_of_non_daemon_threads());
-    workers()->update_active_workers(active_workers);
+    active_workers = workers()->update_active_workers(active_workers);
     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
 
     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
     TraceMemoryManagerStats tms(&_memory_manager, gc_cause());
 
-    // If the secondary_free_list is not empty, append it to the
-    // free_list. No need to wait for the cleanup operation to finish;
-    // the region allocation code will check the secondary_free_list
-    // and wait if necessary. If the G1StressConcRegionFreeing flag is
-    // set, skip this step so that the region allocation code has to
-    // get entries from the secondary_free_list.
-    if (!G1StressConcRegionFreeing) {
-      append_secondary_free_list_if_not_empty_with_lock();
-    }
-
     G1HeapTransition heap_transition(this);
     size_t heap_used_bytes_before_gc = used();
 
@@ -2971,8 +2869,8 @@
 
         g1_policy()->record_collection_pause_start(sample_start_time_sec);
 
-        if (collector_state()->during_initial_mark_pause()) {
-          concurrent_mark()->checkpoint_roots_initial_pre();
+        if (collector_state()->in_initial_mark_gc()) {
+          concurrent_mark()->pre_initial_mark();
         }
 
         g1_policy()->finalize_collection_set(target_pause_time_ms, &_survivor);
@@ -3039,12 +2937,11 @@
           increase_used(g1_policy()->bytes_copied_during_gc());
         }
 
-        if (collector_state()->during_initial_mark_pause()) {
+        if (collector_state()->in_initial_mark_gc()) {
           // We have to do this before we notify the CM threads that
           // they can start working to make sure that all the
           // appropriate initialization is done on the CM object.
-          concurrent_mark()->checkpoint_roots_initial_post();
-          collector_state()->set_mark_in_progress(true);
+          concurrent_mark()->post_initial_mark();
           // Note that we don't actually trigger the CM thread at
           // this point. We do that later when we're sure that
           // the current thread has completed its logging output.
@@ -3151,7 +3048,7 @@
     // running. Note: of course, the actual marking work will
     // not start until the safepoint itself is released in
     // SuspendibleThreadSet::desynchronize().
-    doConcurrentMark();
+    do_concurrent_mark();
   }
 
   return true;
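
do_concurrent_mark() is a classic monitor hand-off: the pause-ending thread flips a started flag under CGC_lock and notifies the sleeping marking thread. A generic sketch of the same shape with standard C++ primitives in place of HotSpot's monitor:

    #include <condition_variable>
    #include <mutex>

    struct MarkThreadControl {
      std::mutex              lock;
      std::condition_variable cv;
      bool                    started = false;

      void start_marking() {          // caller side, at the end of the pause
        std::lock_guard<std::mutex> g(lock);
        started = true;
        cv.notify_one();
      }

      void wait_for_work() {          // marking thread side
        std::unique_lock<std::mutex> u(lock);
        cv.wait(u, [this] { return started; });
        started = false;              // consume the start request
      }
    };
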
@@ -3810,7 +3707,7 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
 
     if (_g1h->is_in_cset_or_humongous(obj)) {
       // If the referent object has been forwarded (either copied
@@ -4207,10 +4104,11 @@
 
   // If during an initial mark pause we install a pending list head which is not otherwise reachable
   // ensure that it is marked in the bitmap for concurrent marking to discover.
-  if (collector_state()->during_initial_mark_pause()) {
+  if (collector_state()->in_initial_mark_gc()) {
     oop pll_head = Universe::reference_pending_list();
     if (pll_head != NULL) {
-      _cm->mark_in_next_bitmap(pll_head);
+      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
+      _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);