changeset 52244:de2d2d3d1824

Automatic merge with default
author mcimadamore
date Thu, 23 Aug 2018 22:07:27 +0200
parents 64ddfaeb98da 6b0012622443
children d73adf8e2888
files src/hotspot/share/classfile/classFileParser.cpp src/hotspot/share/classfile/systemDictionary.hpp src/hotspot/share/classfile/vmSymbols.hpp src/hotspot/share/gc/g1/ptrQueue.cpp src/hotspot/share/gc/g1/ptrQueue.hpp src/hotspot/share/gc/g1/satbMarkQueue.cpp src/hotspot/share/gc/g1/satbMarkQueue.hpp src/hotspot/share/oops/klass.cpp src/hotspot/share/oops/klass.hpp src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64InstructionAttr.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64VectorAssembler.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/IntrinsificationPredicate.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/hotspot/NotOnDebug.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorLIRInstruction.java test/hotspot/jtreg/runtime/appcds/cacheObject/RangeNotWithinHeap.java test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libfreebl3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnspr4.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnss3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnssckbi.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnssdbm3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnssutil3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libplc4.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libplds4.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libsoftokn3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libsqlite3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libssl3.dylib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/freebl3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/freebl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nspr4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nspr4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nss3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nss3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssckbi.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssdbm3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssdbm3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssutil3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssutil3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plc4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plc4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plds4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plds4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/softokn3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/softokn3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/sqlite3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/ssl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/ssl3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/freebl3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-i586/freebl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nspr4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nspr4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nss3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nss3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssckbi.dll 
test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssdbm3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssdbm3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssutil3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssutil3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plc4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plc4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plds4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plds4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/softokn3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-i586/softokn3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/sqlite3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/ssl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/ssl3.lib
diffstat 786 files changed, 15117 insertions(+), 11639 deletions(-)
--- a/.hgtags	Thu Aug 16 22:07:21 2018 +0200
+++ b/.hgtags	Thu Aug 23 22:07:27 2018 +0200
@@ -500,8 +500,11 @@
 ea900a7dc7d77dee30865c60eabd87fc24b1037c jdk-11+24
 331888ea4a788df801b1edf8836646cd25fc758b jdk-11+25
 945ba9278a272a5477ffb1b3ea1b04174fed8036 jdk-11+26
+9d7d74c6f2cbe522e39fa22dc557fdd3f79b32ad jdk-11+27
 69b438908512d3dfef5852c6a843a5778333a309 jdk-12+2
 990db216e7199b2ba9989d8fa20b657e0ca7d969 jdk-12+3
 499b873761d8e8a1cc4aa649daf04cbe98cbce77 jdk-12+4
 f8696e0ab9b795030429fc3374ec03e378fd9ed7 jdk-12+5
 7939b3c4e4088bf4f70ec5bbd8030393b653372f jdk-12+6
+ef57958c7c511162da8d9a75f0b977f0f7ac464e jdk-12+7
+492b366f8e5784cc4927c2c98f9b8a3f16c067eb jdk-12+8
--- a/make/CompileJavaModules.gmk	Thu Aug 16 22:07:21 2018 +0200
+++ b/make/CompileJavaModules.gmk	Thu Aug 23 22:07:27 2018 +0200
@@ -511,6 +511,10 @@
     --add-exports jdk.internal.vm.ci/jdk.vm.ci.sparc=jdk.internal.vm.compiler,jdk.aot \
     #
 
+jdk.aot_EXCLUDES += \
+    jdk.tools.jaotc.test
+    #
+
 ################################################################################
 
 sun.charsets_COPY += .dat
--- a/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java	Thu Aug 16 22:07:21 2018 +0200
+++ b/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java	Thu Aug 23 22:07:27 2018 +0200
@@ -431,14 +431,12 @@
                             }
                             uses.put(name, statement);
                             break;
-                        /*  Disable this check until jdk.internal.vm.compiler generated file is fixed.
                         case "provides":
                             if (provides.containsKey(name)) {
                                 throw parser.newError("multiple " + keyword + " " + name);
                             }
                             provides.put(name, statement);
                             break;
-                        */
                     }
                     String lookAhead = lookAhead(parser);
                     if (lookAhead.equals(statement.qualifier)) {
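
The hunk above re-enables the duplicate-clause check for "provides" statements, which had been commented out until the generated jdk.internal.vm.compiler module-info was fixed. As a rough illustration only (the service and provider names are hypothetical and simply mirror the test case added to ModuleInfoExtraTest below), this is the kind of module-info extra input the tool now rejects:

    // illustrative fragment, not part of this changeset
    provides s with impl1;
    provides s with impl2, impl3;   // second clause for the same service "s" now fails with "multiple provides s"
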
--- a/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java	Thu Aug 16 22:07:21 2018 +0200
+++ b/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java	Thu Aug 23 22:07:27 2018 +0200
@@ -230,7 +230,11 @@
             new String[] {
                 "   uses s;",
                 "   uses s;"
-            },                      ".*, line .*, multiple uses s.*"
+            },                      ".*, line .*, multiple uses s.*",
+            new String[] {
+                "   provides s with impl1;",
+                "   provides s with impl2, impl3;"
+            },                      ".*, line .*, multiple provides s.*"
     );
 
     void errorCases() {
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu Aug 23 22:07:27 2018 +0200
@@ -1036,21 +1036,8 @@
   }
 };
 
-  // graph traversal helpers
-
-  MemBarNode *parent_membar(const Node *n);
-  MemBarNode *child_membar(const MemBarNode *n);
-  bool leading_membar(const MemBarNode *barrier);
-
-  bool is_card_mark_membar(const MemBarNode *barrier);
   bool is_CAS(int opcode);
 
-  MemBarNode *leading_to_normal(MemBarNode *leading);
-  MemBarNode *normal_to_leading(const MemBarNode *barrier);
-  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
-  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
-  MemBarNode *trailing_to_leading(const MemBarNode *trailing);
-
   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 
   bool unnecessary_acquire(const Node *barrier);
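
The predicates kept above (is_CAS, unnecessary_acquire and friends) decide when the AArch64 matcher may drop explicit dmb barriers and instead use load-acquire/store-release instruction forms for volatile accesses; the helper declarations removed here belonged to the old approach of re-deriving that information by walking the ideal graph. As a rough, source-level sketch only (the class below is hypothetical and not part of this changeset), these are the accesses the predicates are about and the instruction selection they typically enable:

    // Illustrative Java; the changeset itself only touches the C2 AArch64 back end.
    class VolatileExample {
        volatile int x;

        int readX() {
            return x;        // volatile load: may be emitted as ldar, eliding the trailing dmb
        }

        void writeX(int v) {
            x = v;           // volatile store: may be emitted as stlr, eliding the surrounding dmb barriers
        }
    }
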
@@ -1272,605 +1259,6 @@
   // relevant dmb instructions.
   //
 
-  // graph traversal helpers used for volatile put/get and CAS
-  // optimization
-
-  // 1) general purpose helpers
-
-  // if node n is linked to a parent MemBarNode by an intervening
-  // Control and Memory ProjNode return the MemBarNode otherwise return
-  // NULL.
-  //
-  // n may only be a Load or a MemBar.
-
-  MemBarNode *parent_membar(const Node *n)
-  {
-    Node *ctl = NULL;
-    Node *mem = NULL;
-    Node *membar = NULL;
-
-    if (n->is_Load()) {
-      ctl = n->lookup(LoadNode::Control);
-      mem = n->lookup(LoadNode::Memory);
-    } else if (n->is_MemBar()) {
-      ctl = n->lookup(TypeFunc::Control);
-      mem = n->lookup(TypeFunc::Memory);
-    } else {
-	return NULL;
-    }
-
-    if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
-      return NULL;
-    }
-
-    membar = ctl->lookup(0);
-
-    if (!membar || !membar->is_MemBar()) {
-      return NULL;
-    }
-
-    if (mem->lookup(0) != membar) {
-      return NULL;
-    }
-
-    return membar->as_MemBar();
-  }
-
-  // if n is linked to a child MemBarNode by intervening Control and
-  // Memory ProjNodes return the MemBarNode otherwise return NULL.
-
-  MemBarNode *child_membar(const MemBarNode *n)
-  {
-    ProjNode *ctl = n->proj_out_or_null(TypeFunc::Control);
-    ProjNode *mem = n->proj_out_or_null(TypeFunc::Memory);
-
-    // MemBar needs to have both a Ctl and Mem projection
-    if (! ctl || ! mem)
-      return NULL;
-
-    MemBarNode *child = NULL;
-    Node *x;
-
-    for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
-      x = ctl->fast_out(i);
-      // if we see a membar we keep hold of it. we may also see a new
-      // arena copy of the original but it will appear later
-      if (x->is_MemBar()) {
-	  child = x->as_MemBar();
-	  break;
-      }
-    }
-
-    if (child == NULL) {
-      return NULL;
-    }
-
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
-      x = mem->fast_out(i);
-      // if we see a membar we keep hold of it. we may also see a new
-      // arena copy of the original but it will appear later
-      if (x == child) {
-	return child;
-      }
-    }
-    return NULL;
-  }
-
-  // helper predicate use to filter candidates for a leading memory
-  // barrier
-  //
-  // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
-  // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
-
-  bool leading_membar(const MemBarNode *barrier)
-  {
-    int opcode = barrier->Opcode();
-    // if this is a release membar we are ok
-    if (opcode == Op_MemBarRelease) {
-      return true;
-    }
-    // if its a cpuorder membar . . .
-    if (opcode != Op_MemBarCPUOrder) {
-      return false;
-    }
-    // then the parent has to be a release membar
-    MemBarNode *parent = parent_membar(barrier);
-    if (!parent) {
-      return false;
-    }
-    opcode = parent->Opcode();
-    return opcode == Op_MemBarRelease;
-  }
-
-  // 2) card mark detection helper
-
-  // helper predicate which can be used to detect a volatile membar
-  // introduced as part of a conditional card mark sequence either by
-  // G1 or by CMS when UseCondCardMark is true.
-  //
-  // membar can be definitively determined to be part of a card mark
-  // sequence if and only if all the following hold
-  //
-  // i) it is a MemBarVolatile
-  //
-  // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
-  // true
-  //
-  // iii) the node's Mem projection feeds a StoreCM node.
-
-  bool is_card_mark_membar(const MemBarNode *barrier)
-  {
-    if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
-      return false;
-    }
-
-    if (barrier->Opcode() != Op_MemBarVolatile) {
-      return false;
-    }
-
-    ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
-
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
-      Node *y = mem->fast_out(i);
-      if (y->Opcode() == Op_StoreCM) {
-	return true;
-      }
-    }
-
-    return false;
-  }
-
-
-  // 3) helper predicates to traverse volatile put or CAS graphs which
-  // may contain GC barrier subgraphs
-
-  // Preamble
-  // --------
-  //
-  // for volatile writes we can omit generating barriers and employ a
-  // releasing store when we see a node sequence sequence with a
-  // leading MemBarRelease and a trailing MemBarVolatile as follows
-  //
-  //   MemBarRelease
-  //  {      ||      } -- optional
-  //  {MemBarCPUOrder}
-  //         ||     \\
-  //         ||     StoreX[mo_release]
-  //         | \     /
-  //         | MergeMem
-  //         | /
-  //  {MemBarCPUOrder} -- optional
-  //  {      ||      }
-  //   MemBarVolatile
-  //
-  // where
-  //  || and \\ represent Ctl and Mem feeds via Proj nodes
-  //  | \ and / indicate further routing of the Ctl and Mem feeds
-  //
-  // this is the graph we see for non-object stores. however, for a
-  // volatile Object store (StoreN/P) we may see other nodes below the
-  // leading membar because of the need for a GC pre- or post-write
-  // barrier.
-  //
-  // with most GC configurations we with see this simple variant which
-  // includes a post-write barrier card mark.
-  //
-  //   MemBarRelease______________________________
-  //         ||    \\               Ctl \        \\
-  //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
-  //         | \     /                       . . .  /
-  //         | MergeMem
-  //         | /
-  //         ||      /
-  //  {MemBarCPUOrder} -- optional
-  //  {      ||      }
-  //   MemBarVolatile
-  //
-  // i.e. the leading membar feeds Ctl to a CastP2X (which converts
-  // the object address to an int used to compute the card offset) and
-  // Ctl+Mem to a StoreB node (which does the actual card mark).
-  //
-  // n.b. a StoreCM node will only appear in this configuration when
-  // using CMS or G1. StoreCM differs from a normal card mark write (StoreB)
-  // because it implies a requirement to order visibility of the card
-  // mark (StoreCM) relative to the object put (StoreP/N) using a
-  // StoreStore memory barrier (arguably this ought to be represented
-  // explicitly in the ideal graph but that is not how it works). This
-  // ordering is required for both non-volatile and volatile
-  // puts. Normally that means we need to translate a StoreCM using
-  // the sequence
-  //
-  //   dmb ishst
-  //   strb
-  //
-  // However, when using G1 or CMS with conditional card marking (as
-  // we shall see) we don't need to insert the dmb when translating
-  // StoreCM because there is already an intervening StoreLoad barrier
-  // between it and the StoreP/N.
-  //
-  // It is also possible to perform the card mark conditionally on it
-  // currently being unmarked in which case the volatile put graph
-  // will look slightly different
-  //
-  //   MemBarRelease____________________________________________
-  //         ||    \\               Ctl \     Ctl \     \\  Mem \
-  //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
-  //         | \     /                              \            |
-  //         | MergeMem                            . . .      StoreB
-  //         | /                                                /
-  //         ||     /
-  //   MemBarVolatile
-  //
-  // It is worth noting at this stage that both the above
-  // configurations can be uniquely identified by checking that the
-  // memory flow includes the following subgraph:
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder}
-  //          |  \      . . .
-  //          |  StoreX[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //  {MemBarCPUOrder}
-  //   MemBarVolatile
-  //
-  // This is referred to as a *normal* subgraph. It can easily be
-  // detected starting from any candidate MemBarRelease,
-  // StoreX[mo_release] or MemBarVolatile.
-  //
-  // A simple variation on this normal case occurs for an unsafe CAS
-  // operation. The basic graph for a non-object CAS is
-  //
-  //   MemBarRelease
-  //         ||
-  //   MemBarCPUOrder
-  //         ||     \\   . . .
-  //         ||     CompareAndSwapX
-  //         ||       |
-  //         ||     SCMemProj
-  //         | \     /
-  //         | MergeMem
-  //         | /
-  //   MemBarCPUOrder
-  //         ||
-  //   MemBarAcquire
-  //
-  // The same basic variations on this arrangement (mutatis mutandis)
-  // occur when a card mark is introduced. i.e. we se the same basic
-  // shape but the StoreP/N is replaced with CompareAndSawpP/N and the
-  // tail of the graph is a pair comprising a MemBarCPUOrder +
-  // MemBarAcquire.
-  //
-  // So, in the case of a CAS the normal graph has the variant form
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder
-  //          |   \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |    |
-  //          |   SCMemProj
-  //          |   /  . . .
-  //         MergeMem
-  //          |
-  //   MemBarCPUOrder
-  //   MemBarAcquire
-  //
-  // This graph can also easily be detected starting from any
-  // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
-  //
-  // the code below uses two helper predicates, leading_to_normal and
-  // normal_to_leading to identify these normal graphs, one validating
-  // the layout starting from the top membar and searching down and
-  // the other validating the layout starting from the lower membar
-  // and searching up.
-  //
-  // There are two special case GC configurations when a normal graph
-  // may not be generated: when using G1 (which always employs a
-  // conditional card mark); and when using CMS with conditional card
-  // marking configured. These GCs are both concurrent rather than
-  // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
-  // graph between the leading and trailing membar nodes, in
-  // particular enforcing stronger memory serialisation beween the
-  // object put and the corresponding conditional card mark. CMS
-  // employs a post-write GC barrier while G1 employs both a pre- and
-  // post-write GC barrier. Of course the extra nodes may be absent --
-  // they are only inserted for object puts/swaps. This significantly
-  // complicates the task of identifying whether a MemBarRelease,
-  // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
-  // when using these GC configurations (see below). It adds similar
-  // complexity to the task of identifying whether a MemBarRelease,
-  // CompareAndSwapX or MemBarAcquire forms part of a CAS.
-  //
-  // In both cases the post-write subtree includes an auxiliary
-  // MemBarVolatile (StoreLoad barrier) separating the object put/swap
-  // and the read of the corresponding card. This poses two additional
-  // problems.
-  //
-  // Firstly, a card mark MemBarVolatile needs to be distinguished
-  // from a normal trailing MemBarVolatile. Resolving this first
-  // problem is straightforward: a card mark MemBarVolatile always
-  // projects a Mem feed to a StoreCM node and that is a unique marker
-  //
-  //      MemBarVolatile (card mark)
-  //       C |    \     . . .
-  //         |   StoreCM   . . .
-  //       . . .
-  //
-  // The second problem is how the code generator is to translate the
-  // card mark barrier? It always needs to be translated to a "dmb
-  // ish" instruction whether or not it occurs as part of a volatile
-  // put. A StoreLoad barrier is needed after the object put to ensure
-  // i) visibility to GC threads of the object put and ii) visibility
-  // to the mutator thread of any card clearing write by a GC
-  // thread. Clearly a normal store (str) will not guarantee this
-  // ordering but neither will a releasing store (stlr). The latter
-  // guarantees that the object put is visible but does not guarantee
-  // that writes by other threads have also been observed.
-  //
-  // So, returning to the task of translating the object put and the
-  // leading/trailing membar nodes: what do the non-normal node graph
-  // look like for these 2 special cases? and how can we determine the
-  // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
-  // in both normal and non-normal cases?
-  //
-  // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
-  // which selects conditonal execution based on the value loaded
-  // (LoadB) from the card. Ctl and Mem are fed to the If via an
-  // intervening StoreLoad barrier (MemBarVolatile).
-  //
-  // So, with CMS we may see a node graph for a volatile object store
-  // which looks like this
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder}_(leading)_________________
-  //     C |    M \       \\                   C \
-  //       |       \    StoreN/P[mo_release]  CastP2X
-  //       |    Bot \    /
-  //       |       MergeMem
-  //       |         /
-  //      MemBarVolatile (card mark)
-  //     C |  ||    M |
-  //       | LoadB    |
-  //       |   |      |
-  //       | Cmp      |\
-  //       | /        | \
-  //       If         |  \
-  //       | \        |   \
-  // IfFalse  IfTrue  |    \
-  //       \     / \  |     \
-  //        \   / StoreCM    |
-  //         \ /      |      |
-  //        Region   . . .   |
-  //          | \           /
-  //          |  . . .  \  / Bot
-  //          |       MergeMem
-  //          |          |
-  //       {MemBarCPUOrder}
-  //        MemBarVolatile (trailing)
-  //
-  // The first MergeMem merges the AliasIdxBot Mem slice from the
-  // leading membar and the oopptr Mem slice from the Store into the
-  // card mark membar. The trailing MergeMem merges the AliasIdxBot
-  // Mem slice from the card mark membar and the AliasIdxRaw slice
-  // from the StoreCM into the trailing membar (n.b. the latter
-  // proceeds via a Phi associated with the If region).
-  //
-  // The graph for a CAS varies slightly, the difference being
-  // that the StoreN/P node is replaced by a CompareAndSwapP/N node
-  // and the trailing MemBarVolatile by a MemBarCPUOrder +
-  // MemBarAcquire pair (also the MemBarCPUOrder nodes are not optional).
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder_(leading)_______________
-  //     C |    M \       \\                C \
-  //       |       \    CompareAndSwapN/P  CastP2X
-  //       |        \      |
-  //       |         \   SCMemProj
-  //       |      Bot \   /
-  //       |        MergeMem
-  //       |         /
-  //      MemBarVolatile (card mark)
-  //     C |  ||    M |
-  //       | LoadB    |
-  //       |   |      |
-  //       | Cmp      |\
-  //       | /        | \
-  //       If         |  \
-  //       | \        |   \
-  // IfFalse  IfTrue  |    \
-  //       \     / \  |     \
-  //        \   / StoreCM    |
-  //         \ /      |      |
-  //        Region   . . .   |
-  //          | \           /
-  //          |  . . .  \  / Bot
-  //          |       MergeMem
-  //          |          |
-  //        MemBarCPUOrder
-  //        MemBarVolatile (trailing)
-  //
-  //
-  // G1 is quite a lot more complicated. The nodes inserted on behalf
-  // of G1 may comprise: a pre-write graph which adds the old value to
-  // the SATB queue; the releasing store itself; and, finally, a
-  // post-write graph which performs a card mark.
-  //
-  // The pre-write graph may be omitted, but only when the put is
-  // writing to a newly allocated (young gen) object and then only if
-  // there is a direct memory chain to the Initialize node for the
-  // object allocation. This will not happen for a volatile put since
-  // any memory chain passes through the leading membar.
-  //
-  // The pre-write graph includes a series of 3 If tests. The outermost
-  // If tests whether SATB is enabled (no else case). The next If tests
-  // whether the old value is non-NULL (no else case). The third tests
-  // whether the SATB queue index is > 0, if so updating the queue. The
-  // else case for this third If calls out to the runtime to allocate a
-  // new queue buffer.
-  //
-  // So with G1 the pre-write and releasing store subgraph looks like
-  // this (the nested Ifs are omitted).
-  //
-  //  MemBarRelease
-  // {MemBarCPUOrder}_(leading)___________
-  //     C |  ||  M \   M \    M \  M \ . . .
-  //       | LoadB   \  LoadL  LoadN   \
-  //       | /        \                 \
-  //       If         |\                 \
-  //       | \        | \                 \
-  //  IfFalse  IfTrue |  \                 \
-  //       |     |    |   \                 |
-  //       |     If   |   /\                |
-  //       |     |          \               |
-  //       |                 \              |
-  //       |    . . .         \             |
-  //       | /       | /       |            |
-  //      Region  Phi[M]       |            |
-  //       | \       |         |            |
-  //       |  \_____ | ___     |            |
-  //     C | C \     |   C \ M |            |
-  //       | CastP2X | StoreN/P[mo_release] |
-  //       |         |         |            |
-  //     C |       M |       M |          M |
-  //        \        |         |           /
-  //                  . . .
-  //          (post write subtree elided)
-  //                    . . .
-  //             C \         M /
-  //                \         /
-  //             {MemBarCPUOrder}
-  //              MemBarVolatile (trailing)
-  //
-  // n.b. the LoadB in this subgraph is not the card read -- it's a
-  // read of the SATB queue active flag.
-  //
-  // The G1 post-write subtree is also optional, this time when the
-  // new value being written is either null or can be identified as a
-  // newly allocated (young gen) object with no intervening control
-  // flow. The latter cannot happen but the former may, in which case
-  // the card mark membar is omitted and the memory feeds form the
-  // leading membar and the SToreN/P are merged direct into the
-  // trailing membar as per the normal subgraph. So, the only special
-  // case which arises is when the post-write subgraph is generated.
-  //
-  // The kernel of the post-write G1 subgraph is the card mark itself
-  // which includes a card mark memory barrier (MemBarVolatile), a
-  // card test (LoadB), and a conditional update (If feeding a
-  // StoreCM). These nodes are surrounded by a series of nested Ifs
-  // which try to avoid doing the card mark. The top level If skips if
-  // the object reference does not cross regions (i.e. it tests if
-  // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
-  // need not be recorded. The next If, which skips on a NULL value,
-  // may be absent (it is not generated if the type of value is >=
-  // OopPtr::NotNull). The 3rd If skips writes to young regions (by
-  // checking if card_val != young).  n.b. although this test requires
-  // a pre-read of the card it can safely be done before the StoreLoad
-  // barrier. However that does not bypass the need to reread the card
-  // after the barrier. A final, 4th If tests if the card is already
-  // marked.
-  //
-  //                (pre-write subtree elided)
-  //        . . .                  . . .    . . .  . . .
-  //        C |                    M |     M |    M |
-  //       Region                  Phi[M] StoreN    |
-  //          |                     / \      |      |
-  //         / \_______            /   \     |      |
-  //      C / C \      . . .            \    |      |
-  //       If   CastP2X . . .            |   |      |
-  //       / \                           |   |      |
-  //      /   \                          |   |      |
-  // IfFalse IfTrue                      |   |      |
-  //   |       |                         |   |     /|
-  //   |       If                        |   |    / |
-  //   |      / \                        |   |   /  |
-  //   |     /   \                        \  |  /   |
-  //   | IfFalse IfTrue                   MergeMem  |
-  //   |  . . .    / \                       /      |
-  //   |          /   \                     /       |
-  //   |     IfFalse IfTrue                /        |
-  //   |      . . .    |                  /         |
-  //   |               If                /          |
-  //   |               / \              /           |
-  //   |              /   \            /            |
-  //   |         IfFalse IfTrue       /             |
-  //   |           . . .   |         /              |
-  //   |                    \       /               |
-  //   |                     \     /                |
-  //   |             MemBarVolatile__(card mark)    |
-  //   |                ||   C |  M \  M \          |
-  //   |               LoadB   If    |    |         |
-  //   |                      / \    |    |         |
-  //   |                     . . .   |    |         |
-  //   |                          \  |    |        /
-  //   |                        StoreCM   |       /
-  //   |                          . . .   |      /
-  //   |                        _________/      /
-  //   |                       /  _____________/
-  //   |   . . .       . . .  |  /            /
-  //   |    |                 | /   _________/
-  //   |    |               Phi[M] /        /
-  //   |    |                 |   /        /
-  //   |    |                 |  /        /
-  //   |  Region  . . .     Phi[M]  _____/
-  //   |    /                 |    /
-  //   |                      |   /
-  //   | . . .   . . .        |  /
-  //   | /                    | /
-  // Region           |  |  Phi[M]
-  //   |              |  |  / Bot
-  //    \            MergeMem
-  //     \            /
-  //    {MemBarCPUOrder}
-  //     MemBarVolatile
-  //
-  // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
-  // from the leading membar and the oopptr Mem slice from the Store
-  // into the card mark membar i.e. the memory flow to the card mark
-  // membar still looks like a normal graph.
-  //
-  // The trailing MergeMem merges an AliasIdxBot Mem slice with other
-  // Mem slices (from the StoreCM and other card mark queue stores).
-  // However in this case the AliasIdxBot Mem slice does not come
-  // direct from the card mark membar. It is merged through a series
-  // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
-  // from the leading membar with the Mem feed from the card mark
-  // membar. Each Phi corresponds to one of the Ifs which may skip
-  // around the card mark membar. So when the If implementing the NULL
-  // value check has been elided the total number of Phis is 2
-  // otherwise it is 3.
-  //
-  // The CAS graph when using G1GC also includes a pre-write subgraph
-  // and an optional post-write subgraph. The same variations are
-  // introduced as for CMS with conditional card marking i.e. the
-  // StoreP/N is swapped for a CompareAndSwapP/N with a following
-  // SCMemProj, the trailing MemBarVolatile for a MemBarCPUOrder +
-  // MemBarAcquire pair. There may be an extra If test introduced in
-  // the CAS case, when the boolean result of the CAS is tested by the
-  // caller. In that case an extra Region and AliasIdxBot Phi may be
-  // introduced before the MergeMem
-  //
-  // So, the upshot is that in all cases the subgraph will include a
-  // *normal* memory subgraph betwen the leading membar and its child
-  // membar: either a normal volatile put graph including a releasing
-  // StoreX and terminating with a trailing volatile membar or card
-  // mark volatile membar; or a normal CAS graph including a
-  // CompareAndSwapX + SCMemProj pair and terminating with a card mark
-  // volatile membar or a trailing cpu order and acquire membar
-  // pair. If the child membar is not a (volatile) card mark membar
-  // then it marks the end of the volatile put or CAS subgraph. If the
-  // child is a card mark membar then the normal subgraph will form
-  // part of a larger volatile put or CAS subgraph if and only if the
-  // child feeds an AliasIdxBot Mem feed to a trailing barrier via a
-  // MergeMem. That feed is either direct (for CMS) or via 2, 3 or 4
-  // Phi nodes merging the leading barrier memory flow (for G1).
-  //
-  // The predicates controlling generation of instructions for store
-  // and barrier nodes employ a few simple helper functions (described
-  // below) which identify the presence or absence of all these
-  // subgraph configurations and provide a means of traversing from
-  // one node in the subgraph to another.
-
   // is_CAS(int opcode)
   //
   // return true if opcode is one of the possible CompareAndSwapX
@@ -1910,674 +1298,7 @@
   // traverse when searching from a card mark membar for the merge mem
   // feeding a trailing membar or vice versa
 
-  int max_phis()
-  {
-    if (UseG1GC) {
-      return 4;
-    } else if (UseConcMarkSweepGC && UseCondCardMark) {
-      return 1;
-    } else {
-      return 0;
-    }
-  }
-
-  // leading_to_normal
-  //
-  // graph traversal helper which detects the normal case Mem feed
-  // from a release membar (or, optionally, its cpuorder child) to a
-  // dependent volatile or acquire membar i.e. it ensures that one of
-  // the following 3 Mem flow subgraphs is present.
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder} {leading}
-  //          |  \      . . .
-  //          |  StoreN/P[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //  {MemBarCPUOrder}
-  //   MemBarVolatile {trailing or card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarVolatile {card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarCPUOrder
-  //   MemBarAcquire {trailing}
-  //
-  // if the correct configuration is present returns the trailing
-  // or cardmark membar otherwise NULL.
-  //
-  // the input membar is expected to be either a cpuorder membar or a
-  // release membar. in the latter case it should not have a cpu membar
-  // child.
-  //
-  // the returned value may be a card mark or trailing membar
-  //
-
-  MemBarNode *leading_to_normal(MemBarNode *leading)
-  {
-    assert((leading->Opcode() == Op_MemBarRelease ||
-	    leading->Opcode() == Op_MemBarCPUOrder),
-	   "expecting a volatile or cpuroder membar!");
-
-    // check the mem flow
-    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
-
-    if (!mem) {
-      return NULL;
-    }
-
-    Node *x = NULL;
-    StoreNode * st = NULL;
-    LoadStoreNode *cas = NULL;
-    MergeMemNode *mm = NULL;
-
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
-      x = mem->fast_out(i);
-      if (x->is_MergeMem()) {
-	if (mm != NULL) {
-	  return NULL;
-	}
-	// two merge mems is one too many
-	mm = x->as_MergeMem();
-      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
-	// two releasing stores/CAS nodes is one too many
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	st = x->as_Store();
-      } else if (is_CAS(x->Opcode())) {
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	cas = x->as_LoadStore();
-      }
-    }
-
-    // must have a store or a cas
-    if (!st && !cas) {
-      return NULL;
-    }
-
-    // must have a merge
-    if (!mm) {
-      return NULL;
-    }
-
-    Node *feed = NULL;
-    if (cas) {
-      // look for an SCMemProj
-      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
-	x = cas->fast_out(i);
-        if (x->Opcode() == Op_SCMemProj) {
-	  feed = x;
-	  break;
-	}
-      }
-      if (feed == NULL) {
-	return NULL;
-      }
-    } else {
-      feed = st;
-    }
-    // ensure the feed node feeds the existing mergemem;
-    for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-      x = feed->fast_out(i);
-      if (x == mm) {
-        break;
-      }
-    }
-    if (x != mm) {
-      return NULL;
-    }
-
-    MemBarNode *mbar = NULL;
-    // ensure the merge feeds to the expected type of membar
-    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
-      x = mm->fast_out(i);
-      if (x->is_MemBar()) {
-        if (x->Opcode() == Op_MemBarCPUOrder) {
-          // with a store any cpu order membar should precede a
-          // trailing volatile membar. with a cas it should precede a
-          // trailing acquire membar. in either case try to skip to
-          // that next membar
-	  MemBarNode *y =  x->as_MemBar();
-	  y = child_membar(y);
-	  if (y != NULL) {
-            // skip to this new membar to do the check
-	    x = y;
-	  }
-          
-        }
-	if (x->Opcode() == Op_MemBarVolatile) {
-	  mbar = x->as_MemBar();
-          // for a volatile store this can be either a trailing membar
-          // or a card mark membar. for a cas it must be a card mark
-          // membar
-          guarantee(cas == NULL || is_card_mark_membar(mbar),
-                    "in CAS graph volatile membar must be a card mark");
-	} else if (cas != NULL && x->Opcode() == Op_MemBarAcquire) {
-	  mbar = x->as_MemBar();
-	}
-	break;
-      }
-    }
-
-    return mbar;
-  }
-
-  // normal_to_leading
-  //
-  // graph traversal helper which detects the normal case Mem feed
-  // from either a card mark or a trailing membar to a preceding
-  // release membar (optionally its cpuorder child) i.e. it ensures
-  // that one of the following 3 Mem flow subgraphs is present.
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder} {leading}
-  //          |  \      . . .
-  //          |  StoreN/P[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //  {MemBarCPUOrder}
-  //   MemBarVolatile {trailing or card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarVolatile {card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarCPUOrder
-  //   MemBarAcquire {trailing}
-  //
-  // this predicate checks for the same flow as the previous predicate
-  // but starting from the bottom rather than the top.
-  //
-  // if the configuration is present returns the cpuorder member for
-  // preference or when absent the release membar otherwise NULL.
-  //
-  // n.b. the input membar is expected to be a MemBarVolatile but
-  // need not be a card mark membar.
-
-  MemBarNode *normal_to_leading(const MemBarNode *barrier)
-  {
-    // input must be a volatile membar
-    assert((barrier->Opcode() == Op_MemBarVolatile ||
-	    barrier->Opcode() == Op_MemBarAcquire),
-	   "expecting a volatile or an acquire membar");
-    bool barrier_is_acquire = barrier->Opcode() == Op_MemBarAcquire;
-
-    // if we have an intervening cpu order membar then start the
-    // search from it
-    
-    Node *x = parent_membar(barrier);
-
-    if (x == NULL) {
-      // stick with the original barrier
-      x = (Node *)barrier;
-    } else if (x->Opcode() != Op_MemBarCPUOrder) {
-      // any other barrier means this is not the graph we want
-      return NULL;
-    }
-
-    // the Mem feed to the membar should be a merge
-    x = x ->in(TypeFunc::Memory);
-    if (!x->is_MergeMem())
-      return NULL;
-
-    MergeMemNode *mm = x->as_MergeMem();
-
-    // the merge should get its Bottom mem feed from the leading membar
-    x = mm->in(Compile::AliasIdxBot);
-
-    // ensure this is a non control projection
-    if (!x->is_Proj() || x->is_CFG()) {
-      return NULL;
-    }
-    // if it is fed by a membar that's the one we want
-    x = x->in(0);
-
-    if (!x->is_MemBar()) {
-      return NULL;
-    }
-
-    MemBarNode *leading = x->as_MemBar();
-    // reject invalid candidates
-    if (!leading_membar(leading)) {
-      return NULL;
-    }
-
-    // ok, we have a leading membar, now for the sanity clauses
-
-    // the leading membar must feed Mem to a releasing store or CAS
-    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
-    StoreNode *st = NULL;
-    LoadStoreNode *cas = NULL;
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
-      x = mem->fast_out(i);
-      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
-	// two stores or CASes is one too many
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	st = x->as_Store();
-      } else if (is_CAS(x->Opcode())) {
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	cas = x->as_LoadStore();
-      }
-    }
-
-    // we cannot have both a store and a cas
-    if (st == NULL && cas == NULL) {
-      // we have neither -- this is not a normal graph
-      return NULL;
-    }
-    if (st == NULL) {
-      // if we started from a volatile membar and found a CAS then the
-      // original membar ought to be for a card mark
-      guarantee((barrier_is_acquire || is_card_mark_membar(barrier)),
-                "unexpected volatile barrier (i.e. not card mark) in CAS graph");
-      // check that the CAS feeds the merge we used to get here via an
-      // intermediary SCMemProj
-      Node *scmemproj = NULL;
-      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
-        x = cas->fast_out(i);
-        if (x->Opcode() == Op_SCMemProj) {
-          scmemproj = x;
-          break;
-        }
-      }
-      if (scmemproj == NULL) {
-        return NULL;
-      }
-      for (DUIterator_Fast imax, i = scmemproj->fast_outs(imax); i < imax; i++) {
-        x = scmemproj->fast_out(i);
-        if (x == mm) {
-          return leading;
-        }
-      }
-    } else {
-      // we should not have found a store if we started from an acquire
-      guarantee(!barrier_is_acquire,
-                "unexpected trailing acquire barrier in volatile store graph");
-
-      // the store should feed the merge we used to get here
-      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
-	if (st->fast_out(i) == mm) {
-	  return leading;
-	}
-      }
-    }
-
-    return NULL;
-  }
-
-  // card_mark_to_trailing
-  //
-  // graph traversal helper which detects extra, non-normal Mem feed
-  // from a card mark volatile membar to a trailing membar i.e. it
-  // ensures that one of the following three GC post-write Mem flow
-  // subgraphs is present.
-  //
-  // 1)
-  //     . . .
-  //       |
-  //   MemBarVolatile (card mark)
-  //      |          |
-  //      |        StoreCM
-  //      |          |
-  //      |        . . .
-  //  Bot |  /
-  //   MergeMem
-  //      |
-  //   {MemBarCPUOrder}            OR  MemBarCPUOrder
-  //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
-  //                                 
-  //
-  // 2)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |
-  //    |\       . . .
-  //    | \        |
-  //    |  \  MemBarVolatile (card mark)
-  //    |   \   |     |
-  //     \   \  |   StoreCM    . . .
-  //      \   \ |
-  //       \  Phi
-  //        \ /
-  //        Phi  . . .
-  //     Bot |   /
-  //       MergeMem
-  //         |
-  //   {MemBarCPUOrder}            OR  MemBarCPUOrder
-  //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
-  //
-  // 3)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |\
-  //    | \
-  //    |  \      . . .
-  //    |   \       |
-  //    |\   \  MemBarVolatile (card mark)
-  //    | \   \   |     |
-  //    |  \   \  |   StoreCM    . . .
-  //    |   \   \ |
-  //     \   \  Phi
-  //      \   \ /
-  //       \  Phi
-  //        \ /
-  //        Phi  . . .
-  //     Bot |   /
-  //       MergeMem
-  //         |
-  //         |
-  //   {MemBarCPUOrder}            OR  MemBarCPUOrder
-  //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
-  //
-  // 4)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |\
-  //    | \
-  //    |  \
-  //    |   \
-  //    |\   \
-  //    | \   \
-  //    |  \   \        . . .
-  //    |   \   \         |
-  //    |\   \   \   MemBarVolatile (card mark)
-  //    | \   \   \   /   |
-  //    |  \   \   \ /  StoreCM    . . .
-  //    |   \   \  Phi
-  //     \   \   \ /
-  //      \   \  Phi
-  //       \   \ /
-  //        \  Phi
-  //         \ /
-  //         Phi  . . .
-  //      Bot |   /
-  //       MergeMem
-  //          |
-  //          |
-  //    MemBarCPUOrder
-  //    MemBarAcquire {trailing}
-  //
-  // configuration 1 is only valid if UseConcMarkSweepGC &&
-  // UseCondCardMark
-  //
-  // configuration 2, is only valid if UseConcMarkSweepGC &&
-  // UseCondCardMark or if UseG1GC
-  //
-  // configurations 3 and 4 are only valid if UseG1GC.
-  //
-  // if a valid configuration is present returns the trailing membar
-  // otherwise NULL.
-  //
-  // n.b. the supplied membar is expected to be a card mark
-  // MemBarVolatile i.e. the caller must ensure the input node has the
-  // correct operand and feeds Mem to a StoreCM node
-
-  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
-  {
-    // input must be a card mark volatile membar
-    assert(is_card_mark_membar(barrier), "expecting a card mark membar");
-
-    Node *feed = barrier->proj_out(TypeFunc::Memory);
-    Node *x;
-    MergeMemNode *mm = NULL;
-
-    const int MAX_PHIS = max_phis(); // max phis we will search through
-    int phicount = 0;                // current search count
-
-    bool retry_feed = true;
-    while (retry_feed) {
-      // see if we have a direct MergeMem feed
-      for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-	x = feed->fast_out(i);
-	// the correct Phi will be merging a Bot memory slice
-	if (x->is_MergeMem()) {
-	  mm = x->as_MergeMem();
-	  break;
-	}
-      }
-      if (mm) {
-	retry_feed = false;
-      } else if (phicount++ < MAX_PHIS) {
-	// the barrier may feed indirectly via one or two Phi nodes
-	PhiNode *phi = NULL;
-	for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-	  x = feed->fast_out(i);
-	  // the correct Phi will be merging a Bot memory slice
-	  if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
-	    phi = x->as_Phi();
-	    break;
-	  }
-	}
-	if (!phi) {
-	  return NULL;
-	}
-	// look for another merge below this phi
-	feed = phi;
-      } else {
-	// couldn't find a merge
-	return NULL;
-      }
-    }
-
-    // sanity check this feed turns up as the expected slice
-    guarantee(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
-
-    MemBarNode *trailing = NULL;
-    // be sure we have a trailing membar fed by the merge
-    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
-      x = mm->fast_out(i);
-      if (x->is_MemBar()) {
-        // if this is an intervening cpu order membar skip to the
-        // following membar
-        if (x->Opcode() == Op_MemBarCPUOrder) {
-          MemBarNode *y =  x->as_MemBar();
-          y = child_membar(y);
-          if (y != NULL) {
-            x = y;
-          }
-        }
-        if (x->Opcode() == Op_MemBarVolatile ||
-            x->Opcode() == Op_MemBarAcquire) {
-          trailing = x->as_MemBar();
-        }
-        break;
-      }
-    }
-
-    return trailing;
-  }
-
-  // trailing_to_card_mark
-  //
-  // graph traversal helper which detects extra, non-normal Mem feed
-  // from a trailing volatile membar to a preceding card mark volatile
-  // membar i.e. it identifies whether one of the three possible extra
-  // GC post-write Mem flow subgraphs is present
-  //
-  // this predicate checks for the same flow as the previous predicate
-  // but starting from the bottom rather than the top.
-  //
-  // if the configuration is present returns the card mark membar
-  // otherwise NULL
-  //
-  // n.b. the supplied membar is expected to be a trailing
-  // MemBarVolatile or MemBarAcquire i.e. the caller must ensure the
-  // input node has the correct opcode
-
-  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
-  {
-    assert(trailing->Opcode() == Op_MemBarVolatile ||
-           trailing->Opcode() == Op_MemBarAcquire,
-	   "expecting a volatile or acquire membar");
-    assert(!is_card_mark_membar(trailing),
-	   "not expecting a card mark membar");
-
-    Node *x = (Node *)trailing;
-
-    // look for a preceding cpu order membar
-    MemBarNode *y = parent_membar(x->as_MemBar());
-    if (y != NULL) {
-      // make sure it is a cpu order membar
-      if (y->Opcode() != Op_MemBarCPUOrder) {
-        // this is nto the graph we were looking for
-        return NULL;
-      }
-      // start the search from here
-      x = y;
-    }
-
-    // the Mem feed to the membar should be a merge
-    x = x->in(TypeFunc::Memory);
-    if (!x->is_MergeMem()) {
-      return NULL;
-    }
-
-    MergeMemNode *mm = x->as_MergeMem();
-
-    x = mm->in(Compile::AliasIdxBot);
-    // with G1 we may possibly see a Phi or two before we see a Memory
-    // Proj from the card mark membar
-
-    const int MAX_PHIS = max_phis(); // max phis we will search through
-    int phicount = 0;                    // current search count
-
-    bool retry_feed = !x->is_Proj();
-
-    while (retry_feed) {
-      if (x->is_Phi() && phicount++ < MAX_PHIS) {
-	PhiNode *phi = x->as_Phi();
-	ProjNode *proj = NULL;
-	PhiNode *nextphi = NULL;
-	bool found_leading = false;
-	for (uint i = 1; i < phi->req(); i++) {
-	  x = phi->in(i);
-	  if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
-	    nextphi = x->as_Phi();
-	  } else if (x->is_Proj()) {
-	    int opcode = x->in(0)->Opcode();
-	    if (opcode == Op_MemBarVolatile) {
-	      proj = x->as_Proj();
-	    } else if (opcode == Op_MemBarRelease ||
-		       opcode == Op_MemBarCPUOrder) {
-	      // probably a leading membar
-	      found_leading = true;
-	    }
-	  }
-	}
-	// if we found a correct looking proj then retry from there
-	// otherwise we must see a leading and a phi or this the
-	// wrong config
-	if (proj != NULL) {
-	  x = proj;
-	  retry_feed = false;
-	} else if (found_leading && nextphi != NULL) {
-	  // retry from this phi to check phi2
-	  x = nextphi;
-	} else {
-	  // not what we were looking for
-	  return NULL;
-	}
-      } else {
-	return NULL;
-      }
-    }
-    // the proj has to come from the card mark membar
-    x = x->in(0);
-    if (!x->is_MemBar()) {
-      return NULL;
-    }
-
-    MemBarNode *card_mark_membar = x->as_MemBar();
-
-    if (!is_card_mark_membar(card_mark_membar)) {
-      return NULL;
-    }
-
-    return card_mark_membar;
-  }
-
-  // trailing_to_leading
-  //
-  // graph traversal helper which checks the Mem flow up the graph
-  // from a (non-card mark) trailing membar attempting to locate and
-  // return an associated leading membar. it first looks for a
-  // subgraph in the normal configuration (relying on helper
-  // normal_to_leading). failing that it then looks for one of the
-  // possible post-write card mark subgraphs linking the trailing node
-  // to a the card mark membar (relying on helper
-  // trailing_to_card_mark), and then checks that the card mark membar
-  // is fed by a leading membar (once again relying on auxiliary
-  // predicate normal_to_leading).
-  //
-  // if the configuration is valid returns the cpuorder member for
-  // preference or when absent the release membar otherwise NULL.
-  //
-  // n.b. the input membar is expected to be either a volatile or
-  // acquire membar but in the former case must *not* be a card mark
-  // membar.
-
-  MemBarNode *trailing_to_leading(const MemBarNode *trailing)
-  {
-    assert((trailing->Opcode() == Op_MemBarAcquire ||
-	    trailing->Opcode() == Op_MemBarVolatile),
-	   "expecting an acquire or volatile membar");
-    assert((trailing->Opcode() != Op_MemBarVolatile ||
-	    !is_card_mark_membar(trailing)),
-	   "not expecting a card mark membar");
-
-    MemBarNode *leading = normal_to_leading(trailing);
-
-    if (leading) {
-      return leading;
-    }
-
-    // there is no normal path from trailing to leading membar. see if
-    // we can arrive via a card mark membar
-
-    MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
-
-    if (!card_mark_membar) {
-      return NULL;
-    }
-
-    return normal_to_leading(card_mark_membar);
-  }
-
-  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
+// predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 
 bool unnecessary_acquire(const Node *barrier)
 {
@@ -2588,40 +1309,19 @@
     return false;
   }
 
-  // a volatile read derived from bytecode (or also from an inlined
-  // SHA field read via LibraryCallKit::load_field_from_object)
-  // manifests as a LoadX[mo_acquire] followed by an acquire membar
-  // with a bogus read dependency on it's preceding load. so in those
-  // cases we will find the load node at the PARMS offset of the
-  // acquire membar.  n.b. there may be an intervening DecodeN node.
-
-  Node *x = barrier->lookup(TypeFunc::Parms);
-  if (x) {
-    // we are starting from an acquire and it has a fake dependency
-    //
-    // need to check for
-    //
-    //   LoadX[mo_acquire]
-    //   {  |1   }
-    //   {DecodeN}
-    //      |Parms
-    //   MemBarAcquire*
-    //
-    // where * tags node we were passed
-    // and |k means input k
-    if (x->is_DecodeNarrowPtr()) {
-      x = x->in(1);
-    }
-
-    return (x->is_Load() && x->as_Load()->is_acquire());
+  MemBarNode* mb = barrier->as_MemBar();
+
+  if (mb->trailing_load()) {
+    return true;
   }
 
-  // other option for unnecessary membar is that it is a trailing node
-  // belonging to a CAS
-
-  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
-
-  return leading != NULL;
+  if (mb->trailing_load_store()) {
+    Node* load_store = mb->in(MemBarNode::Precedent);
+    assert(load_store->is_LoadStore(), "unexpected graph shape");
+    return is_CAS(load_store->Opcode());
+  }
+
+  return false;
 }
 
 bool needs_acquiring_load(const Node *n)
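
With this rewrite, unnecessary_acquire no longer searches the graph for a LoadX[mo_acquire] or a CAS feeding the barrier; it simply asks whether the membar was recorded as the trailing barrier of a load or of a load-store (a CAS) via the leading/trailing links now kept on MemBarNode. As a rough source-level illustration (java.util.concurrent.atomic is not touched by this diff), this is the kind of operation whose trailing acquire membar can be elided:

    import java.util.concurrent.atomic.AtomicInteger;

    class CasExample {
        static final AtomicInteger COUNT = new AtomicInteger();

        static boolean bump(int expected) {
            // Intrinsified to a CompareAndSwap node; on AArch64 the exclusive
            // load/store pair (or an LSE cas with acquire/release semantics)
            // already orders the access, so the trailing MemBarAcquire need
            // not emit a separate dmb.
            return COUNT.compareAndSet(expected, expected + 1);
        }
    }
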
@@ -2634,45 +1334,7 @@
 
   LoadNode *ld = n->as_Load();
 
-  if (!ld->is_acquire()) {
-    return false;
-  }
-
-  // check if this load is feeding an acquire membar
-  //
-  //   LoadX[mo_acquire]
-  //   {  |1   }
-  //   {DecodeN}
-  //      |Parms
-  //   MemBarAcquire*
-  //
-  // where * tags node we were passed
-  // and |k means input k
-
-  Node *start = ld;
-  Node *mbacq = NULL;
-
-  // if we hit a DecodeNarrowPtr we reset the start node and restart
-  // the search through the outputs
- restart:
-
-  for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
-    Node *x = start->fast_out(i);
-    if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
-      mbacq = x;
-    } else if (!mbacq &&
-	       (x->is_DecodeNarrowPtr() ||
-		(x->is_Mach() && x->Opcode() == Op_DecodeN))) {
-      start = x;
-      goto restart;
-    }
-  }
-
-  if (mbacq) {
-    return true;
-  }
-
-  return false;
+  return ld->is_acquire();
 }
 
 bool unnecessary_release(const Node *n)
@@ -2686,32 +1348,27 @@
     return false;
   }
 
-  // if there is a dependent CPUOrder barrier then use that as the
-  // leading
-
   MemBarNode *barrier = n->as_MemBar();
-  // check for an intervening cpuorder membar
-  MemBarNode *b = child_membar(barrier);
-  if (b && b->Opcode() == Op_MemBarCPUOrder) {
-    // ok, so start the check from the dependent cpuorder barrier
-    barrier = b;
+  if (!barrier->leading()) {
+    return false;
+  } else {
+    Node* trailing = barrier->trailing_membar();
+    MemBarNode* trailing_mb = trailing->as_MemBar();
+    assert(trailing_mb->trailing(), "Not a trailing membar?");
+    assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
+
+    Node* mem = trailing_mb->in(MemBarNode::Precedent);
+    if (mem->is_Store()) {
+      assert(mem->as_Store()->is_release(), "");
+      assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
+      return true;
+    } else {
+      assert(mem->is_LoadStore(), "");
+      assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
+      return is_CAS(mem->Opcode());
+    }
   }
-
-  // must start with a normal feed
-  MemBarNode *child_barrier = leading_to_normal(barrier);
-
-  if (!child_barrier) {
-    return false;
-  }
-
-  if (!is_card_mark_membar(child_barrier)) {
-    // this is the trailing membar and we are done
-    return true;
-  }
-
-  // must be sure this card mark feeds a trailing membar
-  MemBarNode *trailing = card_mark_to_trailing(child_barrier);
-  return (trailing != NULL);
+  return false;
 }
 
 bool unnecessary_volatile(const Node *n)
@@ -2724,17 +1381,18 @@
 
   MemBarNode *mbvol = n->as_MemBar();
 
-  // first we check if this is part of a card mark. if so then we have
-  // to generate a StoreLoad barrier
-
-  if (is_card_mark_membar(mbvol)) {
-      return false;
+  bool release = mbvol->trailing_store();
+  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
+#ifdef ASSERT
+  if (release) {
+    Node* leading = mbvol->leading_membar();
+    assert(leading->Opcode() == Op_MemBarRelease, "");
+    assert(leading->as_MemBar()->leading_store(), "");
+    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
   }
-
-  // ok, if it's not a card mark then we still need to check if it is
-  // a trailing membar of a volatile put graph.
-
-  return (trailing_to_leading(mbvol) != NULL);
+#endif
+
+  return release;
 }
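
// Sketch (not in the patch) of the volatile-store shape that trailing_store()
// identifies here, as implied by the asserts above:
//
//   MemBarRelease {leading_store()}
//         ...
//   StoreX[mo_release]
//      |Precedent
//   MemBarVolatile {trailing_store()}    <-- the node passed in as n
//
// The intent is that the store itself is emitted as stlr, so the dmb that
// this MemBarVolatile would otherwise plant can be elided.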
 
 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
@@ -2749,53 +1407,7 @@
 
   StoreNode *st = n->as_Store();
 
-  // the store must be marked as releasing
-  if (!st->is_release()) {
-    return false;
-  }
-
-  // the store must be fed by a membar
-
-  Node *x = st->lookup(StoreNode::Memory);
-
-  if (! x || !x->is_Proj()) {
-    return false;
-  }
-
-  ProjNode *proj = x->as_Proj();
-
-  x = proj->lookup(0);
-
-  if (!x || !x->is_MemBar()) {
-    return false;
-  }
-
-  MemBarNode *barrier = x->as_MemBar();
-
-  // if the barrier is a release membar or a cpuorder mmebar fed by a
-  // release membar then we need to check whether that forms part of a
-  // volatile put graph.
-
-  // reject invalid candidates
-  if (!leading_membar(barrier)) {
-    return false;
-  }
-
-  // does this lead a normal subgraph?
-  MemBarNode *mbvol = leading_to_normal(barrier);
-
-  if (!mbvol) {
-    return false;
-  }
-
-  // all done unless this is a card mark
-  if (!is_card_mark_membar(mbvol)) {
-    return true;
-  }
-
-  // we found a card mark -- just make sure we have a trailing barrier
-
-  return (card_mark_to_trailing(mbvol) != NULL);
+  return st->trailing_membar() != NULL;
 }
 
 // predicate controlling translation of CAS
@@ -2809,48 +1421,9 @@
     return false;
   }
 
-  // CAS nodes only ought to turn up in inlined unsafe CAS operations
-#ifdef ASSERT
-  LoadStoreNode *st = n->as_LoadStore();
-
-  // the store must be fed by a membar
-
-  Node *x = st->lookup(StoreNode::Memory);
-
-  assert (x && x->is_Proj(), "CAS not fed by memory proj!");
-
-  ProjNode *proj = x->as_Proj();
-
-  x = proj->lookup(0);
-
-  assert (x && x->is_MemBar(), "CAS not fed by membar!");
-
-  MemBarNode *barrier = x->as_MemBar();
-
-  // the barrier must be a cpuorder mmebar fed by a release membar
-
-  guarantee(barrier->Opcode() == Op_MemBarCPUOrder,
-            "CAS not fed by cpuorder membar!");
-
-  MemBarNode *b = parent_membar(barrier);
-  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
-	  "CAS not fed by cpuorder+release membar pair!");
-
-  // does this lead a normal subgraph?
-  MemBarNode *mbar = leading_to_normal(barrier);
-
-  guarantee(mbar != NULL, "CAS not embedded in normal graph!");
-
-  // if this is a card mark membar check we have a trailing acquire
-
-  if (is_card_mark_membar(mbar)) {
-    mbar = card_mark_to_trailing(mbar);
-  }
-
-  guarantee(mbar != NULL, "card mark membar for CAS not embedded in normal graph!");
-
-  guarantee(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
-#endif // ASSERT
+  LoadStoreNode* ldst = n->as_LoadStore();
+  assert(ldst->trailing_membar() != NULL, "expected trailing membar");
+
   // so we can just return true here
   return true;
 }
@@ -11050,6 +9623,24 @@
   ins_pipe(imac_reg_reg);
 %}
 
+// Combined Integer Multiply & Neg
+
+instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
+  match(Set dst (MulI (SubI zero src1) src2));
+  match(Set dst (MulI src1 (SubI zero src2)));
+
+  ins_cost(INSN_COST * 3);
+  format %{ "mneg  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ mnegw(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg));
+  %}
+
+  ins_pipe(imac_reg_reg);
+%}
+
 // Combined Long Multiply & Add/Sub
 
 instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
@@ -11084,6 +9675,24 @@
   ins_pipe(lmac_reg_reg);
 %}
 
+// Combined Long Multiply & Neg
+
+instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
+  match(Set dst (MulL (SubL zero src1) src2));
+  match(Set dst (MulL src1 (SubL zero src2)));
+
+  ins_cost(INSN_COST * 5);
+  format %{ "mneg  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ mneg(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(lmac_reg_reg);
+%}
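
// Illustration (not part of the changeset): source shapes that produce the
// MulI(SubI 0 a, b) / MulL(SubL 0 x, y) patterns matched by mnegI/mnegL
// above, letting the matcher emit a single mnegw/mneg instead of a negate
// followed by a multiply.  Helper names are hypothetical:
//
//   static int  mneg_example_i(int a, int b)   { return (0 - a) * b; }  // or a * (0 - b)
//   static long mneg_example_l(long x, long y) { return (0L - x) * y; }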
+
 // Integer Divide
 
 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -2167,6 +2167,9 @@
   Register length  = op->length()->as_register();
   Register tmp = op->tmp()->as_register();
 
+  __ resolve(ACCESS_READ, src);
+  __ resolve(ACCESS_WRITE, dst);
+
   CodeStub* stub = op->stub();
   int flags = op->flags();
   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@@ -2510,6 +2513,7 @@
       scratch = op->scratch_opr()->as_register();
     }
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
+    __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
     // add debug info for NullPointerException only if one is possible
     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
     if (op->info() != NULL) {
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -941,6 +941,10 @@
         index = tmp;
       }
 
+      if (is_updateBytes) {
+        base_op = access_resolve(ACCESS_READ, base_op);
+      }
+
       if (offset) {
         LIR_Opr tmp = new_pointer_register();
         __ add(base_op, LIR_OprFact::intConst(offset), tmp);
@@ -1019,6 +1023,10 @@
         index = tmp;
       }
 
+      if (is_updateBytes) {
+        base_op = access_resolve(ACCESS_READ, base_op);
+      }
+
       if (offset) {
         LIR_Opr tmp = new_pointer_register();
         __ add(base_op, LIR_OprFact::intConst(offset), tmp);
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -3038,6 +3038,9 @@
   Register length  = op->length()->as_register();
   Register tmp = op->tmp()->as_register();
 
+  __ resolve(ACCESS_READ, src);
+  __ resolve(ACCESS_WRITE, dst);
+
   CodeStub* stub = op->stub();
   int flags = op->flags();
   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@@ -3476,6 +3479,7 @@
       scratch = op->scratch_opr()->as_register();
     }
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
+    __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
     // add debug info for NullPointerException only if one is possible
     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
     if (op->info() != NULL) {
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -997,6 +997,10 @@
       }
 #endif
 
+      if (is_updateBytes) {
+        base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
+      }
+
       LIR_Address* a = new LIR_Address(base_op,
                                        index,
                                        offset,
@@ -1054,7 +1058,7 @@
     constant_aOffset = result_aOffset->as_jlong();
     result_aOffset = LIR_OprFact::illegalOpr;
   }
-  LIR_Opr result_a = a.result();
+  LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());
 
   long constant_bOffset = 0;
   LIR_Opr result_bOffset = bOffset.result();
@@ -1062,7 +1066,7 @@
     constant_bOffset = result_bOffset->as_jlong();
     result_bOffset = LIR_OprFact::illegalOpr;
   }
-  LIR_Opr result_b = b.result();
+  LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());
 
 #ifndef _LP64
   result_a = new_register(T_INT);
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -23,10 +23,12 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "code/codeBlob.hpp"
 #include "gc/z/zBarrier.inline.hpp"
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zBarrierSetRuntime.hpp"
+#include "memory/resourceArea.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/macros.hpp"
 #ifdef COMPILER1
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -3123,6 +3123,16 @@
   }
 }
 
+void MacroAssembler::push_zmm(XMMRegister reg) {
+  lea(rsp, Address(rsp, -64)); // Use lea to not affect flags
+  evmovdqul(Address(rsp, 0), reg, Assembler::AVX_512bit);
+}
+
+void MacroAssembler::pop_zmm(XMMRegister reg) {
+  evmovdqul(reg, Address(rsp, 0), Assembler::AVX_512bit);
+  lea(rsp, Address(rsp, 64)); // Use lea to not affect flags
+}
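
// Usage sketch (not part of the patch): the pattern the call sites further
// down are rewritten to.  Instead of the open-coded
//
//   subptr(rsp, 64);
//   evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
//   ... use xmm0 as a scratch register ...
//   evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
//   addptr(rsp, 64);
//
// each site now brackets the scratch use with
//
//   push_zmm(xmm0);
//   ... use xmm0 as a scratch register ...
//   pop_zmm(xmm0);
//
// Because the helpers adjust rsp with lea rather than subptr/addptr, the
// save/restore no longer clobbers EFLAGS, unlike the old sequences.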
+
 void MacroAssembler::fremr(Register tmp) {
   save_rax(tmp);
   { Label L;
@@ -3848,33 +3858,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pcmpeqb(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpeqb(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pcmpeqb(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pcmpeqb(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3886,33 +3888,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pcmpeqw(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpeqw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pcmpeqw(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pcmpeqw(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3921,13 +3915,11 @@
   if (dst_enc < 16) {
     Assembler::pcmpestri(dst, src, imm8);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpestri(xmm0, src, imm8);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3937,33 +3929,25 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pcmpestri(dst, src, imm8);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpestri(xmm0, src, imm8);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pcmpestri(dst, xmm0, imm8);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pcmpestri(xmm1, xmm0, imm8);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3975,33 +3959,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pmovzxbw(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pmovzxbw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pmovzxbw(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pmovzxbw(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4012,13 +3988,11 @@
   } else if (dst_enc < 16) {
     Assembler::pmovzxbw(dst, src);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pmovzxbw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4027,12 +4001,10 @@
   if (src_enc < 16) {
     Assembler::pmovmskb(dst, src);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pmovmskb(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4042,31 +4014,23 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::ptest(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::ptest(xmm0, src);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::ptest(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::ptest(xmm1, xmm0);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4221,13 +4185,11 @@
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
       evmovdqul(xmm0, src, Assembler::AVX_512bit);
     } else {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       evmovdqul(xmm0, nds, Assembler::AVX_512bit);
       vandps(xmm0, xmm0, negate_field, vector_len);
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     }
   }
 }
@@ -4258,13 +4220,11 @@
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
       evmovdqul(xmm0, src, Assembler::AVX_512bit);
     } else {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       evmovdqul(xmm0, nds, Assembler::AVX_512bit);
       vandpd(xmm0, xmm0, negate_field, vector_len);
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     }
   }
 }
@@ -4294,16 +4254,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpaddb(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4353,16 +4311,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpaddw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4404,33 +4360,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vpbroadcastw(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpbroadcastw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpbroadcastw(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vpbroadcastw(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4442,33 +4390,25 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vpcmpeqb(dst, nds, src, vector_len);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpcmpeqb(xmm0, xmm0, src, vector_len);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpcmpeqb(dst, dst, xmm0, vector_len);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vpcmpeqb(xmm1, xmm1, xmm0, vector_len);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4480,33 +4420,25 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vpcmpeqw(dst, nds, src, vector_len);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpcmpeqw(xmm0, xmm0, src, vector_len);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpcmpeqw(dst, dst, xmm0, vector_len);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vpcmpeqw(xmm1, xmm1, xmm0, vector_len);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4517,13 +4449,11 @@
   } else if (dst_enc < 16) {
     Assembler::vpmovzxbw(dst, src, vector_len);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpmovzxbw(xmm0, src, vector_len);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4532,12 +4462,10 @@
   if (src_enc < 16) {
     Assembler::vpmovmskb(dst, src);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpmovmskb(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4566,16 +4494,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpmullw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4625,16 +4551,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsubb(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4684,16 +4608,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsubw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4751,8 +4673,7 @@
     evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@@ -4760,8 +4681,7 @@
     evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4819,8 +4739,7 @@
     evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@@ -4828,8 +4747,7 @@
     evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4887,8 +4805,7 @@
     evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else {
     // worse case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@@ -4896,8 +4813,7 @@
     evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4928,31 +4844,23 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vptest(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vptest(xmm0, src);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vptest(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vptest(xmm1, xmm0);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4966,45 +4874,35 @@
       if (dst_enc < 16) {
         Assembler::punpcklbw(dst, src);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::punpcklbw(xmm0, xmm0);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       }
     } else {
       if ((src_enc < 16) && (dst_enc < 16)) {
         Assembler::punpcklbw(dst, src);
       } else if (src_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::punpcklbw(xmm0, src);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else if (dst_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, src, Assembler::AVX_512bit);
         Assembler::punpcklbw(dst, xmm0);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+        push_zmm(xmm0);
+        push_zmm(xmm1);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         evmovdqul(xmm1, src, Assembler::AVX_512bit);
         Assembler::punpcklbw(xmm0, xmm1);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm1);
+        pop_zmm(xmm0);
       }
     }
   } else {
@@ -5020,12 +4918,10 @@
     if (dst_enc < 16) {
       Assembler::pshufd(dst, src, mode);
     } else {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       Assembler::pshufd(xmm0, src, mode);
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     }
   }
 }
@@ -5040,45 +4936,35 @@
       if (dst_enc < 16) {
         Assembler::pshuflw(dst, src, mode);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::pshuflw(xmm0, xmm0, mode);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       }
     } else {
       if ((src_enc < 16) && (dst_enc < 16)) {
         Assembler::pshuflw(dst, src, mode);
       } else if (src_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::pshuflw(xmm0, src, mode);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else if (dst_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, src, Assembler::AVX_512bit);
         Assembler::pshuflw(dst, xmm0, mode);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+        push_zmm(xmm0);
+        push_zmm(xmm1);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         evmovdqul(xmm1, src, Assembler::AVX_512bit);
         Assembler::pshuflw(xmm0, xmm1, mode);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm1);
+        pop_zmm(xmm0);
       }
     }
   } else {
@@ -5166,13 +5052,11 @@
   if (VM_Version::supports_avx512novl() &&
       (nds_upper_bank || dst_upper_bank)) {
     if (dst_upper_bank) {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       movflt(xmm0, nds);
       vxorps(xmm0, xmm0, src, Assembler::AVX_128bit);
       movflt(dst, xmm0);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     } else {
       movflt(dst, nds);
       vxorps(dst, dst, src, Assembler::AVX_128bit);
@@ -5190,13 +5074,11 @@
   if (VM_Version::supports_avx512novl() &&
       (nds_upper_bank || dst_upper_bank)) {
     if (dst_upper_bank) {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       movdbl(xmm0, nds);
       vxorpd(xmm0, xmm0, src, Assembler::AVX_128bit);
       movdbl(dst, xmm0);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     } else {
       movdbl(dst, nds);
       vxorpd(dst, dst, src, Assembler::AVX_128bit);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -482,6 +482,10 @@
   // from register xmm0. Otherwise, the value is stored from the FPU stack.
   void store_double(Address dst);
 
+  // Save/restore ZMM (512-bit) register on stack.
+  void push_zmm(XMMRegister reg);
+  void pop_zmm(XMMRegister reg);
+
   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
   void push_fTOS();
 
--- a/src/hotspot/cpu/x86/x86_64.ad	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu Aug 23 22:07:27 2018 +0200
@@ -317,18 +317,6 @@
 // Singleton class for TLS pointer
 reg_class ptr_r15_reg(R15, R15_H);
 
-// The registers which can be used for
-// a thread local safepoint poll
-// * R12 is reserved for heap base
-// * R13 cannot be encoded for addressing without an offset byte
-// * R15 is reserved for the JavaThread
-reg_class ptr_rex_reg(R8,  R8_H,
-                      R9,  R9_H,
-                      R10, R10_H,
-                      R11, R11_H,
-                      R14, R14_H);
-
-
 // Class for all long registers (excluding RSP)
 reg_class long_reg_with_rbp(RAX, RAX_H,
                             RDX, RDX_H,
@@ -3557,16 +3545,6 @@
   interface(REG_INTER);
 %}
 
-operand rex_RegP()
-%{
-  constraint(ALLOC_IN_RC(ptr_rex_reg));
-  match(RegP);
-  match(rRegP);
-
-  format %{ %}
-  interface(REG_INTER);
-%}
-
 operand rRegL()
 %{
   constraint(ALLOC_IN_RC(long_reg));
@@ -12360,7 +12338,7 @@
   ins_pipe(ialu_reg_mem);
 %}
 
-instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll)
+instruct safePoint_poll_tls(rFlagsReg cr, rRegP poll)
 %{
   predicate(SafepointMechanism::uses_thread_local_poll());
   match(SafePoint poll);
@@ -12369,13 +12347,12 @@
   format %{ "testl  rax, [$poll]\t"
             "# Safepoint: poll for GC" %}
   ins_cost(125);
-  size(3); /* setting an explicit size will cause debug builds to assert if size is incorrect */
+  size(4); /* setting an explicit size will cause debug builds to assert if size is incorrect */
   ins_encode %{
     __ relocate(relocInfo::poll_type);
     address pre_pc = __ pc();
     __ testl(rax, Address($poll$$Register, 0));
-    address post_pc = __ pc();
-    guarantee(pre_pc[0] == 0x41 && pre_pc[1] == 0x85, "must emit #rex test-ax [reg]");
+    assert(nativeInstruction_at(pre_pc)->is_safepoint_poll(), "must emit test %%eax [reg]");
   %}
   ins_pipe(ialu_reg_mem);
 %}
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1006,7 +1006,7 @@
 
   InstanceKlass* dyno = InstanceKlass::cast(dyno_klass);
 
-  if (!dyno->is_anonymous()) {
+  if (!dyno->is_unsafe_anonymous()) {
     if (_klasses_got[dyno_data->_got_index] != dyno) {
       // compile-time class different from runtime class, fail and deoptimize
       sweep_dependent_methods(holder_data);
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -362,7 +362,7 @@
   log->print(" aot='%2d'", _heap->dso_id());
 }
 
-void AOTCompiledMethod::log_state_change() const {
+void AOTCompiledMethod::log_state_change(oop cause) const {
   if (LogCompilation) {
     ResourceMark m;
     if (xtty != NULL) {
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -193,7 +193,7 @@
   virtual int comp_level() const { return CompLevel_aot; }
   virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); }
   virtual void log_identity(xmlStream* stream) const;
-  virtual void log_state_change() const;
+  virtual void log_state_change(oop cause = NULL) const;
   virtual bool make_entrant() NOT_TIERED({ ShouldNotReachHere(); return false; });
   virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); }
   virtual bool make_not_used() { return make_not_entrant_helper(not_used); }
--- a/src/hotspot/share/aot/aotLoader.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/aot/aotLoader.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -42,7 +42,7 @@
 #define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator<AOTLib*> lib = libraries()->begin(); lib != libraries()->end(); ++lib)
 
 void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     // don't even bother
     return;
   }
@@ -54,7 +54,7 @@
 }
 
 uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) {
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     // don't even bother
     return 0;
   }
--- a/src/hotspot/share/c1/c1_Decorators.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/c1/c1_Decorators.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -34,9 +34,5 @@
 // Use the C1_MASK_BOOLEAN decorator for boolean accesses where the value
 // needs to be masked.
 const DecoratorSet C1_MASK_BOOLEAN   = DECORATOR_LAST << 2;
-// The C1_WRITE_ACCESS decorator is used to mark writing accesses.
-const DecoratorSet C1_WRITE_ACCESS   = DECORATOR_LAST << 3;
-// The C1_READ_ACCESS decorator is used to mark reading accesses.
-const DecoratorSet C1_READ_ACCESS    = DECORATOR_LAST << 4;
 
 #endif // SHARE_VM_C1_C1_DECORATORS_HPP
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1844,8 +1844,8 @@
   // invoke-special-super
   if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer()) {
     ciInstanceKlass* sender_klass =
-          calling_klass->is_anonymous() ? calling_klass->host_klass() :
-                                          calling_klass;
+          calling_klass->is_unsafe_anonymous() ? calling_klass->unsafe_anonymous_host() :
+                                                 calling_klass;
     if (sender_klass->is_interface()) {
       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
       Value receiver = state()->stack_at(index);
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1615,7 +1615,7 @@
 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
-  decorators |= C1_READ_ACCESS;
+  decorators |= ACCESS_READ;
   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
   if (access.is_raw()) {
     _barrier_set->BarrierSetC1::load_at(access, result);
@@ -1626,7 +1626,7 @@
 
 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
                                LIR_Opr addr, LIR_Opr result) {
-  decorators |= C1_READ_ACCESS;
+  decorators |= ACCESS_READ;
   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
   access.set_resolved_addr(addr);
   if (access.is_raw()) {
@@ -1639,7 +1639,7 @@
 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
-  decorators |= C1_WRITE_ACCESS;
+  decorators |= ACCESS_WRITE;
   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
   if (access.is_raw()) {
     _barrier_set->BarrierSetC1::store_at(access, value);
@@ -1650,9 +1650,9 @@
 
 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
+  decorators |= ACCESS_READ;
+  decorators |= ACCESS_WRITE;
   // Atomic operations are SEQ_CST by default
-  decorators |= C1_READ_ACCESS;
-  decorators |= C1_WRITE_ACCESS;
   decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
   LIRAccess access(this, decorators, base, offset, type);
   if (access.is_raw()) {
@@ -1664,9 +1664,9 @@
 
 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
                                             LIRItem& base, LIRItem& offset, LIRItem& value) {
+  decorators |= ACCESS_READ;
+  decorators |= ACCESS_WRITE;
   // Atomic operations are SEQ_CST by default
-  decorators |= C1_READ_ACCESS;
-  decorators |= C1_WRITE_ACCESS;
   decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
   LIRAccess access(this, decorators, base, offset, type);
   if (access.is_raw()) {
@@ -1678,9 +1678,9 @@
 
 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
+  decorators |= ACCESS_READ;
+  decorators |= ACCESS_WRITE;
   // Atomic operations are SEQ_CST by default
-  decorators |= C1_READ_ACCESS;
-  decorators |= C1_WRITE_ACCESS;
   decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
   LIRAccess access(this, decorators, base, offset, type);
   if (access.is_raw()) {
@@ -1690,6 +1690,15 @@
   }
 }
 
+LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
+  // Use stronger ACCESS_WRITE|ACCESS_READ by default.
+  if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
+    decorators |= ACCESS_READ | ACCESS_WRITE;
+  }
+
+  return _barrier_set->resolve(this, decorators, obj);
+}
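
// Usage sketch (not part of the patch): callers wrap an object operand before
// doing raw address arithmetic on it, as in the java.nio.Buffer range check
// later in this file:
//
//   LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
//
// The assumption (not spelled out in this hunk) is that a barrier set without
// a resolve barrier simply returns obj unchanged, while collectors that may
// need to hand back a different copy of the object can hook in here.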
+
 void LIRGenerator::do_LoadField(LoadField* x) {
   bool needs_patching = x->needs_patching();
   bool is_volatile = x->field()->is_volatile();
@@ -1767,11 +1776,12 @@
   if (GenerateRangeChecks) {
     CodeEmitInfo* info = state_for(x);
     CodeStub* stub = new RangeCheckStub(info, index.result());
+    LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
     if (index.result()->is_constant()) {
-      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
+      cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
       __ branch(lir_cond_belowEqual, T_INT, stub);
     } else {
-      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
+      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
                   java_nio_Buffer::limit_offset(), T_INT, info);
       __ branch(lir_cond_aboveEqual, T_INT, stub);
     }
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -300,6 +300,8 @@
   LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                LIRItem& base, LIRItem& offset, LIRItem& value);
 
+  LIR_Opr access_resolve(DecoratorSet decorators, LIR_Opr obj);
+
   // These need to guarantee JMM volatile semantics are preserved on each platform
   // and requires one implementation per architecture.
   LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -55,8 +55,9 @@
 #include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
+#include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/frame.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/threadCritical.hpp"
--- a/src/hotspot/share/ci/ciField.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/ci/ciField.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -31,7 +31,7 @@
 #include "interpreter/linkResolver.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 
 // ciField
@@ -222,9 +222,9 @@
   // Even if general trusting is disabled, trust system-built closures in these packages.
   if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke"))
     return true;
-  // Trust VM anonymous classes. They are private API (sun.misc.Unsafe) and can't be serialized,
-  // so there is no hacking of finals going on with them.
-  if (holder->is_anonymous())
+  // Trust VM unsafe anonymous classes. They are private API (jdk.internal.misc.Unsafe)
+  // and can't be serialized, so there is no hacking of finals going on with them.
+  if (holder->is_unsafe_anonymous())
     return true;
   // Trust final fields in all boxed classes
   if (holder->is_box_klass())
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -33,7 +33,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/fieldStreams.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 
@@ -62,7 +62,7 @@
   _nonstatic_field_size = ik->nonstatic_field_size();
   _has_nonstatic_fields = ik->has_nonstatic_fields();
   _has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
-  _is_anonymous = ik->is_anonymous();
+  _is_unsafe_anonymous = ik->is_unsafe_anonymous();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
   _has_injected_fields = -1;
   _implementor = NULL; // we will fill these lazily
@@ -73,13 +73,13 @@
   // InstanceKlass are created for both weak and strong metadata.  Ensuring this metadata
   // alive covers the cases where there are weak roots without performance cost.
   oop holder = ik->holder_phantom();
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     // Though ciInstanceKlass records class loader oop, it's not enough to keep
-    // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
-    // It is enough to record a ciObject, since cached elements are never removed
+    // VM unsafe anonymous classes alive (loader == NULL). Klass holder should
+    // be used instead. It is enough to record a ciObject, since cached elements are never removed
     // during ciObjectFactory lifetime. ciObjectFactory itself is created for
     // every compilation and lives for the whole duration of the compilation.
-    assert(holder != NULL, "holder of anonymous class is the mirror which is never null");
+    assert(holder != NULL, "holder of unsafe anonymous class is the mirror which is never null");
     (void)CURRENT_ENV->get_object(holder);
   }
 
@@ -122,7 +122,7 @@
   _has_nonstatic_fields = false;
   _nonstatic_fields = NULL;
   _has_injected_fields = -1;
-  _is_anonymous = false;
+  _is_unsafe_anonymous = false;
   _loader = loader;
   _protection_domain = protection_domain;
   _is_shared = false;
@@ -615,12 +615,12 @@
   return impl;
 }
 
-ciInstanceKlass* ciInstanceKlass::host_klass() {
+ciInstanceKlass* ciInstanceKlass::unsafe_anonymous_host() {
   assert(is_loaded(), "must be loaded");
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     VM_ENTRY_MARK
-    Klass* host_klass = get_instanceKlass()->host_klass();
-    return CURRENT_ENV->get_instance_klass(host_klass);
+    Klass* unsafe_anonymous_host = get_instanceKlass()->unsafe_anonymous_host();
+    return CURRENT_ENV->get_instance_klass(unsafe_anonymous_host);
   }
   return NULL;
 }
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
   bool                   _has_subklass;
   bool                   _has_nonstatic_fields;
   bool                   _has_nonstatic_concrete_methods;
-  bool                   _is_anonymous;
+  bool                   _is_unsafe_anonymous;
 
   ciFlags                _flags;
   jint                   _nonstatic_field_size;
@@ -179,8 +179,8 @@
     return _has_nonstatic_concrete_methods;
   }
 
-  bool is_anonymous() {
-    return _is_anonymous;
+  bool is_unsafe_anonymous() {
+    return _is_unsafe_anonymous;
   }
 
   ciInstanceKlass* get_canonical_holder(int offset);
@@ -260,7 +260,7 @@
     return NULL;
   }
 
-  ciInstanceKlass* host_klass();
+  ciInstanceKlass* unsafe_anonymous_host();
 
   bool can_be_instantiated() {
     assert(is_loaded(), "must be loaded");
--- a/src/hotspot/share/ci/ciReplay.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/ci/ciReplay.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/method.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/macros.hpp"
 
--- a/src/hotspot/share/classfile/classFileParser.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -2091,7 +2091,7 @@
   // Privileged code can use all annotations.  Other code silently drops some.
   const bool privileged = loader_data->is_the_null_class_loader_data() ||
                           loader_data->is_platform_class_loader_data() ||
-                          loader_data->is_anonymous();
+                          loader_data->is_unsafe_anonymous();
   switch (sid) {
     case vmSymbols::VM_SYMBOL_ENUM_NAME(reflect_CallerSensitive_signature): {
       if (_location != _in_method)  break;  // only allow for methods
@@ -5600,7 +5600,7 @@
 
   ik->set_this_class_index(_this_class_index);
 
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     // _this_class_index is a CONSTANT_Class entry that refers to this
     // anonymous class itself. If this class needs to refer to its own methods or
     // fields, it would use a CONSTANT_MethodRef, etc, which would reference
@@ -5616,9 +5616,9 @@
   ik->set_has_nonstatic_concrete_methods(_has_nonstatic_concrete_methods);
   ik->set_declares_nonstatic_concrete_methods(_declares_nonstatic_concrete_methods);
 
-  if (_host_klass != NULL) {
-    assert (ik->is_anonymous(), "should be the same");
-    ik->set_host_klass(_host_klass);
+  if (_unsafe_anonymous_host != NULL) {
+    assert (ik->is_unsafe_anonymous(), "should be the same");
+    ik->set_unsafe_anonymous_host(_unsafe_anonymous_host);
   }
 
   // Set PackageEntry for this_klass
@@ -5769,15 +5769,15 @@
   debug_only(ik->verify();)
 }
 
-// For an anonymous class that is in the unnamed package, move it to its host class's
+// For an unsafe anonymous class that is in the unnamed package, move it to its host class's
 // package by prepending its host class's package name to its class name and setting
 // its _class_name field.
-void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, TRAPS) {
+void ClassFileParser::prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS) {
   ResourceMark rm(THREAD);
   assert(strrchr(_class_name->as_C_string(), '/') == NULL,
-         "Anonymous class should not be in a package");
+         "Unsafe anonymous class should not be in a package");
   const char* host_pkg_name =
-    ClassLoader::package_from_name(host_klass->name()->as_C_string(), NULL);
+    ClassLoader::package_from_name(unsafe_anonymous_host->name()->as_C_string(), NULL);
 
   if (host_pkg_name != NULL) {
     size_t host_pkg_len = strlen(host_pkg_name);
@@ -5787,7 +5787,7 @@
     // Copy host package name and trailing /.
     strncpy(new_anon_name, host_pkg_name, host_pkg_len);
     new_anon_name[host_pkg_len] = '/';
-    // Append anonymous class name. The anonymous class name can contain odd
+    // Append unsafe anonymous class name. The unsafe anonymous class name can contain odd
     // characters.  So, do a strncpy instead of using sprintf("%s...").
     strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len);
 
@@ -5802,19 +5802,19 @@
 // nothing.  If the anonymous class is in the unnamed package then move it to its
 // host's package.  If the classes are in different packages then throw an IAE
 // exception.
-void ClassFileParser::fix_anonymous_class_name(TRAPS) {
-  assert(_host_klass != NULL, "Expected an anonymous class");
+void ClassFileParser::fix_unsafe_anonymous_class_name(TRAPS) {
+  assert(_unsafe_anonymous_host != NULL, "Expected an unsafe anonymous class");
 
   const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(),
                                                _class_name->utf8_length(), '/');
   if (anon_last_slash == NULL) {  // Unnamed package
-    prepend_host_package_name(_host_klass, CHECK);
+    prepend_host_package_name(_unsafe_anonymous_host, CHECK);
   } else {
-    if (!_host_klass->is_same_class_package(_host_klass->class_loader(), _class_name)) {
+    if (!_unsafe_anonymous_host->is_same_class_package(_unsafe_anonymous_host->class_loader(), _class_name)) {
       ResourceMark rm(THREAD);
       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
         err_msg("Host class %s and anonymous class %s are in different packages",
-        _host_klass->name()->as_C_string(), _class_name->as_C_string()));
+        _unsafe_anonymous_host->name()->as_C_string(), _class_name->as_C_string()));
     }
   }
 }
@@ -5834,14 +5834,14 @@
                                  Symbol* name,
                                  ClassLoaderData* loader_data,
                                  Handle protection_domain,
-                                 const InstanceKlass* host_klass,
+                                 const InstanceKlass* unsafe_anonymous_host,
                                  GrowableArray<Handle>* cp_patches,
                                  Publicity pub_level,
                                  TRAPS) :
   _stream(stream),
   _requested_name(name),
   _loader_data(loader_data),
-  _host_klass(host_klass),
+  _unsafe_anonymous_host(unsafe_anonymous_host),
   _cp_patches(cp_patches),
   _num_patched_klasses(0),
   _max_num_patched_klasses(0),
@@ -6149,8 +6149,8 @@
   // if this is an anonymous class fix up its name if it's in the unnamed
   // package.  Otherwise, throw IAE if it is in a different package than
   // its host class.
-  if (_host_klass != NULL) {
-    fix_anonymous_class_name(CHECK);
+  if (_unsafe_anonymous_host != NULL) {
+    fix_unsafe_anonymous_class_name(CHECK);
   }
 
   // Verification prevents us from creating names with dots in them, this
@@ -6175,9 +6175,9 @@
         warning("DumpLoadedClassList and CDS are not supported in exploded build");
         DumpLoadedClassList = NULL;
       } else if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
-          _host_klass == NULL) {
+                 _unsafe_anonymous_host == NULL) {
         // Only dump the classes that can be stored into CDS archive.
-        // Anonymous classes such as generated LambdaForm classes are also not included.
+        // Unsafe anonymous classes such as generated LambdaForm classes are also not included.
         oop class_loader = _loader_data->class_loader();
         ResourceMark rm(THREAD);
         bool skip = false;
--- a/src/hotspot/share/classfile/classFileParser.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classFileParser.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -82,7 +82,7 @@
   const Symbol* _requested_name;
   Symbol* _class_name;
   mutable ClassLoaderData* _loader_data;
-  const InstanceKlass* _host_klass;
+  const InstanceKlass* _unsafe_anonymous_host;
   GrowableArray<Handle>* _cp_patches; // overrides for CP entries
   int _num_patched_klasses;
   int _max_num_patched_klasses;
@@ -173,8 +173,8 @@
                                   ConstantPool* cp,
                                   TRAPS);
 
-  void prepend_host_package_name(const InstanceKlass* host_klass, TRAPS);
-  void fix_anonymous_class_name(TRAPS);
+  void prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS);
+  void fix_unsafe_anonymous_class_name(TRAPS);
 
   void fill_instance_klass(InstanceKlass* ik, bool cf_changed_in_CFLH, TRAPS);
   void set_klass(InstanceKlass* instance);
@@ -501,7 +501,7 @@
                   Symbol* name,
                   ClassLoaderData* loader_data,
                   Handle protection_domain,
-                  const InstanceKlass* host_klass,
+                  const InstanceKlass* unsafe_anonymous_host,
                   GrowableArray<Handle>* cp_patches,
                   Publicity pub_level,
                   TRAPS);
@@ -524,10 +524,10 @@
   u2 this_class_index() const { return _this_class_index; }
   u2 super_class_index() const { return _super_class_index; }
 
-  bool is_anonymous() const { return _host_klass != NULL; }
+  bool is_unsafe_anonymous() const { return _unsafe_anonymous_host != NULL; }
   bool is_interface() const { return _access_flags.is_interface(); }
 
-  const InstanceKlass* host_klass() const { return _host_klass; }
+  const InstanceKlass* unsafe_anonymous_host() const { return _unsafe_anonymous_host; }
   const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
   ClassLoaderData* loader_data() const { return _loader_data; }
   const Symbol* class_name() const { return _class_name; }
--- a/src/hotspot/share/classfile/classLoader.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoader.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1400,7 +1400,7 @@
                                                            name,
                                                            loader_data,
                                                            protection_domain,
-                                                           NULL, // host_klass
+                                                           NULL, // unsafe_anonymous_host
                                                            NULL, // cp_patches
                                                            THREAD);
   if (HAS_PENDING_EXCEPTION) {
@@ -1443,8 +1443,8 @@
   assert(DumpSharedSpaces, "sanity");
   assert(stream != NULL, "sanity");
 
-  if (ik->is_anonymous()) {
-    // We do not archive anonymous classes.
+  if (ik->is_unsafe_anonymous()) {
+    // We do not archive unsafe anonymous classes.
     return;
   }
 
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -141,16 +141,16 @@
   _name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id, CATCH);
 }
 
-ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
+ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous) :
   _metaspace(NULL),
   _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
                             Monitor::_safepoint_check_never)),
-  _unloading(false), _is_anonymous(is_anonymous),
+  _unloading(false), _is_unsafe_anonymous(is_unsafe_anonymous),
   _modified_oops(true), _accumulated_modified_oops(false),
-  // An anonymous class loader data doesn't have anything to keep
-  // it from being unloaded during parsing of the anonymous class.
+  // An unsafe anonymous class loader data doesn't have anything to keep
+  // it from being unloaded during parsing of the unsafe anonymous class.
   // The null-class-loader should always be kept alive.
-  _keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0),
+  _keep_alive((is_unsafe_anonymous || h_class_loader.is_null()) ? 1 : 0),
   _claimed(0),
   _handles(),
   _klasses(NULL), _packages(NULL), _modules(NULL), _unnamed_module(NULL), _dictionary(NULL),
@@ -164,14 +164,14 @@
     _class_loader_klass = h_class_loader->klass();
   }
 
-  if (!is_anonymous) {
-    // The holder is initialized later for anonymous classes, and before calling anything
+  if (!is_unsafe_anonymous) {
+    // The holder is initialized later for unsafe anonymous classes, and before calling anything
     // that call class_loader().
     initialize_holder(h_class_loader);
 
-    // A ClassLoaderData created solely for an anonymous class should never have a
+    // A ClassLoaderData created solely for an unsafe anonymous class should never have a
     // ModuleEntryTable or PackageEntryTable created for it. The defining package
-    // and module for an anonymous class will be found in its host class.
+    // and module for an unsafe anonymous class will be found in its host class.
     _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
     if (h_class_loader.is_null()) {
       // Create unnamed module for boot loader
@@ -287,20 +287,20 @@
   return (int) Atomic::cmpxchg(1, &_claimed, 0) == 0;
 }
 
-// Anonymous classes have their own ClassLoaderData that is marked to keep alive
+// Unsafe anonymous classes have their own ClassLoaderData that is marked to keep alive
 // while the class is being parsed, and if the class appears on the module fixup list.
-// Due to the uniqueness that no other class shares the anonymous class' name or
-// ClassLoaderData, no other non-GC thread has knowledge of the anonymous class while
+// Due to the uniqueness that no other class shares the unsafe anonymous class' name or
+// ClassLoaderData, no other non-GC thread has knowledge of the unsafe anonymous class while
 // it is being defined, therefore _keep_alive is not volatile or atomic.
 void ClassLoaderData::inc_keep_alive() {
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     assert(_keep_alive >= 0, "Invalid keep alive increment count");
     _keep_alive++;
   }
 }
 
 void ClassLoaderData::dec_keep_alive() {
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     assert(_keep_alive > 0, "Invalid keep alive decrement count");
     _keep_alive--;
   }
@@ -402,20 +402,20 @@
   // Do not need to record dependency if the dependency is to a class whose
   // class loader data is never freed.  (i.e. the dependency's class loader
   // is one of the three builtin class loaders and the dependency is not
-  // anonymous.)
+  // unsafe anonymous.)
   if (to_cld->is_permanent_class_loader_data()) {
     return;
   }
 
   oop to;
-  if (to_cld->is_anonymous()) {
-    // Just return if an anonymous class is attempting to record a dependency
-    // to itself.  (Note that every anonymous class has its own unique class
+  if (to_cld->is_unsafe_anonymous()) {
+    // Just return if an unsafe anonymous class is attempting to record a dependency
+    // to itself.  (Note that every unsafe anonymous class has its own unique class
     // loader data.)
     if (to_cld == from_cld) {
       return;
     }
-    // Anonymous class dependencies are through the mirror.
+    // Unsafe anonymous class dependencies are through the mirror.
     to = k->java_mirror();
   } else {
     to = to_cld->class_loader();
@@ -640,7 +640,7 @@
 const int _default_loader_dictionary_size = 107;
 
 Dictionary* ClassLoaderData::create_dictionary() {
-  assert(!is_anonymous(), "anonymous class loader data do not have a dictionary");
+  assert(!is_unsafe_anonymous(), "unsafe anonymous class loader data do not have a dictionary");
   int size;
   bool resizable = false;
   if (_the_null_class_loader_data == NULL) {
@@ -677,7 +677,7 @@
 
 // Unloading support
 bool ClassLoaderData::is_alive() const {
-  bool alive = keep_alive()         // null class loader and incomplete anonymous klasses.
+  bool alive = keep_alive()         // null class loader and incomplete unsafe anonymous klasses.
       || (_holder.peek() != NULL);  // and not cleaned by the GC weak handle processing.
 
   return alive;
@@ -767,13 +767,13 @@
 
 // Returns true if this class loader data is for the app class loader
 // or a user defined system class loader.  (Note that the class loader
-// data may be anonymous.)
+// data may be unsafe anonymous.)
 bool ClassLoaderData::is_system_class_loader_data() const {
   return SystemDictionary::is_system_class_loader(class_loader());
 }
 
 // Returns true if this class loader data is for the platform class loader.
-// (Note that the class loader data may be anonymous.)
+// (Note that the class loader data may be unsafe anonymous.)
 bool ClassLoaderData::is_platform_class_loader_data() const {
   return SystemDictionary::is_platform_class_loader(class_loader());
 }
@@ -781,7 +781,7 @@
 // Returns true if the class loader for this class loader data is one of
 // the 3 builtin (boot application/system or platform) class loaders,
 // including a user-defined system class loader.  Note that if the class
-// loader data is for an anonymous class then it may get freed by a GC
+// loader data is for an unsafe anonymous class then it may get freed by a GC
 // even if its class loader is one of these loaders.
 bool ClassLoaderData::is_builtin_class_loader_data() const {
   return (is_boot_class_loader_data() ||
@@ -790,10 +790,10 @@
 }
 
 // Returns true if this class loader data is a class loader data
-// that is not ever freed by a GC.  It must be one of the builtin
-// class loaders and not anonymous.
+// that is not ever freed by a GC.  It must be the CLD for one of the builtin
+// class loaders and not the CLD for an unsafe anonymous class.
 bool ClassLoaderData::is_permanent_class_loader_data() const {
-  return is_builtin_class_loader_data() && !is_anonymous();
+  return is_builtin_class_loader_data() && !is_unsafe_anonymous();
 }
 
 ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
@@ -810,8 +810,8 @@
       if (this == the_null_class_loader_data()) {
         assert (class_loader() == NULL, "Must be");
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
-      } else if (is_anonymous()) {
-        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+      } else if (is_unsafe_anonymous()) {
+        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::UnsafeAnonymousMetaspaceType);
       } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
       } else {
@@ -962,8 +962,8 @@
   }
 }
 
-// These anonymous class loaders are to contain classes used for JSR292
-ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(Handle loader) {
+// These CLDs are to contain unsafe anonymous classes used for JSR292
+ClassLoaderData* ClassLoaderData::unsafe_anonymous_class_loader_data(Handle loader) {
   // Add a new class loader data to the graph.
   return ClassLoaderDataGraph::add(loader, true);
 }
@@ -1005,8 +1005,8 @@
     // loader data: 0xsomeaddr of 'bootstrap'
     out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id());
   }
-  if (is_anonymous()) {
-    out->print(" anonymous");
+  if (is_unsafe_anonymous()) {
+    out->print(" unsafe anonymous");
   }
 }
 
@@ -1014,7 +1014,7 @@
 void ClassLoaderData::print_on(outputStream* out) const {
   out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: %s {",
               p2i(this), p2i(_class_loader.ptr_raw()), loader_name_and_id());
-  if (is_anonymous()) out->print(" anonymous");
+  if (is_unsafe_anonymous()) out->print(" unsafe anonymous");
   if (claimed()) out->print(" claimed");
   if (is_unloading()) out->print(" unloading");
   out->print(" metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null()));
@@ -1032,8 +1032,8 @@
   assert_locked_or_safepoint(_metaspace_lock);
   oop cl = class_loader();
 
-  guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
-  guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_anonymous(), "must be");
+  guarantee(this == class_loader_data(cl) || is_unsafe_anonymous(), "Must be the same");
+  guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_unsafe_anonymous(), "must be");
 
   // Verify the integrity of the allocated space.
   if (metaspace_or_null() != NULL) {
@@ -1069,14 +1069,14 @@
 
 // Add a new class loader data node to the list.  Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anonymous) {
+ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsafe_anonymous) {
   NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
                                      // ClassLoaderData in the graph since the CLD
                                      // contains oops in _handles that must be walked.
 
-  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
+  ClassLoaderData* cld = new ClassLoaderData(loader, is_unsafe_anonymous);
 
-  if (!is_anonymous) {
+  if (!is_unsafe_anonymous) {
     // First, Atomically set it
     ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL);
     if (old != NULL) {
@@ -1109,8 +1109,8 @@
   } while (true);
 }
 
-ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
-  ClassLoaderData* loader_data = add_to_graph(loader, is_anonymous);
+ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymous) {
+  ClassLoaderData* loader_data = add_to_graph(loader, is_unsafe_anonymous);
   // Initialize _name and _name_and_id after the loader data is added to the
   // CLDG because adding the Symbol for _name and _name_and_id might safepoint.
   if (loader.not_null()) {
@@ -1119,28 +1119,6 @@
   return loader_data;
 }
 
-void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    cld->oops_do(f, must_claim);
-  }
-}
-
-void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, bool must_claim) {
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    if (cld->keep_alive()) {
-      cld->oops_do(f, must_claim);
-    }
-  }
-}
-
-void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, bool must_claim) {
-  if (ClassUnloading) {
-    keep_alive_oops_do(f, must_claim);
-  } else {
-    oops_do(f, must_claim);
-  }
-}
-
 void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
   for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) {
     cl->do_cld(cld);
@@ -1166,13 +1144,9 @@
   }
 }
 
-void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
-  roots_cld_do(cl, NULL);
-}
-
 void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
   if (ClassUnloading) {
-    keep_alive_cld_do(cl);
+    roots_cld_do(cl, NULL);
   } else {
     cld_do(cl);
   }
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -92,29 +92,24 @@
   static volatile size_t  _num_instance_classes;
   static volatile size_t  _num_array_classes;
 
-  static ClassLoaderData* add_to_graph(Handle class_loader, bool anonymous);
-  static ClassLoaderData* add(Handle class_loader, bool anonymous);
+  static ClassLoaderData* add_to_graph(Handle class_loader, bool is_unsafe_anonymous);
+  static ClassLoaderData* add(Handle class_loader, bool is_unsafe_anonymous);
 
  public:
   static ClassLoaderData* find_or_create(Handle class_loader);
   static void clean_module_and_package_info();
   static void purge();
   static void clear_claimed_marks();
-  // oops do
-  static void oops_do(OopClosure* f, bool must_claim);
-  static void keep_alive_oops_do(OopClosure* blk, bool must_claim);
-  static void always_strong_oops_do(OopClosure* blk, bool must_claim);
-  // cld do
+  // Iteration through CLDG inside a safepoint; GC support
   static void cld_do(CLDClosure* cl);
   static void cld_unloading_do(CLDClosure* cl);
   static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
-  static void keep_alive_cld_do(CLDClosure* cl);
   static void always_strong_cld_do(CLDClosure* cl);
   // klass do
   // Walking classes through the ClassLoaderDataGraph include array classes.  It also includes
   // classes that are allocated but not loaded, classes that have errors, and scratch classes
   // for redefinition.  These classes are removed during the next class unloading.
-  // Walking the ClassLoaderDataGraph also includes anonymous classes.
+  // Walking the ClassLoaderDataGraph also includes unsafe anonymous classes.
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
   static void methods_do(void f(Method*));
@@ -238,16 +233,17 @@
                                     // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
-  bool _is_anonymous;      // if this CLD is for an anonymous class
+  bool _is_unsafe_anonymous; // CLD is dedicated to one class and that class determines the CLDs lifecycle.
+                             // For example, an unsafe anonymous class.
 
   // Remembered sets support for the oops in the class loader data.
   bool _modified_oops;             // Card Table Equivalent (YC/CMS support)
   bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
 
   s2 _keep_alive;          // if this CLD is kept alive.
-                           // Used for anonymous classes and the boot class
+                           // Used for unsafe anonymous classes and the boot class
                            // loader. _keep_alive does not need to be volatile or
-                           // atomic since there is one unique CLD per anonymous class.
+                           // atomic since there is one unique CLD per unsafe anonymous class.
 
   volatile int _claimed;   // true if claimed, for example during GC traces.
                            // To avoid applying oop closure more than once.
@@ -283,7 +279,7 @@
   void set_next(ClassLoaderData* next) { _next = next; }
   ClassLoaderData* next() const        { return _next; }
 
-  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
+  ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous);
   ~ClassLoaderData();
 
   // The CLD are not placed in the Heap, so the Card Table or
@@ -337,7 +333,7 @@
 
   Mutex* metaspace_lock() const { return _metaspace_lock; }
 
-  bool is_anonymous() const { return _is_anonymous; }
+  bool is_unsafe_anonymous() const { return _is_unsafe_anonymous; }
 
   static void init_null_class_loader_data();
 
@@ -346,15 +342,15 @@
   }
 
   // Returns true if this class loader data is for the system class loader.
-  // (Note that the class loader data may be anonymous.)
+  // (Note that the class loader data may be unsafe anonymous.)
   bool is_system_class_loader_data() const;
 
   // Returns true if this class loader data is for the platform class loader.
-  // (Note that the class loader data may be anonymous.)
+  // (Note that the class loader data may be unsafe anonymous.)
   bool is_platform_class_loader_data() const;
 
   // Returns true if this class loader data is for the boot class loader.
-  // (Note that the class loader data may be anonymous.)
+  // (Note that the class loader data may be unsafe anonymous.)
   inline bool is_boot_class_loader_data() const;
 
   bool is_builtin_class_loader_data() const;
@@ -372,7 +368,7 @@
     return _unloading;
   }
 
-  // Used to refcount an anonymous class's CLD in order to
+  // Used to refcount an unsafe anonymous class's CLD in order to
   // indicate their aliveness.
   void inc_keep_alive();
   void dec_keep_alive();
@@ -412,7 +408,7 @@
 
   static ClassLoaderData* class_loader_data(oop loader);
   static ClassLoaderData* class_loader_data_or_null(oop loader);
-  static ClassLoaderData* anonymous_class_loader_data(Handle loader);
+  static ClassLoaderData* unsafe_anonymous_class_loader_data(Handle loader);
 
   // Returns Klass* of associated class loader, or NULL if associated loader is 'bootstrap'.
   // Also works if unloading.
--- a/src/hotspot/share/classfile/classLoaderExt.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -50,6 +50,7 @@
 
 jshort ClassLoaderExt::_app_class_paths_start_index = ClassLoaderExt::max_classpath_index;
 jshort ClassLoaderExt::_app_module_paths_start_index = ClassLoaderExt::max_classpath_index;
+jshort ClassLoaderExt::_max_used_path_index = 0;
 bool ClassLoaderExt::_has_app_classes = false;
 bool ClassLoaderExt::_has_platform_classes = false;
 
@@ -242,6 +243,9 @@
     classloader_type = ClassLoader::PLATFORM_LOADER;
     ClassLoaderExt::set_has_platform_classes();
   }
+  if (classpath_index > ClassLoaderExt::max_used_path_index()) {
+    ClassLoaderExt::set_max_used_path_index(classpath_index);
+  }
   result->set_shared_classpath_index(classpath_index);
   result->set_class_loader_type(classloader_type);
 }
@@ -294,7 +298,7 @@
                                                            name,
                                                            loader_data,
                                                            protection_domain,
-                                                           NULL, // host_klass
+                                                           NULL, // unsafe_anonymous_host
                                                            NULL, // cp_patches
                                                            THREAD);
 
--- a/src/hotspot/share/classfile/classLoaderExt.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderExt.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -49,6 +49,8 @@
   static jshort _app_class_paths_start_index;
   // index of first modular JAR in shared modulepath entry table
   static jshort _app_module_paths_start_index;
+  // the largest path index being used during CDS dump time
+  static jshort _max_used_path_index;
 
   static bool _has_app_classes;
   static bool _has_platform_classes;
@@ -91,6 +93,12 @@
 
   static jshort app_module_paths_start_index() { return _app_module_paths_start_index; }
 
+  static jshort max_used_path_index() { return _max_used_path_index; }
+
+  static void set_max_used_path_index(jshort used_index) {
+    _max_used_path_index = used_index;
+  }
+
   static void init_paths_start_index(jshort app_start) {
     _app_class_paths_start_index = app_start;
   }
--- a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -128,7 +128,7 @@
 
 class LoaderTreeNode : public ResourceObj {
 
-  // We walk the CLDG and, for each CLD which is non-anonymous, add
+  // We walk the CLDG and, for each CLD which is non-unsafe_anonymous, add
   // a tree node.
   // To add a node we need its parent node; if the parent node does not yet
   // exist - because we have not yet encountered the CLD for the parent loader -
@@ -219,7 +219,7 @@
       if (print_classes) {
         if (_classes != NULL) {
           for (LoadedClassInfo* lci = _classes; lci; lci = lci->_next) {
-            // Non-anonymous classes should live in the primary CLD of its loader
+            // Non-unsafe anonymous classes should live in the primary CLD of its loader
             assert(lci->_cld == _cld, "must be");
 
             branchtracker.print(st);
@@ -252,12 +252,12 @@
           for (LoadedClassInfo* lci = _anon_classes; lci; lci = lci->_next) {
             branchtracker.print(st);
             if (lci == _anon_classes) { // first iteration
-              st->print("%*s ", indentation, "Anonymous Classes:");
+              st->print("%*s ", indentation, "Unsafe Anonymous Classes:");
             } else {
               st->print("%*s ", indentation, "");
             }
             st->print("%s", lci->_klass->external_name());
-            // For anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD.
+            // For unsafe anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD.
             assert(lci->_cld != _cld, "must be");
             if (verbose) {
               st->print("  (Loader Data: " PTR_FORMAT ")", p2i(lci->_cld));
@@ -266,7 +266,7 @@
           }
           branchtracker.print(st);
           st->print("%*s ", indentation, "");
-          st->print_cr("(%u anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es");
+          st->print_cr("(%u unsafe anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es");
 
           // Empty line
           branchtracker.print(st);
@@ -318,14 +318,14 @@
     _next = info;
   }
 
-  void add_classes(LoadedClassInfo* first_class, int num_classes, bool anonymous) {
-    LoadedClassInfo** p_list_to_add_to = anonymous ? &_anon_classes : &_classes;
+  void add_classes(LoadedClassInfo* first_class, int num_classes, bool is_unsafe_anonymous) {
+    LoadedClassInfo** p_list_to_add_to = is_unsafe_anonymous ? &_anon_classes : &_classes;
     // Search tail.
     while ((*p_list_to_add_to) != NULL) {
       p_list_to_add_to = &(*p_list_to_add_to)->_next;
     }
     *p_list_to_add_to = first_class;
-    if (anonymous) {
+    if (is_unsafe_anonymous) {
       _num_anon_classes += num_classes;
     } else {
       _num_classes += num_classes;
@@ -420,7 +420,7 @@
     LoadedClassCollectClosure lccc(cld);
     const_cast<ClassLoaderData*>(cld)->classes_do(&lccc);
     if (lccc._num_classes > 0) {
-      info->add_classes(lccc._list, lccc._num_classes, cld->is_anonymous());
+      info->add_classes(lccc._list, lccc._num_classes, cld->is_unsafe_anonymous());
     }
   }
 
@@ -480,7 +480,7 @@
     assert(info != NULL, "must be");
 
     // Update CLD in node, but only if this is the primary CLD for this loader.
-    if (cld->is_anonymous() == false) {
+    if (cld->is_unsafe_anonymous() == false) {
       assert(info->cld() == NULL, "there should be only one primary CLD per loader");
       info->set_cld(cld);
     }
--- a/src/hotspot/share/classfile/classLoaderStats.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderStats.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -58,7 +58,7 @@
     cls = *cls_ptr;
   }
 
-  if (!cld->is_anonymous()) {
+  if (!cld->is_unsafe_anonymous()) {
     cls->_cld = cld;
   }
 
@@ -70,7 +70,7 @@
 
   ClassStatsClosure csc;
   cld->classes_do(&csc);
-  if(cld->is_anonymous()) {
+  if(cld->is_unsafe_anonymous()) {
     cls->_anon_classes_count += csc._num_classes;
   } else {
     cls->_classes_count = csc._num_classes;
@@ -79,7 +79,7 @@
 
   ClassLoaderMetaspace* ms = cld->metaspace_or_null();
   if (ms != NULL) {
-    if(cld->is_anonymous()) {
+    if(cld->is_unsafe_anonymous()) {
       cls->_anon_chunk_sz += ms->allocated_chunks_bytes();
       cls->_anon_block_sz += ms->allocated_blocks_bytes();
     } else {
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -27,6 +27,7 @@
 #include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/heapShared.inline.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "oops/compressedOops.inline.hpp"
@@ -280,8 +281,9 @@
 public:
   CompactHashtable_OopIterator(OopClosure *cl) : _closure(cl) {}
   inline void do_value(address base_address, u4 offset) const {
-    narrowOop o = (narrowOop)offset;
-    _closure->do_oop(&o);
+    narrowOop v = (narrowOop)offset;
+    oop obj = HeapShared::decode_with_archived_oop_encoding_mode(v);
+    _closure->do_oop(&obj);
   }
 };
 
--- a/src/hotspot/share/classfile/compactHashtable.inline.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/compactHashtable.inline.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -28,7 +28,8 @@
 #include "classfile/compactHashtable.hpp"
 #include "classfile/javaClasses.hpp"
 #include "memory/allocation.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
+#include "memory/filemap.hpp"
+#include "memory/heapShared.inline.hpp"
 #include "oops/oop.hpp"
 
 template <class T, class N>
@@ -46,8 +47,8 @@
 template <class T, class N>
 inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
                                                 u4 offset, const char* name, int len) {
-  narrowOop obj = (narrowOop)offset;
-  oop string = CompressedOops::decode(obj);
+  narrowOop v = (narrowOop)offset;
+  oop string = HeapShared::decode_with_archived_oop_encoding_mode(v);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
   }
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -885,7 +885,7 @@
     ConstantPool* cp = bpool->create_constant_pool(CHECK);
     if (cp != klass->constants()) {
       // Copy resolved anonymous class into new constant pool.
-      if (klass->is_anonymous()) {
+      if (klass->is_unsafe_anonymous()) {
         cp->klass_at_put(klass->this_class_index(), klass);
       }
       klass->class_loader_data()->add_to_deallocate_list(klass->constants());
--- a/src/hotspot/share/classfile/javaClasses.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -50,7 +50,7 @@
 #include "oops/symbol.hpp"
 #include "oops/typeArrayOop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
@@ -1038,6 +1038,7 @@
     if (m != NULL) {
       // Update the field at _array_klass_offset to point to the relocated array klass.
       oop archived_m = MetaspaceShared::archive_heap_object(m, THREAD);
+      assert(archived_m != NULL, "sanity");
       Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset));
       assert(ak != NULL || t == T_VOID, "should not be NULL");
       if (ak != NULL) {
@@ -1212,7 +1213,7 @@
 bool java_lang_Class::restore_archived_mirror(Klass *k,
                                               Handle class_loader, Handle module,
                                               Handle protection_domain, TRAPS) {
-  oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw());
+  oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw_narrow());
 
   if (m == NULL) {
     return false;
@@ -3785,7 +3786,7 @@
     }
     oop new_resolved_method = k->allocate_instance(CHECK_NULL);
     new_resolved_method->address_field_put(_vmtarget_offset, (address)m());
-    // Add a reference to the loader (actually mirror because anonymous classes will not have
+    // Add a reference to the loader (actually mirror because unsafe anonymous classes will not have
     // distinct loaders) to ensure the metadata is kept alive.
     // This mirror may be different than the one in clazz field.
     new_resolved_method->obj_field_put(_vmholder_offset, m->method_holder()->java_mirror());
@@ -4247,15 +4248,7 @@
 int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset;
 int reflect_ConstantPool::_oop_offset;
 int reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedSystemModules_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedModuleFinder_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedMainModule_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedConfiguration_offset;
-int java_lang_Integer_IntegerCache::_archivedCache_offset;
-int java_lang_module_Configuration::_EMPTY_CONFIGURATION_offset;
-int java_util_ImmutableCollections_ListN::_EMPTY_LIST_offset;
-int java_util_ImmutableCollections_SetN::_EMPTY_SET_offset;
-int java_util_ImmutableCollections_MapN::_EMPTY_MAP_offset;
+
 
 #define STACKTRACEELEMENT_FIELDS_DO(macro) \
   macro(declaringClassObject_offset,  k, "declaringClassObject", class_signature, false); \
@@ -4418,99 +4411,6 @@
   return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes();
 }
 
-#define INTEGERCACHE_FIELDS_DO(macro) \
-  macro(_archivedCache_offset,  k, "archivedCache",  java_lang_Integer_array_signature, true)
-
-void java_lang_Integer_IntegerCache::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::Integer_IntegerCache_klass();
-  assert(k != NULL, "must be loaded");
-  INTEGERCACHE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_lang_Integer_IntegerCache::serialize_offsets(SerializeClosure* f) {
-  INTEGERCACHE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define ARCHIVEDMODULEGRAPH_FIELDS_DO(macro) \
-  macro(_archivedSystemModules_offset,      k, "archivedSystemModules", systemModules_signature, true); \
-  macro(_archivedModuleFinder_offset,       k, "archivedModuleFinder",  moduleFinder_signature,  true); \
-  macro(_archivedMainModule_offset,         k, "archivedMainModule",    string_signature,        true); \
-  macro(_archivedConfiguration_offset,      k, "archivedConfiguration", configuration_signature, true)
-
-void jdk_internal_module_ArchivedModuleGraph::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ArchivedModuleGraph_klass();
-  assert(k != NULL, "must be loaded");
-  ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void jdk_internal_module_ArchivedModuleGraph::serialize_offsets(SerializeClosure* f) {
-  ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define CONFIGURATION_FIELDS_DO(macro) \
-  macro(_EMPTY_CONFIGURATION_offset, k, "EMPTY_CONFIGURATION", configuration_signature, true)
-
-void java_lang_module_Configuration::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::Configuration_klass();
-  assert(k != NULL, "must be loaded");
-  CONFIGURATION_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_lang_module_Configuration::serialize_offsets(SerializeClosure* f) {
-  CONFIGURATION_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define LISTN_FIELDS_DO(macro) \
-  macro(_EMPTY_LIST_offset, k, "EMPTY_LIST", list_signature, true)
-
-void java_util_ImmutableCollections_ListN::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ImmutableCollections_ListN_klass();
-  assert(k != NULL, "must be loaded");
-  LISTN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_util_ImmutableCollections_ListN::serialize_offsets(SerializeClosure* f) {
-  LISTN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define SETN_FIELDS_DO(macro) \
-  macro(_EMPTY_SET_offset, k, "EMPTY_SET", set_signature, true)
-
-void java_util_ImmutableCollections_SetN::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ImmutableCollections_SetN_klass();
-  assert(k != NULL, "must be loaded");
-  SETN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_util_ImmutableCollections_SetN::serialize_offsets(SerializeClosure* f) {
-  SETN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define MAPN_FIELDS_DO(macro) \
-  macro(_EMPTY_MAP_offset, k, "EMPTY_MAP", map_signature, true)
-
-void java_util_ImmutableCollections_MapN::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ImmutableCollections_MapN_klass();
-  assert(k != NULL, "must be loaded");
-  MAPN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_util_ImmutableCollections_MapN::serialize_offsets(SerializeClosure* f) {
-  MAPN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
 // Compute hard-coded offsets
 // Invoked before SystemDictionary::initialize, so pre-loaded classes
 // are not available to determine the offset_of_static_fields.
--- a/src/hotspot/share/classfile/javaClasses.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -82,12 +82,6 @@
   f(java_lang_StackFrameInfo) \
   f(java_lang_LiveStackFrameInfo) \
   f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
-  f(jdk_internal_module_ArchivedModuleGraph) \
-  f(java_lang_Integer_IntegerCache) \
-  f(java_lang_module_Configuration) \
-  f(java_util_ImmutableCollections_ListN) \
-  f(java_util_ImmutableCollections_MapN) \
-  f(java_util_ImmutableCollections_SetN) \
   //end
 
 #define BASIC_JAVA_CLASSES_DO(f) \
@@ -1531,66 +1525,6 @@
   static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
-class java_lang_Integer_IntegerCache: AllStatic {
- private:
-  static int _archivedCache_offset;
- public:
-  static int archivedCache_offset()  { return _archivedCache_offset; }
-  static void compute_offsets();
-  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class jdk_internal_module_ArchivedModuleGraph: AllStatic {
- private:
-  static int _archivedSystemModules_offset;
-  static int _archivedModuleFinder_offset;
-  static int _archivedMainModule_offset;
-  static int _archivedConfiguration_offset;
- public:
-  static int  archivedSystemModules_offset()      { return _archivedSystemModules_offset; }
-  static int  archivedModuleFinder_offset()       { return _archivedModuleFinder_offset; }
-  static int  archivedMainModule_offset()         { return _archivedMainModule_offset; }
-  static int  archivedConfiguration_offset()      { return _archivedConfiguration_offset; }
-  static void compute_offsets();
-  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_lang_module_Configuration: AllStatic {
- private:
-  static int _EMPTY_CONFIGURATION_offset;
- public:
-  static int EMPTY_CONFIGURATION_offset() { return _EMPTY_CONFIGURATION_offset; }
-  static void compute_offsets();
-  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_util_ImmutableCollections_ListN : AllStatic {
- private:
-  static int _EMPTY_LIST_offset;
- public:
-  static int EMPTY_LIST_offset() { return _EMPTY_LIST_offset; }
-  static void compute_offsets();
-  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_util_ImmutableCollections_SetN : AllStatic {
- private:
-  static int _EMPTY_SET_offset;
- public:
-  static int EMPTY_SET_offset() { return _EMPTY_SET_offset; }
-  static void compute_offsets();
-  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_util_ImmutableCollections_MapN : AllStatic {
- private:
-  static int _EMPTY_MAP_offset;
- public:
-  static int EMPTY_MAP_offset() { return _EMPTY_MAP_offset; }
-  static void compute_offsets();
-  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
 // Use to declare fields that need to be injected into Java classes
 // for the JVM to use.  The name_index and signature_index are
 // declared in vmSymbols.  The may_be_java flag is used to declare
--- a/src/hotspot/share/classfile/klassFactory.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/klassFactory.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -183,7 +183,7 @@
                                                 Symbol* name,
                                                 ClassLoaderData* loader_data,
                                                 Handle protection_domain,
-                                                const InstanceKlass* host_klass,
+                                                const InstanceKlass* unsafe_anonymous_host,
                                                 GrowableArray<Handle>* cp_patches,
                                                 TRAPS) {
   assert(stream != NULL, "invariant");
@@ -201,7 +201,7 @@
   THREAD->statistical_info().incr_define_class_count();
 
   // Skip this processing for VM anonymous classes
-  if (host_klass == NULL) {
+  if (unsafe_anonymous_host == NULL) {
     stream = check_class_file_load_hook(stream,
                                         name,
                                         loader_data,
@@ -214,7 +214,7 @@
                          name,
                          loader_data,
                          protection_domain,
-                         host_klass,
+                         unsafe_anonymous_host,
                          cp_patches,
                          ClassFileParser::BROADCAST, // publicity level
                          CHECK_NULL);
--- a/src/hotspot/share/classfile/klassFactory.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/klassFactory.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@
                                            Symbol* name,
                                            ClassLoaderData* loader_data,
                                            Handle protection_domain,
-                                           const InstanceKlass* host_klass,
+                                           const InstanceKlass* unsafe_anonymous_host,
                                            GrowableArray<Handle>* cp_patches,
                                            TRAPS);
  public:
--- a/src/hotspot/share/classfile/moduleEntry.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/moduleEntry.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -110,7 +110,7 @@
   ClassLoaderData* loader_data() const                 { return _loader_data; }
 
   void set_loader_data(ClassLoaderData* cld) {
-    assert(!cld->is_anonymous(), "Unexpected anonymous class loader data");
+    assert(!cld->is_unsafe_anonymous(), "Unexpected unsafe anonymous class loader data");
     _loader_data = cld;
   }
 
--- a/src/hotspot/share/classfile/resolutionErrors.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/resolutionErrors.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -65,9 +65,10 @@
 }
 
 void ResolutionErrorEntry::set_message(Symbol* c) {
-  assert(c != NULL, "must set a value");
   _message = c;
-  _message->increment_refcount();
+  if (_message != NULL) {
+    _message->increment_refcount();
+  }
 }
 
 // create new error entry
@@ -87,7 +88,9 @@
   // decrement error refcount
   assert(entry->error() != NULL, "error should be set");
   entry->error()->decrement_refcount();
-  entry->message()->decrement_refcount();
+  if (entry->message() != NULL) {
+    entry->message()->decrement_refcount();
+  }
   Hashtable<ConstantPool*, mtClass>::free_entry(entry);
 }
 
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -115,10 +115,15 @@
     return fail("Corrupted archive file header");
   }
 
+  jshort cur_index = 0;
+  jshort max_cp_index = FileMapInfo::current_info()->header()->max_used_path_index();
+  jshort module_paths_start_index =
+    FileMapInfo::current_info()->header()->app_module_paths_start_index();
   while (_cur_ptr < _end_ptr) {
     jint type;
     const char* path = _cur_ptr;
     _cur_ptr += strlen(path) + 1;
+
     if (!read_jint(&type)) {
       return fail("Corrupted archive file header");
     }
@@ -129,13 +134,19 @@
       print_path(&ls, type, path);
       ls.cr();
     }
-    if (!check(type, path)) {
-      if (!PrintSharedArchiveAndExit) {
-        return false;
+    // skip checking the class path(s) which was not referenced during CDS dump
+    if ((cur_index <= max_cp_index) || (cur_index >= module_paths_start_index)) {
+      if (!check(type, path)) {
+        if (!PrintSharedArchiveAndExit) {
+          return false;
+        }
+      } else {
+        ClassLoader::trace_class_path("ok");
       }
     } else {
-      ClassLoader::trace_class_path("ok");
+      ClassLoader::trace_class_path("skipped check");
     }
+    cur_index++;
   }
 
   return true;
--- a/src/hotspot/share/classfile/symbolTable.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -448,7 +448,7 @@
 #ifdef ASSERT
     assert(sym->utf8_length() == _len, "%s [%d,%d]", where, sym->utf8_length(), _len);
     for (int i = 0; i < _len; i++) {
-      assert(sym->byte_at(i) == _name[i],
+      assert(sym->byte_at(i) == (jbyte) _name[i],
              "%s [%d,%d,%d]", where, i, sym->byte_at(i), _name[i]);
     }
 #endif
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -988,18 +988,18 @@
                                               Handle class_loader,
                                               Handle protection_domain,
                                               ClassFileStream* st,
-                                              const InstanceKlass* host_klass,
+                                              const InstanceKlass* unsafe_anonymous_host,
                                               GrowableArray<Handle>* cp_patches,
                                               TRAPS) {
 
   EventClassLoad class_load_start_event;
 
   ClassLoaderData* loader_data;
-  if (host_klass != NULL) {
-    // Create a new CLD for anonymous class, that uses the same class loader
-    // as the host_klass
-    guarantee(oopDesc::equals(host_klass->class_loader(), class_loader()), "should be the same");
-    loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
+  if (unsafe_anonymous_host != NULL) {
+    // Create a new CLD for an unsafe anonymous class, that uses the same class loader
+    // as the unsafe_anonymous_host
+    guarantee(oopDesc::equals(unsafe_anonymous_host->class_loader(), class_loader()), "should be the same");
+    loader_data = ClassLoaderData::unsafe_anonymous_class_loader_data(class_loader);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
   }
@@ -1016,12 +1016,12 @@
                                                       class_name,
                                                       loader_data,
                                                       protection_domain,
-                                                      host_klass,
+                                                      unsafe_anonymous_host,
                                                       cp_patches,
                                                       CHECK_NULL);
 
-  if (host_klass != NULL && k != NULL) {
-    // Anonymous classes must update ClassLoaderData holder (was host_klass loader)
+  if (unsafe_anonymous_host != NULL && k != NULL) {
+    // Unsafe anonymous classes must update ClassLoaderData holder (was unsafe_anonymous_host loader)
     // so that they can be unloaded when the mirror is no longer referenced.
     k->class_loader_data()->initialize_holder(Handle(THREAD, k->java_mirror()));
 
@@ -1056,8 +1056,8 @@
       post_class_load_event(&class_load_start_event, k, loader_data);
     }
   }
-  assert(host_klass != NULL || NULL == cp_patches,
-         "cp_patches only found with host_klass");
+  assert(unsafe_anonymous_host != NULL || NULL == cp_patches,
+         "cp_patches only found with unsafe_anonymous_host");
 
   return k;
 }
@@ -1115,7 +1115,7 @@
                                          class_name,
                                          loader_data,
                                          protection_domain,
-                                         NULL, // host_klass
+                                         NULL, // unsafe_anonymous_host
                                          NULL, // cp_patches
                                          CHECK_NULL);
   }
@@ -1883,7 +1883,7 @@
 
   if (do_cleaning) {
     GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
-    ResolvedMethodTable::unlink();
+    ResolvedMethodTable::trigger_cleanup();
   }
 
   return unloading_occurred;
@@ -3010,7 +3010,7 @@
       _master_dictionary(master_dictionary) {}
     void do_cld(ClassLoaderData* cld) {
       ResourceMark rm;
-      if (cld->is_anonymous()) {
+      if (cld->is_unsafe_anonymous()) {
         return;
       }
       if (cld->is_system_class_loader_data() || cld->is_platform_class_loader_data()) {
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -187,11 +187,6 @@
   do_klass(jdk_internal_loader_ClassLoaders_AppClassLoader_klass,      jdk_internal_loader_ClassLoaders_AppClassLoader,       Pre ) \
   do_klass(jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass, jdk_internal_loader_ClassLoaders_PlatformClassLoader,  Pre ) \
   do_klass(CodeSource_klass,                            java_security_CodeSource,                  Pre                 ) \
-  do_klass(Configuration_klass,                         java_lang_module_Configuration,            Pre                 ) \
-  do_klass(ImmutableCollections_ListN_klass,            java_util_ImmutableCollections_ListN,      Pre                 ) \
-  do_klass(ImmutableCollections_MapN_klass,             java_util_ImmutableCollections_MapN,       Pre                 ) \
-  do_klass(ImmutableCollections_SetN_klass,             java_util_ImmutableCollections_SetN,       Pre                 ) \
-  do_klass(ArchivedModuleGraph_klass,                   jdk_internal_module_ArchivedModuleGraph,   Pre                 ) \
                                                                                                                          \
   do_klass(StackTraceElement_klass,                     java_lang_StackTraceElement,               Opt                 ) \
                                                                                                                          \
@@ -215,7 +210,6 @@
   do_klass(Byte_klass,                                  java_lang_Byte,                            Pre                 ) \
   do_klass(Short_klass,                                 java_lang_Short,                           Pre                 ) \
   do_klass(Integer_klass,                               java_lang_Integer,                         Pre                 ) \
-  do_klass(Integer_IntegerCache_klass,                  java_lang_Integer_IntegerCache,            Pre                 ) \
   do_klass(Long_klass,                                  java_lang_Long,                            Pre                 ) \
                                                                                                                          \
   /* JVMCI classes. These are loaded on-demand. */                                                                       \
@@ -304,7 +298,7 @@
                         class_loader,
                         protection_domain,
                         st,
-                        NULL, // host klass
+                        NULL, // unsafe_anonymous_host
                         NULL, // cp_patches
                         THREAD);
   }
@@ -312,7 +306,7 @@
                                      Handle class_loader,
                                      Handle protection_domain,
                                      ClassFileStream* st,
-                                     const InstanceKlass* host_klass,
+                                     const InstanceKlass* unsafe_anonymous_host,
                                      GrowableArray<Handle>* cp_patches,
                                      TRAPS);
 
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -755,11 +755,11 @@
          Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
   assert(DumpSharedSpaces, "called at dump time only");
 
-  // Skip anonymous classes, which are not archived as they are not in
-  // dictionary (see assert_no_anonymoys_classes_in_dictionaries() in
+  // Skip unsafe anonymous classes, which are not archived as they are not in
+  // dictionary (see assert_no_unsafe_anonymous_classes_in_dictionaries() in
   // VM_PopulateDumpSharedSpace::doit()).
-  if (k->class_loader_data()->is_anonymous()) {
-    return true; // anonymous classes are not archived, skip
+  if (k->class_loader_data()->is_unsafe_anonymous()) {
+    return true; // unsafe anonymous classes are not archived, skip
   }
 
   SharedDictionaryEntry* entry = ((SharedDictionary*)(k->class_loader_data()->dictionary()))->find_entry_for(k);
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -293,9 +293,6 @@
 
   static void allocate_shared_data_arrays(int size, TRAPS);
   static void oops_do(OopClosure* f);
-  static void roots_oops_do(OopClosure* f) {
-    oops_do(f);
-  }
 
   // Check if sharing is supported for the class loader.
   static bool is_sharing_possible(ClassLoaderData* loader_data);
--- a/src/hotspot/share/classfile/verifier.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/verifier.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -2823,20 +2823,20 @@
                   current_class()->super()->name()))) {
     bool subtype = false;
     bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref;
-    if (!current_class()->is_anonymous()) {
+    if (!current_class()->is_unsafe_anonymous()) {
       subtype = ref_class_type.is_assignable_from(
                  current_type(), this, false, CHECK_VERIFY(this));
     } else {
-      VerificationType host_klass_type =
-                        VerificationType::reference_type(current_class()->host_klass()->name());
-      subtype = ref_class_type.is_assignable_from(host_klass_type, this, false, CHECK_VERIFY(this));
+      VerificationType unsafe_anonymous_host_type =
+                        VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name());
+      subtype = ref_class_type.is_assignable_from(unsafe_anonymous_host_type, this, false, CHECK_VERIFY(this));
 
       // If invokespecial of IMR, need to recheck for same or
       // direct interface relative to the host class
       have_imr_indirect = (have_imr_indirect &&
                            !is_same_or_direct_interface(
-                             current_class()->host_klass(),
-                             host_klass_type, ref_class_type));
+                             current_class()->unsafe_anonymous_host(),
+                             unsafe_anonymous_host_type, ref_class_type));
     }
     if (!subtype) {
       verify_error(ErrorContext::bad_code(bci),
@@ -2866,15 +2866,15 @@
     } else {   // other methods
       // Ensures that target class is assignable to method class.
       if (opcode == Bytecodes::_invokespecial) {
-        if (!current_class()->is_anonymous()) {
+        if (!current_class()->is_unsafe_anonymous()) {
           current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
         } else {
           // anonymous class invokespecial calls: check if the
-          // objectref is a subtype of the host_klass of the current class
-          // to allow an anonymous class to reference methods in the host_klass
+          // objectref is a subtype of the unsafe_anonymous_host of the current class
+          // to allow an anonymous class to reference methods in the unsafe_anonymous_host
           VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
           VerificationType hosttype =
-            VerificationType::reference_type(current_class()->host_klass()->name());
+            VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name());
           bool subtype = hosttype.is_assignable_from(top, this, false, CHECK_VERIFY(this));
           if (!subtype) {
             verify_error( ErrorContext::bad_type(current_frame->offset(),
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -124,7 +124,6 @@
   template(getBootClassPathEntryForClass_name,        "getBootClassPathEntryForClass")            \
   template(jdk_internal_vm_PostVMInitHook,            "jdk/internal/vm/PostVMInitHook")           \
   template(sun_net_www_ParseUtil,                     "sun/net/www/ParseUtil")                    \
-  template(jdk_internal_module_ArchivedModuleGraph,   "jdk/internal/module/ArchivedModuleGraph")  \
                                                                                                   \
   template(jdk_internal_loader_ClassLoaders_AppClassLoader,      "jdk/internal/loader/ClassLoaders$AppClassLoader")      \
   template(jdk_internal_loader_ClassLoaders_PlatformClassLoader, "jdk/internal/loader/ClassLoaders$PlatformClassLoader") \
@@ -650,17 +649,7 @@
   JFR_TEMPLATES(template)                                                                                         \
                                                                                                                   \
   /* cds */                                                                                                       \
-  template(configuration_signature,                "Ljava/lang/module/Configuration;")                            \
-  template(java_lang_module_Configuration,         "java/lang/module/Configuration")                              \
-  template(java_util_ImmutableCollections_ListN,   "java/util/ImmutableCollections$ListN")                        \
-  template(java_util_ImmutableCollections_MapN,    "java/util/ImmutableCollections$MapN")                         \
-  template(java_util_ImmutableCollections_SetN,    "java/util/ImmutableCollections$SetN")                         \
   template(jdk_internal_loader_ClassLoaders,       "jdk/internal/loader/ClassLoaders")                            \
-  template(list_signature,                         "Ljava/util/List;")                                            \
-  template(map_signature,                          "Ljava/util/Map;")                                             \
-  template(moduleFinder_signature,                 "Ljava/lang/module/ModuleFinder;")                             \
-  template(set_signature,                          "Ljava/util/Set;")                                             \
-  template(systemModules_signature,                "Ljdk/internal/module/SystemModules;")                         \
   template(toFileURL_name,                         "toFileURL")                                                   \
   template(toFileURL_signature,                    "(Ljava/lang/String;)Ljava/net/URL;")                          \
   template(url_void_signature,                     "(Ljava/net/URL;)V")                                           \
--- a/src/hotspot/share/code/compiledMethod.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/code/compiledMethod.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -202,7 +202,7 @@
 
   virtual address verified_entry_point() const = 0;
   virtual void log_identity(xmlStream* log) const = 0;
-  virtual void log_state_change() const = 0;
+  virtual void log_state_change(oop cause = NULL) const = 0;
   virtual bool make_not_used() = 0;
   virtual bool make_not_entrant() = 0;
   virtual bool make_entrant() = 0;
--- a/src/hotspot/share/code/nmethod.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -422,7 +422,7 @@
 #if INCLUDE_JVMCI
   _jvmci_installed_code   = NULL;
   _speculation_log        = NULL;
-  _jvmci_installed_code_triggers_unloading = false;
+  _jvmci_installed_code_triggers_invalidation = false;
 #endif
 }
 
@@ -690,9 +690,9 @@
     _speculation_log = speculation_log;
     oop obj = JNIHandles::resolve(installed_code);
     if (obj == NULL || (obj->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(obj))) {
-      _jvmci_installed_code_triggers_unloading = false;
+      _jvmci_installed_code_triggers_invalidation = false;
     } else {
-      _jvmci_installed_code_triggers_unloading = true;
+      _jvmci_installed_code_triggers_invalidation = true;
     }
 
     if (compiler->is_jvmci()) {
@@ -786,6 +786,13 @@
   if (TieredCompilation) {
     log->print(" level='%d'", comp_level());
   }
+#if INCLUDE_JVMCI
+    char buffer[O_BUFLEN];
+    char* jvmci_name = jvmci_installed_code_name(buffer, O_BUFLEN);
+    if (jvmci_name != NULL) {
+      log->print(" jvmci_installed_code_name='%s'", jvmci_name);
+    }
+#endif
 }
 
 
@@ -1083,7 +1090,7 @@
   _state = unloaded;
 
   // Log the unloading.
-  log_state_change();
+  log_state_change(cause);
 
 #if INCLUDE_JVMCI
   // The method can only be unloaded after the pointer to the installed code
@@ -1107,7 +1114,7 @@
   }
 }
 
-void nmethod::log_state_change() const {
+void nmethod::log_state_change(oop cause) const {
   if (LogCompilation) {
     if (xtty != NULL) {
       ttyLocker ttyl;  // keep the following output all in one block
@@ -1120,6 +1127,9 @@
                          (_state == zombie ? " zombie='1'" : ""));
       }
       log_identity(xtty);
+      if (cause != NULL) {
+        xtty->print(" cause='%s'", cause->klass()->external_name());
+      }
       xtty->stamp();
       xtty->end_elem();
     }
@@ -1150,7 +1160,8 @@
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
   methodHandle the_method(method());
-  NoSafepointVerifier nsv;
+  // This can be called while the system is already at a safepoint, which is ok.
+  NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
@@ -1507,13 +1518,12 @@
 bool nmethod::do_unloading_jvmci() {
   if (_jvmci_installed_code != NULL) {
     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
-      if (_jvmci_installed_code_triggers_unloading) {
-        // jweak reference processing has already cleared the referent
-        make_unloaded(NULL);
-        return true;
-      } else {
-        clear_jvmci_installed_code();
+      if (_jvmci_installed_code_triggers_invalidation) {
+        // The reference to the installed code has been dropped so invalidate
+        // this nmethod and allow the sweeper to reclaim it.
+        make_not_entrant();
       }
+      clear_jvmci_installed_code();
     }
   }
   return false;
@@ -2948,7 +2958,7 @@
   return JNIHandles::resolve(_speculation_log);
 }
 
-char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
+char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) const {
   if (!this->is_compiled_by_jvmci()) {
     return NULL;
   }
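
A minimal standalone sketch of the reworked JVMCI retirement logic above (stand-in types, not the actual HotSpot declarations): once the jweak to a non-default InstalledCode is cleared, the nmethod is invalidated (made not entrant) and left to the sweeper rather than being unloaded outright, and the installed-code handle is always cleared.

    #include <cstdio>

    // Illustrative stand-ins for the HotSpot types involved.
    struct InstalledCodeRef {
      bool cleared;                       // models JNIHandles::is_global_weak_cleared()
    };

    struct SketchNMethod {
      InstalledCodeRef* installed_code;   // models _jvmci_installed_code
      bool triggers_invalidation;         // models _jvmci_installed_code_triggers_invalidation
      bool not_entrant;

      // Mirrors the shape of the new do_unloading_jvmci(): it never unloads directly.
      bool do_unloading_jvmci() {
        if (installed_code != nullptr && installed_code->cleared) {
          if (triggers_invalidation) {
            // The installed code reference was dropped, so invalidate this
            // nmethod and let the sweeper reclaim it later.
            not_entrant = true;
          }
          installed_code = nullptr;       // models clear_jvmci_installed_code()
        }
        return false;                     // never reports the nmethod as unloaded
      }
    };

    int main() {
      InstalledCodeRef ref = { true };
      SketchNMethod nm = { &ref, true, false };
      nm.do_unloading_jvmci();
      std::printf("not_entrant=%d, installed_code=%p\n", nm.not_entrant, (void*)nm.installed_code);
      return 0;
    }
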
--- a/src/hotspot/share/code/nmethod.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/code/nmethod.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -78,7 +78,7 @@
   // That is, installed code other than a "default"
   // HotSpotNMethod causes nmethod unloading.
   // This field is ignored once _jvmci_installed_code is NULL.
-  bool _jvmci_installed_code_triggers_unloading;
+  bool _jvmci_installed_code_triggers_invalidation;
 #endif
 
   // To support simple linked-list chaining of nmethods:
@@ -456,7 +456,7 @@
   // Copies the value of the name field in the InstalledCode
   // object (if any) associated with this nmethod into buf.
   // Returns the value of buf if it was updated otherwise NULL.
-  char* jvmci_installed_code_name(char* buf, size_t buflen);
+  char* jvmci_installed_code_name(char* buf, size_t buflen) const;
 
   // Updates the state of the InstalledCode (if any) associated with
   // this nmethod based on the current value of _state.
@@ -486,7 +486,7 @@
  protected:
   virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
 #if INCLUDE_JVMCI
-  // See comment for _jvmci_installed_code_triggers_unloading field.
+  // See comment for _jvmci_installed_code_triggers_invalidation field.
   // Returns whether this nmethod was unloaded.
   virtual bool do_unloading_jvmci();
 #endif
@@ -555,7 +555,7 @@
   // Logging
   void log_identity(xmlStream* log) const;
   void log_new_nmethod() const;
-  void log_state_change() const;
+  void log_state_change(oop cause = NULL) const;
 
   // Prints block-level comments, including nmethod specific block labels:
   virtual void print_block_comment(outputStream* stream, address block_begin) const {
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -243,7 +243,7 @@
       // sets for old regions.
       r->rem_set()->clear(true /* only_cardset */);
     } else {
-      assert(!r->is_old() || !r->rem_set()->is_tracked(),
+      assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
              "Missed to clear unused remembered set of region %u (%s) that is %s",
              r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
     }
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
 #define SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
 
-#include "gc/g1/ptrQueue.hpp"
+#include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
 
 class FreeIdSet;
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -361,7 +361,7 @@
     hr->set_closed_archive();
   }
   _g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
-  _g1h->old_set_add(hr);
+  _g1h->archive_set_add(hr);
   _g1h->hr_printer()->alloc(hr);
   _allocated_regions.append(hr);
   _allocation_region = hr;
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -27,9 +27,10 @@
 #include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1CardTable.inline.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1SATBMarkQueueSet.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
 #include "logging/log.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -83,7 +83,6 @@
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.inline.hpp"
@@ -644,7 +643,7 @@
         curr_region->set_closed_archive();
       }
       _hr_printer.alloc(curr_region);
-      _old_set.add(curr_region);
+      _archive_set.add(curr_region);
       HeapWord* top;
       HeapRegion* next_region;
       if (curr_region != last_region) {
@@ -801,7 +800,7 @@
       guarantee(curr_region->is_archive(),
                 "Expected archive region at index %u", curr_region->hrm_index());
       uint curr_index = curr_region->hrm_index();
-      _old_set.remove(curr_region);
+      _archive_set.remove(curr_region);
       curr_region->set_free();
       curr_region->set_top(curr_region->bottom());
       if (curr_region != last_region) {
@@ -1126,7 +1125,7 @@
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
       soft_ref_policy()->should_clear_all_soft_refs();
 
-  G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
+  G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
   GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
 
   collector.prepare_collection();
@@ -1406,6 +1405,68 @@
   _verifier->verify_region_sets_optional();
 }
 
+class OldRegionSetChecker : public HeapRegionSetChecker {
+public:
+  void check_mt_safety() {
+    // Master Old Set MT safety protocol:
+    // (a) If we're at a safepoint, operations on the master old set
+    // should be invoked:
+    // - by the VM thread (which will serialize them), or
+    // - by the GC workers while holding the FreeList_lock, if we're
+    //   at a safepoint for an evacuation pause (this lock is taken
+    //   anyway when a GC alloc region is retired so that a new one
+    //   is allocated from the free list), or
+    // - by the GC workers while holding the OldSets_lock, if we're at a
+    //   safepoint for a cleanup pause.
+    // (b) If we're not at a safepoint, operations on the master old set
+    // should be invoked while holding the Heap_lock.
+
+    if (SafepointSynchronize::is_at_safepoint()) {
+      guarantee(Thread::current()->is_VM_thread() ||
+                FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
+                "master old set MT safety protocol at a safepoint");
+    } else {
+      guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
+    }
+  }
+  bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }
+  const char* get_description() { return "Old Regions"; }
+};
+
+class ArchiveRegionSetChecker : public HeapRegionSetChecker {
+public:
+  void check_mt_safety() {
+    guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(),
+              "May only change archive regions during initialization or safepoint.");
+  }
+  bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); }
+  const char* get_description() { return "Archive Regions"; }
+};
+
+class HumongousRegionSetChecker : public HeapRegionSetChecker {
+public:
+  void check_mt_safety() {
+    // Humongous Set MT safety protocol:
+    // (a) If we're at a safepoint, operations on the master humongous
+    // set should be invoked by either the VM thread (which will
+    // serialize them) or by the GC workers while holding the
+    // OldSets_lock.
+    // (b) If we're not at a safepoint, operations on the master
+    // humongous set should be invoked while holding the Heap_lock.
+
+    if (SafepointSynchronize::is_at_safepoint()) {
+      guarantee(Thread::current()->is_VM_thread() ||
+                OldSets_lock->owned_by_self(),
+                "master humongous set MT safety protocol at a safepoint");
+    } else {
+      guarantee(Heap_lock->owned_by_self(),
+                "master humongous set MT safety protocol outside a safepoint");
+    }
+  }
+  bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
+  const char* get_description() { return "Humongous Regions"; }
+};
+
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
   CollectedHeap(),
   _young_gen_sampling_thread(NULL),
@@ -1413,13 +1474,9 @@
   _collector_policy(collector_policy),
   _card_table(NULL),
   _soft_ref_policy(),
-  _memory_manager("G1 Young Generation", "end of minor GC"),
-  _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
-  _eden_pool(NULL),
-  _survivor_pool(NULL),
-  _old_pool(NULL),
-  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
-  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
+  _old_set("Old Region Set", new OldRegionSetChecker()),
+  _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
+  _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
   _bot(NULL),
   _listener(),
   _hrm(),
@@ -1746,20 +1803,6 @@
   return JNI_OK;
 }
 
-void G1CollectedHeap::initialize_serviceability() {
-  _eden_pool = new G1EdenPool(this);
-  _survivor_pool = new G1SurvivorPool(this);
-  _old_pool = new G1OldGenPool(this);
-
-  _full_gc_memory_manager.add_pool(_eden_pool);
-  _full_gc_memory_manager.add_pool(_survivor_pool);
-  _full_gc_memory_manager.add_pool(_old_pool);
-
-  _memory_manager.add_pool(_eden_pool);
-  _memory_manager.add_pool(_survivor_pool);
-  _memory_manager.add_pool(_old_pool, false /* always_affected_by_gc */);
-}
-
 void G1CollectedHeap::stop() {
   // Stop all concurrent threads. We do this to make sure these threads
   // do not continue to execute and access resources (e.g. logging)
@@ -2856,9 +2899,9 @@
     active_workers = workers()->update_active_workers(active_workers);
     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
 
-    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
-    TraceMemoryManagerStats tms(&_memory_manager, gc_cause(),
-                                collector_state()->yc_type() == Mixed /* allMemoryPoolsAffected */);
+    G1MonitoringScope ms(g1mm(),
+                         false /* full_gc */,
+                         collector_state()->yc_type() == Mixed /* all_memory_pools_affected */);
 
     G1HeapTransition heap_transition(this);
     size_t heap_used_bytes_before_gc = used();
@@ -3532,28 +3575,6 @@
   }
 };
 
-class G1ResolvedMethodCleaningTask : public StackObj {
-  volatile int       _resolved_method_task_claimed;
-public:
-  G1ResolvedMethodCleaningTask() :
-      _resolved_method_task_claimed(0) {}
-
-  bool claim_resolved_method_task() {
-    if (_resolved_method_task_claimed) {
-      return false;
-    }
-    return Atomic::cmpxchg(1, &_resolved_method_task_claimed, 0) == 0;
-  }
-
-  // These aren't big, one thread can do it all.
-  void work() {
-    if (claim_resolved_method_task()) {
-      ResolvedMethodTable::unlink();
-    }
-  }
-};
-
-
 // To minimize the remark pause times, the tasks below are done in parallel.
 class G1ParallelCleaningTask : public AbstractGangTask {
 private:
@@ -3561,7 +3582,6 @@
   G1StringCleaningTask          _string_task;
   G1CodeCacheUnloadingTask      _code_cache_task;
   G1KlassCleaningTask           _klass_cleaning_task;
-  G1ResolvedMethodCleaningTask  _resolved_method_cleaning_task;
 
 public:
   // The constructor is run in the VMThread.
@@ -3570,8 +3590,7 @@
       _unloading_occurred(unloading_occurred),
       _string_task(is_alive, true, G1StringDedup::is_enabled()),
       _code_cache_task(num_workers, is_alive, unloading_occurred),
-      _klass_cleaning_task(),
-      _resolved_method_cleaning_task() {
+      _klass_cleaning_task() {
   }
 
   // The parallel work done by all worker threads.
@@ -3585,9 +3604,6 @@
     // Clean the Strings.
     _string_task.work(worker_id);
 
-    // Clean unreferenced things in the ResolvedMethodTable
-    _resolved_method_cleaning_task.work();
-
     // Wait for all workers to finish the first code cache cleaning pass.
     _code_cache_task.barrier_wait(worker_id);
 
@@ -4621,7 +4637,6 @@
 #endif // ASSERT
 
 class TearDownRegionSetsClosure : public HeapRegionClosure {
-private:
   HeapRegionSet *_old_set;
 
 public:
@@ -4634,9 +4649,9 @@
       r->uninstall_surv_rate_group();
     } else {
       // We ignore free regions, we'll empty the free list afterwards.
-      // We ignore humongous regions, we're not tearing down the
-      // humongous regions set.
-      assert(r->is_free() || r->is_humongous(),
+      // We ignore humongous and archive regions, we're not tearing down these
+      // sets.
+      assert(r->is_archive() || r->is_free() || r->is_humongous(),
              "it cannot be another type");
     }
     return false;
@@ -4679,14 +4694,17 @@
 
 class RebuildRegionSetsClosure : public HeapRegionClosure {
 private:
-  bool            _free_list_only;
-  HeapRegionSet*   _old_set;
-  HeapRegionManager*   _hrm;
-  size_t          _total_used;
+  bool _free_list_only;
+
+  HeapRegionSet* _old_set;
+  HeapRegionManager* _hrm;
+
+  size_t _total_used;
 
 public:
   RebuildRegionSetsClosure(bool free_list_only,
-                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
+                           HeapRegionSet* old_set,
+                           HeapRegionManager* hrm) :
     _free_list_only(free_list_only),
     _old_set(old_set), _hrm(hrm), _total_used(0) {
     assert(_hrm->num_free_regions() == 0, "pre-condition");
@@ -4704,11 +4722,11 @@
       _hrm->insert_into_free_list(r);
     } else if (!_free_list_only) {
 
-      if (r->is_humongous()) {
-        // We ignore humongous regions. We left the humongous set unchanged.
+      if (r->is_archive() || r->is_humongous()) {
+        // We ignore archive and humongous regions. We left these sets unchanged.
       } else {
         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
-        // We now move all (non-humongous, non-old) regions to old gen, and register them as such.
+        // We now move all (non-humongous, non-old, non-archive) regions to old gen, and register them as such.
         r->move_to_old();
         _old_set->add(r);
       }
@@ -4782,7 +4800,7 @@
   _hr_printer.retire(alloc_region);
   // We update the eden sizes here, when the region is retired,
   // instead of when it's allocated, since this is the point that its
-  // used space has been recored in _summary_bytes_used.
+  // used space has been recorded in _summary_bytes_used.
   g1mm()->update_eden_size();
 }
 
@@ -4833,7 +4851,7 @@
   alloc_region->note_end_of_copying(during_im);
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
   if (dest.is_old()) {
-    _old_set.add(alloc_region);
+    old_set_add(alloc_region);
   }
   _hr_printer.retire(alloc_region);
 }
@@ -4958,17 +4976,14 @@
   CodeCache::blobs_do(&blob_cl);
 }
 
+void G1CollectedHeap::initialize_serviceability() {
+  _g1mm->initialize_serviceability();
+}
+
 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
-  GrowableArray<GCMemoryManager*> memory_managers(2);
-  memory_managers.append(&_memory_manager);
-  memory_managers.append(&_full_gc_memory_manager);
-  return memory_managers;
+  return _g1mm->memory_managers();
 }
 
 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
-  GrowableArray<MemoryPool*> memory_pools(3);
-  memory_pools.append(_eden_pool);
-  memory_pools.append(_survivor_pool);
-  memory_pools.append(_old_pool);
-  return memory_pools;
+  return _g1mm->memory_pools();
 }
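
The three checker classes added above replace the old humongous-flag-plus-MtSafeChecker pair: each HeapRegionSet now owns a single HeapRegionSetChecker bundling the MT-safety protocol, a region-type predicate, and a description. A minimal sketch of that pattern with simplified stand-in types (not the real HeapRegion/HeapRegionSet API):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Simplified stand-ins; the real HeapRegion/HeapRegionSet live in gc/g1.
    enum class RegionKind { Free, Young, Old, Archive, Humongous };
    struct Region { RegionKind kind; };

    // Mirrors the shape of the HeapRegionSetChecker interface introduced here.
    class RegionSetChecker {
    public:
      virtual ~RegionSetChecker() {}
      virtual void check_mt_safety() = 0;           // lock / safepoint protocol
      virtual bool is_correct_type(Region* r) = 0;  // does this region belong in the set?
      virtual const char* get_description() = 0;
    };

    class ArchiveChecker : public RegionSetChecker {
    public:
      void check_mt_safety() { /* e.g. only during initialization or at a safepoint */ }
      bool is_correct_type(Region* r) { return r->kind == RegionKind::Archive; }
      const char* get_description() { return "Archive Regions"; }
    };

    // A region set validates every addition against its checker, as the new
    // _archive_set("Archive Region Set", new ArchiveRegionSetChecker()) does.
    class RegionSet {
      const char* _name;
      RegionSetChecker* _checker;
      std::vector<Region*> _regions;
    public:
      RegionSet(const char* name, RegionSetChecker* checker) : _name(name), _checker(checker) {}
      void add(Region* r) {
        _checker->check_mt_safety();
        assert(_checker->is_correct_type(r) && "wrong region type for this set");
        _regions.push_back(r);
      }
      const char* name() const { return _name; }
      std::size_t length() const { return _regions.size(); }
    };

    int main() {
      ArchiveChecker checker;
      RegionSet archive_set("Archive Region Set", &checker);
      Region r = { RegionKind::Archive };
      archive_set.add(&r);
      std::printf("%zu region(s) in the %s set\n", archive_set.length(), archive_set.name());
      return 0;
    }

Keeping archive regions in a set of their own is what lets non_young_capacity_bytes() and the heap verifier account for them separately from old regions, as the g1CollectedHeap.hpp and g1HeapVerifier.cpp hunks below show.
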
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -51,7 +51,6 @@
 #include "gc/shared/preservedMarks.hpp"
 #include "gc/shared/softRefPolicy.hpp"
 #include "memory/memRegion.hpp"
-#include "services/memoryManager.hpp"
 #include "utilities/stack.hpp"
 
 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
@@ -67,6 +66,7 @@
 class G1ParScanThreadStateSet;
 class G1ParScanThreadState;
 class MemoryPool;
+class MemoryManager;
 class ObjectClosure;
 class SpaceClosure;
 class CompactibleSpaceClosure;
@@ -160,23 +160,13 @@
 
   SoftRefPolicy      _soft_ref_policy;
 
-  GCMemoryManager _memory_manager;
-  GCMemoryManager _full_gc_memory_manager;
-
-  MemoryPool* _eden_pool;
-  MemoryPool* _survivor_pool;
-  MemoryPool* _old_pool;
-
   static size_t _humongous_object_threshold_in_words;
 
-  // It keeps track of the old regions.
+  // These sets keep track of old, archive and humongous regions respectively.
   HeapRegionSet _old_set;
-
-  // It keeps track of the humongous regions.
+  HeapRegionSet _archive_set;
   HeapRegionSet _humongous_set;
 
-  virtual void initialize_serviceability();
-
   void eagerly_reclaim_humongous_regions();
   // Start a new incremental collection set for the next pause.
   void start_new_collection_set();
@@ -970,6 +960,7 @@
 
   virtual SoftRefPolicy* soft_ref_policy();
 
+  virtual void initialize_serviceability();
   virtual GrowableArray<GCMemoryManager*> memory_managers();
   virtual GrowableArray<MemoryPool*> memory_pools();
 
@@ -1046,8 +1037,10 @@
   inline void old_set_add(HeapRegion* hr);
   inline void old_set_remove(HeapRegion* hr);
 
+  inline void archive_set_add(HeapRegion* hr);
+
   size_t non_young_capacity_bytes() {
-    return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
+    return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
   }
 
   // Determine whether the given region is one that we are using as an
@@ -1232,20 +1225,11 @@
 
   const G1SurvivorRegions* survivor() const { return &_survivor; }
 
-  uint survivor_regions_count() const {
-    return _survivor.length();
-  }
-
-  uint eden_regions_count() const {
-    return _eden.length();
-  }
-
-  uint young_regions_count() const {
-    return _eden.length() + _survivor.length();
-  }
-
+  uint eden_regions_count() const { return _eden.length(); }
+  uint survivor_regions_count() const { return _survivor.length(); }
+  uint young_regions_count() const { return _eden.length() + _survivor.length(); }
   uint old_regions_count() const { return _old_set.length(); }
-
+  uint archive_regions_count() const { return _archive_set.length(); }
   uint humongous_regions_count() const { return _humongous_set.length(); }
 
 #ifdef ASSERT
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -92,6 +92,10 @@
   _old_set.remove(hr);
 }
 
+inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
+  _archive_set.add(hr);
+}
+
 // It dirties the cards that cover the block so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -328,10 +328,10 @@
   return cl.valid();
 }
 
-class G1PrintCollectionSetClosure : public HeapRegionClosure {
+class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
   outputStream* _st;
 public:
-  G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
+  G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
 
   virtual bool do_heap_region(HeapRegion* r) {
     assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
@@ -347,7 +347,7 @@
 void G1CollectionSet::print(outputStream* st) {
   st->print_cr("\nCollection_set:");
 
-  G1PrintCollectionSetClosure cl(st);
+  G1PrintCollectionSetDetailClosure cl(st);
   iterate(&cl);
 }
 #endif // !PRODUCT
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -52,7 +52,7 @@
     return false;
   }
   assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
-  return _g1h->heap_region_containing(obj)->is_old_or_humongous();
+  return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive();
 }
 
 inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -103,9 +103,9 @@
   return worker_count;
 }
 
-G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs) :
+G1FullCollector::G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs) :
     _heap(heap),
-    _scope(memory_manager, explicit_gc, clear_soft_refs),
+    _scope(heap->g1mm(), explicit_gc, clear_soft_refs),
     _num_workers(calc_active_workers()),
     _oop_queue_set(_num_workers),
     _array_queue_set(_num_workers),
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -72,7 +72,7 @@
   ReferenceProcessorSubjectToDiscoveryMutator _is_subject_mutator;
 
 public:
-  G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs);
+  G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs);
   ~G1FullCollector();
 
   void prepare_collection();
--- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1FullGCScope.hpp"
 
-G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft) :
+G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft) :
     _rm(),
     _explicit_gc(explicit_gc),
     _g1h(G1CollectedHeap::heap()),
@@ -36,8 +36,7 @@
     _active(),
     _cpu_time(),
     _soft_refs(clear_soft, _g1h->soft_ref_policy()),
-    _collector_stats(_g1h->g1mm()->full_collection_counters()),
-    _memory_stats(memory_manager, _g1h->gc_cause()),
+    _monitoring_scope(monitoring_support, true /* full_gc */, true /* all_memory_pools_affected */),
     _heap_transition(_g1h) {
   _timer.register_gc_start();
   _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());
--- a/src/hotspot/share/gc/g1/g1FullGCScope.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1HeapTransition.hpp"
-#include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.hpp"
@@ -51,12 +50,11 @@
   IsGCActiveMark          _active;
   GCTraceCPUTime          _cpu_time;
   ClearedAllSoftRefs      _soft_refs;
-  TraceCollectorStats     _collector_stats;
-  TraceMemoryManagerStats _memory_stats;
+  G1MonitoringScope       _monitoring_scope;
   G1HeapTransition        _heap_transition;
 
 public:
-  G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft);
+  G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft);
   ~G1FullGCScope();
 
   bool is_explicit_gc();
--- a/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapRegionTraceType.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,6 @@
     StartsHumongous,
     ContinuesHumongous,
     Old,
-    Pinned,
     OpenArchive,
     ClosedArchive,
     G1HeapRegionTypeEndSentinel
@@ -51,7 +50,6 @@
       case StartsHumongous:    return "Starts Humongous";
       case ContinuesHumongous: return "Continues Humongous";
       case Old:                return "Old";
-      case Pinned:             return "Pinned";
       case OpenArchive:        return "OpenArchive";
       case ClosedArchive:      return "ClosedArchive";
       default: ShouldNotReachHere(); return NULL;
--- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
   _eden_length = g1_heap->eden_regions_count();
   _survivor_length = g1_heap->survivor_regions_count();
   _old_length = g1_heap->old_regions_count();
+  _archive_length = g1_heap->archive_regions_count();
   _humongous_length = g1_heap->humongous_regions_count();
   _metaspace_used_bytes = MetaspaceUtils::used_bytes();
 }
@@ -43,16 +44,19 @@
   size_t _eden_used;
   size_t _survivor_used;
   size_t _old_used;
+  size_t _archive_used;
   size_t _humongous_used;
 
   size_t _eden_region_count;
   size_t _survivor_region_count;
   size_t _old_region_count;
+  size_t _archive_region_count;
   size_t _humongous_region_count;
 
   DetailedUsage() :
-    _eden_used(0), _survivor_used(0), _old_used(0), _humongous_used(0),
-    _eden_region_count(0), _survivor_region_count(0), _old_region_count(0), _humongous_region_count(0) {}
+    _eden_used(0), _survivor_used(0), _old_used(0), _archive_used(0), _humongous_used(0),
+    _eden_region_count(0), _survivor_region_count(0), _old_region_count(0),
+    _archive_region_count(0), _humongous_region_count(0) {}
 };
 
 class DetailedUsageClosure: public HeapRegionClosure {
@@ -62,6 +66,9 @@
     if (r->is_old()) {
       _usage._old_used += r->used();
       _usage._old_region_count++;
+    } else if (r->is_archive()) {
+      _usage._archive_used += r->used();
+      _usage._archive_region_count++;
     } else if (r->is_survivor()) {
       _usage._survivor_used += r->used();
       _usage._survivor_region_count++;
@@ -94,6 +101,8 @@
         after._survivor_length, usage._survivor_region_count);
     assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT,
         after._old_length, usage._old_region_count);
+    assert(usage._archive_region_count == after._archive_length, "Expected archive to be " SIZE_FORMAT " but was " SIZE_FORMAT,
+        after._archive_length, usage._archive_region_count);
     assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT,
         after._humongous_length, usage._humongous_region_count);
   }
@@ -112,6 +121,11 @@
   log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
       usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K);
 
+  log_info(gc, heap)("Archive regions: " SIZE_FORMAT "->" SIZE_FORMAT,
+                     _before._archive_length, after._archive_length);
+  log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
+      usage._archive_used / K, ((after._archive_length * HeapRegion::GrainBytes) - usage._archive_used) / K);
+
   log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT,
                      _before._humongous_length, after._humongous_length);
   log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
--- a/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
     size_t _eden_length;
     size_t _survivor_length;
     size_t _old_length;
+    size_t _archive_length;
     size_t _humongous_length;
     size_t _metaspace_used_bytes;
 
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -488,19 +488,22 @@
 class VerifyRegionListsClosure : public HeapRegionClosure {
 private:
   HeapRegionSet*   _old_set;
+  HeapRegionSet*   _archive_set;
   HeapRegionSet*   _humongous_set;
-  HeapRegionManager*   _hrm;
+  HeapRegionManager* _hrm;
 
 public:
   uint _old_count;
+  uint _archive_count;
   uint _humongous_count;
   uint _free_count;
 
   VerifyRegionListsClosure(HeapRegionSet* old_set,
+                           HeapRegionSet* archive_set,
                            HeapRegionSet* humongous_set,
                            HeapRegionManager* hrm) :
-    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
-    _old_count(), _humongous_count(), _free_count(){ }
+    _old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm),
+    _old_count(), _archive_count(), _humongous_count(), _free_count(){ }
 
   bool do_heap_region(HeapRegion* hr) {
     if (hr->is_young()) {
@@ -511,6 +514,9 @@
     } else if (hr->is_empty()) {
       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
       _free_count++;
+    } else if (hr->is_archive()) {
+      assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index());
+      _archive_count++;
     } else if (hr->is_old()) {
       assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
       _old_count++;
@@ -523,8 +529,9 @@
     return false;
   }
 
-  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
+  void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
     guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
+    guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count);
     guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
     guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
   }
@@ -539,9 +546,9 @@
   // Finally, make sure that the region accounting in the lists is
   // consistent with what we see in the heap.
 
-  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
   _g1h->heap_region_iterate(&cl);
-  cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
+  cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
 }
 
 void G1HeapVerifier::prepare_for_verify() {
@@ -755,6 +762,11 @@
         return true;
       }
       if (cset_state.is_in_cset()) {
+        if (hr->is_archive()) {
+          log_error(gc, verify)("## is_archive in collection set for region %u", i);
+          _failures = true;
+          return true;
+        }
         if (hr->is_young() != (cset_state.is_young())) {
           log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
                                hr->is_young(), cset_state.value(), i);
--- a/src/hotspot/share/gc/g1/g1MemoryPool.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MemoryPool.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -40,50 +40,41 @@
   assert(UseG1GC, "sanity");
 }
 
-G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
+G1EdenPool::G1EdenPool(G1CollectedHeap* g1h, size_t initial_size) :
   G1MemoryPoolSuper(g1h,
                     "G1 Eden Space",
-                    g1h->g1mm()->eden_space_committed(), /* init_size */
-                    _undefined_max,
+                    initial_size,
+                    MemoryUsage::undefined_size(),
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1EdenPool::get_memory_usage() {
-  size_t initial_sz = initial_size();
-  size_t max_sz     = max_size();
-  size_t used       = used_in_bytes();
   size_t committed  = _g1mm->eden_space_committed();
 
-  return MemoryUsage(initial_sz, used, committed, max_sz);
+  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
 
-G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
+G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h, size_t initial_size) :
   G1MemoryPoolSuper(g1h,
                     "G1 Survivor Space",
-                    g1h->g1mm()->survivor_space_committed(), /* init_size */
-                    _undefined_max,
+                    initial_size,
+                    MemoryUsage::undefined_size(),
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1SurvivorPool::get_memory_usage() {
-  size_t initial_sz = initial_size();
-  size_t max_sz     = max_size();
-  size_t used       = used_in_bytes();
   size_t committed  = _g1mm->survivor_space_committed();
 
-  return MemoryUsage(initial_sz, used, committed, max_sz);
+  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
 
-G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
+G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h, size_t initial_size, size_t max_size) :
   G1MemoryPoolSuper(g1h,
                     "G1 Old Gen",
-                    g1h->g1mm()->old_space_committed(), /* init_size */
-                    g1h->g1mm()->old_gen_max(),
+                    initial_size,
+                    max_size,
                     true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
-  size_t initial_sz = initial_size();
-  size_t max_sz     = max_size();
-  size_t used       = used_in_bytes();
-  size_t committed  = _g1mm->old_space_committed();
+  size_t committed  = _g1mm->old_gen_committed();
 
-  return MemoryUsage(initial_sz, used, committed, max_sz);
+  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
 }
--- a/src/hotspot/share/gc/g1/g1MemoryPool.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MemoryPool.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -53,7 +53,6 @@
 // (G1EdenPool, G1SurvivorPool, G1OldGenPool).
 class G1MemoryPoolSuper : public CollectedMemoryPool {
 protected:
-  const static size_t _undefined_max = (size_t) -1;
   G1MonitoringSupport* _g1mm;
 
   // Would only be called from subclasses.
@@ -67,42 +66,30 @@
 // Memory pool that represents the G1 eden.
 class G1EdenPool : public G1MemoryPoolSuper {
 public:
-  G1EdenPool(G1CollectedHeap* g1h);
+  G1EdenPool(G1CollectedHeap* g1h, size_t initial_size);
 
-  size_t used_in_bytes() {
-    return _g1mm->eden_space_used();
-  }
-  size_t max_size() const {
-    return _undefined_max;
-  }
+  size_t used_in_bytes() { return _g1mm->eden_space_used(); }
+
   MemoryUsage get_memory_usage();
 };
 
 // Memory pool that represents the G1 survivor.
 class G1SurvivorPool : public G1MemoryPoolSuper {
 public:
-  G1SurvivorPool(G1CollectedHeap* g1h);
+  G1SurvivorPool(G1CollectedHeap* g1h, size_t initial_size);
 
-  size_t used_in_bytes() {
-    return _g1mm->survivor_space_used();
-  }
-  size_t max_size() const {
-    return _undefined_max;
-  }
+  size_t used_in_bytes() { return _g1mm->survivor_space_used(); }
+
   MemoryUsage get_memory_usage();
 };
 
 // Memory pool that represents the G1 old gen.
 class G1OldGenPool : public G1MemoryPoolSuper {
 public:
-  G1OldGenPool(G1CollectedHeap* g1h);
+  G1OldGenPool(G1CollectedHeap* g1h, size_t initial_size, size_t max_size);
 
-  size_t used_in_bytes() {
-    return _g1mm->old_space_used();
-  }
-  size_t max_size() const {
-    return _g1mm->old_gen_max();
-  }
+  size_t used_in_bytes() { return _g1mm->old_gen_used(); }
+
   MemoryUsage get_memory_usage();
 };
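
After this change the pools no longer ask G1MonitoringSupport for their initial sizes; the sizes are passed in at construction, and only the old gen pool reports a real maximum while eden and survivor use MemoryUsage::undefined_size(). A small sketch of that sizing split, using a stand-in constant instead of the real MemoryUsage class:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for MemoryUsage::undefined_size() (illustrative only).
    static const std::size_t UNDEFINED_SIZE = static_cast<std::size_t>(-1);

    struct PoolUsage {
      std::size_t initial, used, committed, max;
    };

    // Eden/survivor pools: G1 has no fixed per-space maximum, so max stays undefined.
    static PoolUsage young_pool_usage(std::size_t initial, std::size_t used, std::size_t committed) {
      return PoolUsage{ initial, used, committed, UNDEFINED_SIZE };
    }

    // Old gen pool: capped by the maximum heap capacity passed in at construction.
    static PoolUsage old_gen_usage(std::size_t initial, std::size_t used, std::size_t committed,
                                   std::size_t heap_max) {
      return PoolUsage{ initial, used, committed, heap_max };
    }

    int main() {
      PoolUsage eden = young_pool_usage(32u << 20, 10u << 20, 32u << 20);
      PoolUsage old_gen = old_gen_usage(64u << 20, 40u << 20, 64u << 20, 1u << 30);
      std::printf("eden max defined: %s, old gen max: %zu bytes\n",
                  eden.max == UNDEFINED_SIZE ? "no" : "yes", old_gen.max);
      return 0;
    }
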
 
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -26,83 +26,95 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
 #include "gc/g1/g1Policy.hpp"
-#include "gc/shared/collectorCounters.hpp"
+#include "gc/g1/g1MemoryPool.hpp"
 #include "gc/shared/hSpaceCounters.hpp"
 #include "memory/metaspaceCounters.hpp"
+#include "services/memoryPool.hpp"
 
-G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
-                                           const char* name,
-                                           int ordinal, int spaces,
-                                           size_t min_capacity,
-                                           size_t max_capacity,
-                                           size_t curr_capacity)
+class G1GenerationCounters : public GenerationCounters {
+protected:
+  G1MonitoringSupport* _g1mm;
+
+public:
+  G1GenerationCounters(G1MonitoringSupport* g1mm,
+                       const char* name, int ordinal, int spaces,
+                       size_t min_capacity, size_t max_capacity,
+                       size_t curr_capacity)
   : GenerationCounters(name, ordinal, spaces, min_capacity,
                        max_capacity, curr_capacity), _g1mm(g1mm) { }
+};
 
-// We pad the capacity three times given that the young generation
-// contains three spaces (eden and two survivors).
-G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm,
-                                                     const char* name)
+class G1YoungGenerationCounters : public G1GenerationCounters {
+public:
+  // We pad the capacity three times given that the young generation
+  // contains three spaces (eden and two survivors).
+  G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size)
   : G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */,
-               G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
-               G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
-               G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
-  if (UsePerfData) {
-    update_all();
+                         G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
+                         G1MonitoringSupport::pad_capacity(max_size, 3),
+                         G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
+    if (UsePerfData) {
+      update_all();
+    }
   }
-}
 
-G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
-                                                 const char* name)
+  virtual void update_all() {
+    size_t committed =
+              G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
+    _current_size->set_value(committed);
+  }
+};
+
+class G1OldGenerationCounters : public G1GenerationCounters {
+public:
+  G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size)
   : G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */,
-               G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
-               G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
-               G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
-  if (UsePerfData) {
-    update_all();
+                         G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
+                         G1MonitoringSupport::pad_capacity(max_size),
+                         G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
+    if (UsePerfData) {
+      update_all();
+    }
   }
-}
 
-void G1YoungGenerationCounters::update_all() {
-  size_t committed =
-            G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
-  _current_size->set_value(committed);
-}
-
-void G1OldGenerationCounters::update_all() {
-  size_t committed =
-            G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
-  _current_size->set_value(committed);
-}
+  virtual void update_all() {
+    size_t committed =
+              G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
+    _current_size->set_value(committed);
+  }
+};
 
 G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
   _g1h(g1h),
+  _incremental_memory_manager("G1 Young Generation", "end of minor GC"),
+  _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
+  _eden_space_pool(NULL),
+  _survivor_space_pool(NULL),
+  _old_gen_pool(NULL),
   _incremental_collection_counters(NULL),
   _full_collection_counters(NULL),
   _conc_collection_counters(NULL),
-  _young_collection_counters(NULL),
-  _old_collection_counters(NULL),
+  _young_gen_counters(NULL),
+  _old_gen_counters(NULL),
   _old_space_counters(NULL),
-  _eden_counters(NULL),
-  _from_counters(NULL),
-  _to_counters(NULL),
+  _eden_space_counters(NULL),
+  _from_space_counters(NULL),
+  _to_space_counters(NULL),
 
-  _overall_reserved(0),
   _overall_committed(0),
   _overall_used(0),
-  _young_region_num(0),
   _young_gen_committed(0),
-  _eden_committed(0),
-  _eden_used(0),
-  _survivor_committed(0),
-  _survivor_used(0),
-  _old_committed(0),
-  _old_used(0) {
+  _old_gen_committed(0),
 
-  _overall_reserved = g1h->max_capacity();
+  _eden_space_committed(0),
+  _eden_space_used(0),
+  _survivor_space_committed(0),
+  _survivor_space_used(0),
+  _old_gen_used(0) {
+
   recalculate_sizes();
 
-  // Counters for GC collections
+  // Counters for garbage collections
   //
   //  name "collector.0".  In a generational collector this would be the
   // young generation collection.
@@ -117,77 +129,96 @@
   _conc_collection_counters =
     new CollectorCounters("G1 stop-the-world phases", 2);
 
-  // timer sampling for all counters supporting sampling only update the
-  // used value.  See the take_sample() method.  G1 requires both used and
-  // capacity updated so sampling is not currently used.  It might
-  // be sufficient to update all counters in take_sample() even though
-  // take_sample() only returns "used".  When sampling was used, there
-  // were some anomolous values emitted which may have been the consequence
-  // of not updating all values simultaneously (i.e., see the calculation done
-  // in eden_space_used(), is it possible that the values used to
-  // calculate either eden_used or survivor_used are being updated by
-  // the collector when the sample is being done?).
-  const bool sampled = false;
-
   // "Generation" and "Space" counters.
   //
   //  name "generation.1" This is logically the old generation in
   // generational GC terms.  The "1, 1" parameters are for
   // the n-th generation (=1) with 1 space.
   // Counters are created from minCapacity, maxCapacity, and capacity
-  _old_collection_counters = new G1OldGenerationCounters(this, "old");
+  _old_gen_counters = new G1OldGenerationCounters(this, "old", _g1h->max_capacity());
 
   //  name  "generation.1.space.0"
   // Counters are created from maxCapacity, capacity, initCapacity,
   // and used.
-  _old_space_counters = new HSpaceCounters(_old_collection_counters->name_space(),
+  _old_space_counters = new HSpaceCounters(_old_gen_counters->name_space(),
     "space", 0 /* ordinal */,
-    pad_capacity(overall_reserved()) /* max_capacity */,
-    pad_capacity(old_space_committed()) /* init_capacity */);
+    pad_capacity(g1h->max_capacity()) /* max_capacity */,
+    pad_capacity(_old_gen_committed) /* init_capacity */);
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
   //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
   // See  _old_collection_counters for additional counters
-  _young_collection_counters = new G1YoungGenerationCounters(this, "young");
+  _young_gen_counters = new G1YoungGenerationCounters(this, "young", _g1h->max_capacity());
 
-  const char* young_collection_name_space = _young_collection_counters->name_space();
+  const char* young_collection_name_space = _young_gen_counters->name_space();
 
   //  name "generation.0.space.0"
   // See _old_space_counters for additional counters
-  _eden_counters = new HSpaceCounters(young_collection_name_space,
+  _eden_space_counters = new HSpaceCounters(young_collection_name_space,
     "eden", 0 /* ordinal */,
-    pad_capacity(overall_reserved()) /* max_capacity */,
-    pad_capacity(eden_space_committed()) /* init_capacity */);
+    pad_capacity(g1h->max_capacity()) /* max_capacity */,
+    pad_capacity(_eden_space_committed) /* init_capacity */);
 
   //  name "generation.0.space.1"
   // See _old_space_counters for additional counters
   // Set the arguments to indicate that this survivor space is not used.
-  _from_counters = new HSpaceCounters(young_collection_name_space,
+  _from_space_counters = new HSpaceCounters(young_collection_name_space,
     "s0", 1 /* ordinal */,
     pad_capacity(0) /* max_capacity */,
     pad_capacity(0) /* init_capacity */);
+  // Given that this survivor space is not used, we update it here
+  // once to reflect that its used space is 0 so that we don't have to
+  // worry about updating it again later.
+  _from_space_counters->update_used(0);
 
   //  name "generation.0.space.2"
   // See _old_space_counters for additional counters
-  _to_counters = new HSpaceCounters(young_collection_name_space,
+  _to_space_counters = new HSpaceCounters(young_collection_name_space,
     "s1", 2 /* ordinal */,
-    pad_capacity(overall_reserved()) /* max_capacity */,
-    pad_capacity(survivor_space_committed()) /* init_capacity */);
+    pad_capacity(g1h->max_capacity()) /* max_capacity */,
+    pad_capacity(_survivor_space_committed) /* init_capacity */);
+}
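
Taken together, the counters created in this constructor form a small jstat name hierarchy (the full-collection counters, presumably "collector.1", are created outside this hunk):

  collector.0             incremental (young and mixed) collections
  collector.2             "G1 stop-the-world phases" (concurrent cycle pauses)
  generation.0            young generation
  generation.0.space.0    eden
  generation.0.space.1    s0 (the unused "from" space, always reported as 0 used)
  generation.0.space.2    s1 (the active "to" space)
  generation.1            old generation
  generation.1.space.0    old space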
 
-  if (UsePerfData) {
-    // Given that this survivor space is not used, we update it here
-    // once to reflect that its used space is 0 so that we don't have to
-    // worry about updating it again later.
-    _from_counters->update_used(0);
-  }
+G1MonitoringSupport::~G1MonitoringSupport() {
+  delete _eden_space_pool;
+  delete _survivor_space_pool;
+  delete _old_gen_pool;
+}
+
+void G1MonitoringSupport::initialize_serviceability() {
+  _eden_space_pool = new G1EdenPool(_g1h, _eden_space_committed);
+  _survivor_space_pool = new G1SurvivorPool(_g1h, _survivor_space_committed);
+  _old_gen_pool = new G1OldGenPool(_g1h, _old_gen_committed, _g1h->max_capacity());
+
+  _full_gc_memory_manager.add_pool(_eden_space_pool);
+  _full_gc_memory_manager.add_pool(_survivor_space_pool);
+  _full_gc_memory_manager.add_pool(_old_gen_pool);
+
+  _incremental_memory_manager.add_pool(_eden_space_pool);
+  _incremental_memory_manager.add_pool(_survivor_space_pool);
+  _incremental_memory_manager.add_pool(_old_gen_pool, false /* always_affected_by_gc */);
+}
+
+GrowableArray<GCMemoryManager*> G1MonitoringSupport::memory_managers() {
+  GrowableArray<GCMemoryManager*> memory_managers(2);
+  memory_managers.append(&_incremental_memory_manager);
+  memory_managers.append(&_full_gc_memory_manager);
+  return memory_managers;
+}
+
+GrowableArray<MemoryPool*> G1MonitoringSupport::memory_pools() {
+  GrowableArray<MemoryPool*> memory_pools(3);
+  memory_pools.append(_eden_space_pool);
+  memory_pools.append(_survivor_space_pool);
+  memory_pools.append(_old_gen_pool);
+  return memory_pools;
 }
 
 void G1MonitoringSupport::recalculate_sizes() {
-  // Recalculate all the sizes from scratch. We assume that this is
-  // called at a point where no concurrent updates to the various
-  // values we read here are possible (i.e., at a STW phase at the end
-  // of a GC).
+  assert_heap_locked_or_at_safepoint(true);
+
+  // Recalculate all the sizes from scratch.
 
   uint young_list_length = _g1h->young_regions_count();
   uint survivor_list_length = _g1h->survivor_regions_count();
@@ -200,14 +231,13 @@
   uint eden_list_max_length = young_list_max_length - survivor_list_length;
 
   _overall_used = _g1h->used_unlocked();
-  _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
-  _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
-  _young_region_num = young_list_length;
-  _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
+  _eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
+  _survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
+  _old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used);
 
   // First calculate the committed sizes that can be calculated independently.
-  _survivor_committed = _survivor_used;
-  _old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
+  _survivor_space_committed = _survivor_space_used;
+  _old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);
 
   // Next, start with the overall committed size.
   _overall_committed = _g1h->capacity();
@@ -215,70 +245,64 @@
 
   // Remove the committed size we have calculated so far (for the
   // survivor and old space).
-  assert(committed >= (_survivor_committed + _old_committed), "sanity");
-  committed -= _survivor_committed + _old_committed;
+  assert(committed >= (_survivor_space_committed + _old_gen_committed), "sanity");
+  committed -= _survivor_space_committed + _old_gen_committed;
 
   // Next, calculate and remove the committed size for the eden.
-  _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
+  _eden_space_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
   // Somewhat defensive: be robust in case there are inaccuracies in
   // the calculations
-  _eden_committed = MIN2(_eden_committed, committed);
-  committed -= _eden_committed;
+  _eden_space_committed = MIN2(_eden_space_committed, committed);
+  committed -= _eden_space_committed;
 
   // Finally, give the rest to the old space...
-  _old_committed += committed;
+  _old_gen_committed += committed;
   // ..and calculate the young gen committed.
-  _young_gen_committed = _eden_committed + _survivor_committed;
+  _young_gen_committed = _eden_space_committed + _survivor_space_committed;
 
   assert(_overall_committed ==
-         (_eden_committed + _survivor_committed + _old_committed),
+         (_eden_space_committed + _survivor_space_committed + _old_gen_committed),
          "the committed sizes should add up");
   // Somewhat defensive: cap the eden used size to make sure it
   // never exceeds the committed size.
-  _eden_used = MIN2(_eden_used, _eden_committed);
+  _eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
   // _survivor_committed and _old_committed are calculated in terms of
   // the corresponding _*_used value, so the next two conditions
   // should hold.
-  assert(_survivor_used <= _survivor_committed, "post-condition");
-  assert(_old_used <= _old_committed, "post-condition");
-}
-
-void G1MonitoringSupport::recalculate_eden_size() {
-  // When a new eden region is allocated, only the eden_used size is
-  // affected (since we have recalculated everything else at the last GC).
-
-  uint young_region_num = _g1h->young_regions_count();
-  if (young_region_num > _young_region_num) {
-    uint diff = young_region_num - _young_region_num;
-    _eden_used += (size_t) diff * HeapRegion::GrainBytes;
-    // Somewhat defensive: cap the eden used size to make sure it
-    // never exceeds the committed size.
-    _eden_used = MIN2(_eden_used, _eden_committed);
-    _young_region_num = young_region_num;
-  }
+  assert(_survivor_space_used <= _survivor_space_committed, "post-condition");
+  assert(_old_gen_used <= _old_gen_committed, "post-condition");
 }
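
To make the accounting above concrete, here is a small standalone C++ sketch that reruns the same arithmetic with made-up numbers (the region size, region counts, and used/committed totals are all assumptions chosen for illustration):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Assumed inputs: 1 MB regions, 8 eden + 2 survivor regions,
  // 25 MB used overall, 40 MB committed overall.
  const size_t region            = 1024 * 1024;  // stands in for HeapRegion::GrainBytes
  const size_t eden_used         = 8 * region;
  const size_t survivor_used     = 2 * region;
  const size_t overall_used      = 25 * region;
  const size_t overall_committed = 40 * region;

  // Old used = overall used minus young used, clamped at zero
  // (the subtract_up_to_zero helper in the real code).
  size_t old_used = overall_used > eden_used + survivor_used
                      ? overall_used - (eden_used + survivor_used) : 0;

  // Committed sizes: survivors are committed exactly as used; old starts
  // from its used size (region-aligned; already aligned in this example).
  size_t survivor_committed = survivor_used;
  size_t old_committed      = old_used;

  // Whatever committed space remains is split between eden (capped at its
  // maximum, 12 regions here) and, finally, the old generation.
  size_t rest            = overall_committed - (survivor_committed + old_committed);
  size_t eden_committed  = std::min<size_t>(12 * region, rest);
  rest                  -= eden_committed;
  old_committed         += rest;
  size_t young_committed = eden_committed + survivor_committed;

  printf("eden %zu MB, survivor %zu MB, old %zu MB, young gen %zu MB\n",
         eden_committed / region, survivor_committed / region,
         old_committed / region, young_committed / region);
  // Prints: eden 12 MB, survivor 2 MB, old 26 MB, young gen 14 MB
  return 0;
}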
 
 void G1MonitoringSupport::update_sizes() {
   recalculate_sizes();
   if (UsePerfData) {
-    eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
-    eden_counters()->update_used(eden_space_used());
-    // only the to survivor space (s1) is active, so we don't need to
-    // update the counters for the from survivor space (s0)
-    to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
-    to_counters()->update_used(survivor_space_used());
-    old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
-    old_space_counters()->update_used(old_space_used());
-    old_collection_counters()->update_all();
-    young_collection_counters()->update_all();
+    _eden_space_counters->update_capacity(pad_capacity(_eden_space_committed));
+    _eden_space_counters->update_used(_eden_space_used);
+    // only the "to" survivor space is active, so we don't need to
+    // update the counters for the "from" survivor space
+    _to_space_counters->update_capacity(pad_capacity(_survivor_space_committed));
+    _to_space_counters->update_used(_survivor_space_used);
+    _old_space_counters->update_capacity(pad_capacity(_old_gen_committed));
+    _old_space_counters->update_used(_old_gen_used);
+
+    _young_gen_counters->update_all();
+    _old_gen_counters->update_all();
+
     MetaspaceCounters::update_performance_counters();
     CompressedClassSpaceCounters::update_performance_counters();
   }
 }
 
 void G1MonitoringSupport::update_eden_size() {
-  recalculate_eden_size();
+  // Recalculate everything - this is fast enough.
+  recalculate_sizes();
   if (UsePerfData) {
-    eden_counters()->update_used(eden_space_used());
+    _eden_space_counters->update_used(_eden_space_used);
   }
 }
+
+G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected) :
+  _tcs(full_gc ? g1mm->_full_collection_counters : g1mm->_incremental_collection_counters),
+  _tms(full_gc ? &g1mm->_full_gc_memory_manager : &g1mm->_incremental_memory_manager,
+       G1CollectedHeap::heap()->gc_cause(), all_memory_pools_affected) {
+}
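
G1MonitoringScope is an RAII helper: constructing it starts the TraceCollectorStats and TraceMemoryManagerStats bookkeeping for a pause, and destroying it publishes the results. A hypothetical use at a pause boundary (the g1mm() accessor and the surrounding pause code are assumptions, not shown here):

{
  // Illustration only: scope a young (incremental) pause.
  G1MonitoringScope scope(g1h->g1mm(), false /* full_gc */, true /* all_memory_pools_affected */);
  // ... perform the evacuation pause ...
}  // leaving the scope records collector and memory manager statistics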
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -25,11 +25,15 @@
 #ifndef SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP
 #define SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP
 
+#include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/generationCounters.hpp"
+#include "services/memoryManager.hpp"
+#include "services/memoryService.hpp"
 
 class CollectorCounters;
 class G1CollectedHeap;
 class HSpaceCounters;
+class MemoryPool;
 
 // Class for monitoring logical spaces in G1. It provides data for
 // both G1's jstat counters as well as G1's memory pools.
@@ -116,9 +120,18 @@
 
 class G1MonitoringSupport : public CHeapObj<mtGC> {
   friend class VMStructs;
+  friend class G1MonitoringScope;
 
   G1CollectedHeap* _g1h;
 
+  // java.lang.management MemoryManager and MemoryPool support
+  GCMemoryManager _incremental_memory_manager;
+  GCMemoryManager _full_gc_memory_manager;
+
+  MemoryPool* _eden_space_pool;
+  MemoryPool* _survivor_space_pool;
+  MemoryPool* _old_gen_pool;
+
   // jstat performance counters
   //  incremental collections both young and mixed
   CollectorCounters*   _incremental_collection_counters;
@@ -129,37 +142,36 @@
   //  young collection set counters.  The _eden_counters,
   // _from_counters, and _to_counters are associated with
   // this "generational" counter.
-  GenerationCounters*  _young_collection_counters;
+  GenerationCounters*  _young_gen_counters;
   //  old collection set counters. The _old_space_counters
   // below are associated with this "generational" counter.
-  GenerationCounters*  _old_collection_counters;
+  GenerationCounters*  _old_gen_counters;
   // Counters for the capacity and used for
   //   the whole heap
   HSpaceCounters*      _old_space_counters;
   //   the young collection
-  HSpaceCounters*      _eden_counters;
+  HSpaceCounters*      _eden_space_counters;
   //   the survivor collection (only one, _to_counters, is actively used)
-  HSpaceCounters*      _from_counters;
-  HSpaceCounters*      _to_counters;
+  HSpaceCounters*      _from_space_counters;
+  HSpaceCounters*      _to_space_counters;
 
   // When it's appropriate to recalculate the various sizes (at the
   // end of a GC, when a new eden region is allocated, etc.) we store
   // them here so that we can easily report them when needed and not
   // have to recalculate them every time.
 
-  size_t _overall_reserved;
   size_t _overall_committed;
   size_t _overall_used;
 
-  uint   _young_region_num;
   size_t _young_gen_committed;
-  size_t _eden_committed;
-  size_t _eden_used;
-  size_t _survivor_committed;
-  size_t _survivor_used;
+  size_t _old_gen_committed;
 
-  size_t _old_committed;
-  size_t _old_used;
+  size_t _eden_space_committed;
+  size_t _eden_space_used;
+  size_t _survivor_space_committed;
+  size_t _survivor_space_used;
+
+  size_t _old_gen_used;
 
   // It returns x - y if x > y, 0 otherwise.
   // As described in the comment above, some of the inputs to the
@@ -178,11 +190,16 @@
 
   // Recalculate all the sizes.
   void recalculate_sizes();
-  // Recalculate only what's necessary when a new eden region is allocated.
+
   void recalculate_eden_size();
 
- public:
+public:
   G1MonitoringSupport(G1CollectedHeap* g1h);
+  ~G1MonitoringSupport();
+
+  void initialize_serviceability();
+  GrowableArray<GCMemoryManager*> memory_managers();
+  GrowableArray<MemoryPool*> memory_pools();
 
   // Unfortunately, the jstat tool assumes that no space has 0
   // capacity. In our case, given that each space is logical, it's
@@ -202,73 +219,35 @@
   // Recalculate all the sizes from scratch and update all the jstat
   // counters accordingly.
   void update_sizes();
-  // Recalculate only what's necessary when a new eden region is
-  // allocated and update any jstat counters that need to be updated.
+
   void update_eden_size();
 
-  CollectorCounters* incremental_collection_counters() {
-    return _incremental_collection_counters;
-  }
-  CollectorCounters* full_collection_counters() {
-    return _full_collection_counters;
-  }
   CollectorCounters* conc_collection_counters() {
     return _conc_collection_counters;
   }
-  GenerationCounters* young_collection_counters() {
-    return _young_collection_counters;
-  }
-  GenerationCounters* old_collection_counters() {
-    return _old_collection_counters;
-  }
-  HSpaceCounters*      old_space_counters() { return _old_space_counters; }
-  HSpaceCounters*      eden_counters() { return _eden_counters; }
-  HSpaceCounters*      from_counters() { return _from_counters; }
-  HSpaceCounters*      to_counters() { return _to_counters; }
 
   // Monitoring support used by
   //   MemoryService
   //   jstat counters
   //   Tracing
 
-  size_t overall_reserved()           { return _overall_reserved;     }
-  size_t overall_committed()          { return _overall_committed;    }
-  size_t overall_used()               { return _overall_used;         }
+  size_t young_gen_committed()        { return _young_gen_committed; }
 
-  size_t young_gen_committed()        { return _young_gen_committed;  }
-  size_t young_gen_max()              { return overall_reserved();    }
-  size_t eden_space_committed()       { return _eden_committed;       }
-  size_t eden_space_used()            { return _eden_used;            }
-  size_t survivor_space_committed()   { return _survivor_committed;   }
-  size_t survivor_space_used()        { return _survivor_used;        }
+  size_t eden_space_committed()       { return _eden_space_committed; }
+  size_t eden_space_used()            { return _eden_space_used; }
+  size_t survivor_space_committed()   { return _survivor_space_committed; }
+  size_t survivor_space_used()        { return _survivor_space_used; }
 
-  size_t old_gen_committed()          { return old_space_committed(); }
-  size_t old_gen_max()                { return overall_reserved();    }
-  size_t old_space_committed()        { return _old_committed;        }
-  size_t old_space_used()             { return _old_used;             }
+  size_t old_gen_committed()          { return _old_gen_committed; }
+  size_t old_gen_used()               { return _old_gen_used; }
 };
 
-class G1GenerationCounters: public GenerationCounters {
-protected:
-  G1MonitoringSupport* _g1mm;
-
+// Scope object for java.lang.management support.
+class G1MonitoringScope : public StackObj {
+  TraceCollectorStats _tcs;
+  TraceMemoryManagerStats _tms;
 public:
-  G1GenerationCounters(G1MonitoringSupport* g1mm,
-                       const char* name, int ordinal, int spaces,
-                       size_t min_capacity, size_t max_capacity,
-                       size_t curr_capacity);
-};
-
-class G1YoungGenerationCounters: public G1GenerationCounters {
-public:
-  G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
-  virtual void update_all();
-};
-
-class G1OldGenerationCounters: public G1GenerationCounters {
-public:
-  G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
-  virtual void update_all();
+  G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected);
 };
 
 #endif // SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP
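
The pad_capacity() helper used throughout the .cpp changes above exists because, as the comment in this header notes, jstat assumes that no space has zero capacity, so reported capacities are padded up to a small minimum. A standalone sketch of that kind of padding; the minimum chosen below is an assumption, not the value G1 actually uses:

#include <algorithm>
#include <cstddef>

// Illustration only: never report a logical space capacity of zero.
static size_t pad_capacity_sketch(size_t size_bytes, size_t min_bytes = 4096) {
  return std::max(size_bytes, min_bytes);
}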
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -132,7 +132,7 @@
 
     virtual bool do_heap_region(HeapRegion* r) {
       uint hrm_index = r->hrm_index();
-      if (!r->in_collection_set() && r->is_old_or_humongous()) {
+      if (!r->in_collection_set() && r->is_old_or_humongous_or_archive()) {
         _scan_top[hrm_index] = r->top();
       } else {
         _scan_top[hrm_index] = r->bottom();
@@ -571,7 +571,7 @@
   // In the normal (non-stale) case, the synchronization between the
   // enqueueing of the card and processing it here will have ensured
   // we see the up-to-date region type here.
-  if (!r->is_old_or_humongous()) {
+  if (!r->is_old_or_humongous_or_archive()) {
     return;
   }
 
@@ -600,7 +600,7 @@
       // Check whether the region formerly in the cache should be
       // ignored, as discussed earlier for the original card.  The
       // region could have been freed while in the cache.
-      if (!r->is_old_or_humongous()) {
+      if (!r->is_old_or_humongous_or_archive()) {
         return;
       }
     } // Else we still have the original card.
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -226,6 +226,7 @@
   RegionTypeCounter _humongous;
   RegionTypeCounter _free;
   RegionTypeCounter _old;
+  RegionTypeCounter _archive;
   RegionTypeCounter _all;
 
   size_t _max_rs_mem_sz;
@@ -248,7 +249,7 @@
 
 public:
   HRRSStatsIter() : _young("Young"), _humongous("Humongous"),
-    _free("Free"), _old("Old"), _all("All"),
+    _free("Free"), _old("Old"), _archive("Archive"), _all("All"),
     _max_rs_mem_sz(0), _max_rs_mem_sz_region(NULL),
     _max_code_root_mem_sz(0), _max_code_root_mem_sz_region(NULL)
   {}
@@ -280,6 +281,8 @@
       current = &_humongous;
     } else if (r->is_old()) {
       current = &_old;
+    } else if (r->is_archive()) {
+      current = &_archive;
     } else {
       ShouldNotReachHere();
     }
@@ -290,7 +293,7 @@
   }
 
   void print_summary_on(outputStream* out) {
-    RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
+    RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, &_archive, NULL };
 
     out->print_cr(" Current rem set statistics");
     out->print_cr("  Total per region rem sets sizes = " SIZE_FORMAT "%s."
--- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -141,8 +141,9 @@
 void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 
-  if (r->is_old_or_humongous()) {
+  if (r->is_old_or_humongous_or_archive()) {
     if (r->rem_set()->is_updating()) {
+      assert(!r->is_archive(), "Archive region %u with remembered set", r->hrm_index());
       r->rem_set()->set_state_complete();
     }
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -27,7 +27,7 @@
 #include "gc/g1/g1SATBMarkQueueSet.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
 #include "oops/oop.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP
 #define SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP
 
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
 
 class G1CollectedHeap;
 class JavaThread;
--- a/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ThreadLocalData.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -26,7 +26,7 @@
 
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/satbMarkQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/sizes.hpp"
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -426,6 +426,8 @@
 
   bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
 
+  bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); }
+
   // A pinned region contains objects which are not moved by garbage collections.
   // Humongous regions and archive regions are pinned.
   bool is_pinned() const { return _type.is_pinned(); }
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -350,7 +350,7 @@
   if (is_humongous()) {
     return do_oops_on_card_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
   }
-  assert(is_old(), "precondition");
+  assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());
 
   // Because mr has been trimmed to what's been allocated in this
   // region, the parts of the heap that are examined here are always
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,41 @@
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "memory/allocation.hpp"
 
+class MasterFreeRegionListChecker : public HeapRegionSetChecker {
+public:
+  void check_mt_safety() {
+    // Master Free List MT safety protocol:
+    // (a) If we're at a safepoint, operations on the master free list
+    // should be invoked by either the VM thread (which will serialize
+    // them) or by the GC workers while holding the
+    // FreeList_lock.
+    // (b) If we're not at a safepoint, operations on the master free
+    // list should be invoked while holding the Heap_lock.
+
+    if (SafepointSynchronize::is_at_safepoint()) {
+      guarantee(Thread::current()->is_VM_thread() ||
+                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
+    } else {
+      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
+    }
+  }
+  bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
+  const char* get_description() { return "Free Regions"; }
+};
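
The free-list checker above bundles the MT safety protocol, the type predicate, and a description into a single object. The other master sets presumably get analogous checkers elsewhere in this changeset; as a sketch, the hypothetical checker below transplants the protocol of the OldRegionSetMtSafeChecker removed further down into the new interface (it is not part of this hunk):

class MasterOldRegionSetChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Same protocol as the removed OldRegionSetMtSafeChecker::check().
    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
                "master old set MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }
  const char* get_description() { return "Old Regions"; }
};

Such a set would then be constructed as, for example, HeapRegionSet("Old Set", new MasterOldRegionSetChecker()) (hypothetical usage).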
+
+HeapRegionManager::HeapRegionManager() :
+  _regions(), _heap_mapper(NULL),
+  _prev_bitmap_mapper(NULL),
+  _next_bitmap_mapper(NULL),
+  _bot_mapper(NULL),
+  _cardtable_mapper(NULL),
+  _card_counts_mapper(NULL),
+  _free_list("Free list", new MasterFreeRegionListChecker()),
+  _available_map(mtGC),
+  _num_committed(0),
+  _allocated_heapregions_length(0)
+{ }
+
 void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                G1RegionToSpaceMapper* prev_bitmap,
                                G1RegionToSpaceMapper* next_bitmap,
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -128,14 +128,7 @@
 
  public:
   // Empty constructor, we'll initialize it with the initialize() method.
-  HeapRegionManager() :
-   _regions(), _heap_mapper(NULL),
-   _prev_bitmap_mapper(NULL), _next_bitmap_mapper(NULL), _bot_mapper(NULL),
-   _cardtable_mapper(NULL), _card_counts_mapper(NULL),
-   _free_list("Free list", new MasterFreeRegionListMtSafeChecker()),
-   _available_map(mtGC), _num_committed(0),
-   _allocated_heapregions_length(0)
-  { }
+  HeapRegionManager();
 
   void initialize(G1RegionToSpaceMapper* heap_storage,
                   G1RegionToSpaceMapper* prev_bitmap,
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Thu Aug 23 22:07:27 2018 +0200
@@ -33,8 +33,8 @@
 void HeapRegionSetBase::verify_region(HeapRegion* hr) {
   assert(hr->containing_set() == this, "Inconsistent containing set for %u", hr->hrm_index());
   assert(!hr->is_young(), "Adding young region %u", hr->hrm_index()); // currently we don't use these sets for young regions
-  assert(hr->is_humongous() == regions_humongous(), "Wrong humongous state for region %u and set %s", hr->hrm_index(), name());
-  assert(hr->is_free() == regions_free(), "Wrong free state for region %u and set %s", hr->hrm_index(), name());
+  assert(_checker == NULL || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s",
+         hr->hrm_index(), hr->get_type_str(), name());
   assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name());
   assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
          "Empty region %u is not free or archive for set %s", hr->hrm_index(), name());
@@ -75,21 +75,14 @@
 void HeapRegionSetBase::print_on(outputStream* out, bool print_contents) {
   out->cr();
   out->print_cr("Set: %s (" PTR_FORMAT ")", name(), p2i(this));
-  out->print_cr("  Region Assumptions");
-  out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
-  out->print_cr("    free              : %s", BOOL_TO_STR(regions_free()));
-  out->print_cr("  Attributes");
-  out->print_cr("    length            : %14u", length());
+  out->print_cr("  Region Type         : %s", _checker->get_description());
+  out->print_cr("  Length              : %14u", length());
 }
 
-HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker)
-  : _is_humongous(humongous),
-    _is_free(free),
-    _mt_safety_checker(mt_safety_checker),
-    _length(0),
-    _name(name),
-    _verify_in_progress(false)
-{ }
+HeapRegionSetBase::HeapRegionSetBase(const char* name, HeapRegionSetChecker* checker)
+  : _checker(checker), _length(0), _name(name), _verify_in_progress(false)
+{
+}
 
 void FreeRegionList::set_unrealistically_long_length(uint len) {
   guarantee(_unrealistically_long_length == 0, "should only be set once");
@@ -295,73 +288,3 @@
   guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
   guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count);
 }
-
-// Note on the check_mt_safety() methods below:
-//
-// Verification of the "master" heap region sets / lists that are
-// maintained by G1CollectedHeap is always done during a STW pause and
-// by the VM thread at the start / end of the pause. The standard
-// verification methods all assert check_mt_safety(). This is
-// important as it ensures that verification is done without
-// concurrent updates taking place at the same time. It follows, that,
-// for the "master" heap region sets / lists, the check_mt_safety()
-// method should include the VM thread / STW case.
-
-void MasterFreeRegionListMtSafeChecker::check() {
-  // Master Free List MT safety protocol:
-  // (a) If we're at a safepoint, operations on the master free list
-  // should be invoked by either the VM thread (which will serialize
-  // them) or by the GC workers while holding the
-  // FreeList_lock.
-  // (b) If we're not at a safepoint, operations on the master free
-  // list should be invoked while holding the Heap_lock.
-
-  if (SafepointSynchronize::is_at_safepoint()) {
-    guarantee(Thread::current()->is_VM_thread() ||
-              FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
-  } else {
-    guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
-  }
-}
-
-void OldRegionSetMtSafeChecker::check() {
-  // Master Old Set MT safety protocol:
-  // (a) If we're at a safepoint, operations on the master old set
-  // should be invoked:
-  // - by the VM thread (which will serialize them), or
-  // - by the GC workers while holding the FreeList_lock, if we're
-  //   at a safepoint for an evacuation pause (this lock is taken
-  //   anyway when an GC alloc region is retired so that a new one
-  //   is allocated from the free list), or
-  // - by the GC workers while holding the OldSets_lock, if we're at a
-  //   safepoint for a cleanup pause.
-  // (b) If we're not at a safepoint, operations on the master old set
-  // should be invoked while holding the Heap_lock.
-
-  if (SafepointSynchronize::is_at_safepoint()) {
-    guarantee(Thread::current()->is_VM_thread()
-        || FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
-        "master old set MT safety protocol at a safepoint");
-  } else {
-    guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
-  }
-}
-
-void HumongousRegionSetMtSafeChecker::check() {
-  // Humongous Set MT safety protocol:
-  // (a) If we're at a safepoint, operations on the master humongous
-  // set should be invoked by either the VM thread (which will
-  // serialize them) or by the GC workers while holding the
-  // OldSets_lock.
-  // (b) If we're not at a safepoint, operations on the master
-  // humongous set should be invoked while holding the Heap_lock.
-
-  if (SafepointSynchronize::is_at_safepoint()) {
-    guarantee(Thread::current()->is_VM_thread() ||
-              OldSets_lock->owned_by_self(),
-              "master humongous set MT safety protocol at a safepoint");
-  } else {
-    guarantee(Heap_lock->owned_by_self(),
-              "master humongous set MT safety protocol outside a safepoint");
-  }
-}
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -47,15 +47,18 @@
   } while (0)
 
 
-class HRSMtSafeChecker : public CHeapObj<mtGC> {
+// Interface collecting various instance-specific verification methods of
+// HeapRegionSets.
+class HeapRegionSetChecker : public CHeapObj<mtGC> {
 public:
-  virtual void check() = 0;
+  // Verify MT safety for this HeapRegionSet.
+  virtual void check_mt_safety() = 0;
+  // Returns true if the given HeapRegion is of the correct type for this HeapRegionSet.
+  virtual bool is_correct_type(HeapRegion* hr) = 0;
+  // Returns a description of the type of regions this HeapRegionSet contains.
+  virtual const char* get_description() = 0;
 };
 
-class MasterFreeRegionListMtSafeChecker    : public HRSMtSafeChecker { public: void check(); };
-class HumongousRegionSetMtSafeChecker      : public HRSMtSafeChecker { public: void check(); };
-class OldRegionSetMtSafeChecker            : public HRSMtSafeChecker { public: void check(); };
-
 // Base class for all the classes that represent heap region sets. It
 // contains the basic attributes that each set needs to maintain
 // (e.g., length, region num, used bytes sum) plus any shared
@@ -63,10 +66,8 @@
 
 class HeapRegionSetBase {
   friend class VMStructs;
-private:
-  bool _is_humongous;
-  bool _is_free;
-  HRSMtSafeChecker* _mt_safety_checker;
+
+  HeapRegionSetChecker* _checker;
 
 protected:
   // The number of regions in to the set.
@@ -80,21 +81,13 @@
   // added to / removed from a set are consistent.
   void verify_region(HeapRegion* hr) PRODUCT_RETURN;
 
-  // Indicates whether all regions in the set should be humongous or
-  // not. Only used during verification.
-  bool regions_humongous() { return _is_humongous; }
-
-  // Indicates whether all regions in the set should be free or
-  // not. Only used during verification.
-  bool regions_free() { return _is_free; }
-
   void check_mt_safety() {
-    if (_mt_safety_checker != NULL) {
-      _mt_safety_checker->check();
+    if (_checker != NULL) {
+      _checker->check_mt_safety();
     }
   }
 
-  HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker);
+  HeapRegionSetBase(const char* name, HeapRegionSetChecker* verifier);
 
 public:
   const char* name() { return _name; }
@@ -121,15 +114,6 @@
   virtual void print_on(outputStream* out, bool print_contents = false);
 };
 
-#define hrs_assert_sets_match(_set1_, _set2_)                                  \
-  do {                                                                         \
-    assert(((_set1_)->regions_humongous() == (_set2_)->regions_humongous()) && \
-           ((_set1_)->regions_free() == (_set2_)->regions_free()),             \
-           "the contents of set %s and set %s should match",                   \
-           (_set1_)->name(),                                                   \
-           (_set2_)->name());                                                  \
-  } while (0)
-
 // This class represents heap region sets whose members are not
 // explicitly tracked. It's helpful to group regions using such sets
 // so that we can reason about all the region groups in the heap using
@@ -137,8 +121,9 @@
 
 class HeapRegionSet : public HeapRegionSetBase {
 public:
-  HeapRegionSet(const char* name, bool humongous, HRSMtSafeChecker* mt_safety_checker):
-    HeapRegionSetBase(name, humongous, false /* free */, mt_safety_checker) { }
+  HeapRegionSet(const char* name, HeapRegionSetChecker* checker):
+    HeapRegionSetBase(name, checker) {
+  }
 
   void bulk_remove(const uint removed) {
     _length -= removed;
@@ -173,8 +158,8 @@
   virtual void clear();
 
 public:
-  FreeRegionList(const char* name, HRSMtSafeChecker* mt_safety_checker = NULL):
-    HeapRegionSetBase(name, false /* humongous */, true /* empty */, mt_safety_checker) {
+  FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL):
+    HeapRegionSetBase(name, checker) {
     clear();
   }
 
--- a/src/hotspot/share/gc/g1/heapRegionType.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionType.hpp	Thu Aug 23 22:07:27 2018 +0200
@@ -86,8 +86,8 @@
     // Objects within these regions are allowed to have references to objects
     // contained in any other kind of regions.
     ArchiveMask           = 32,
-    OpenArchiveTag        = ArchiveMask | PinnedMask | OldMask,
-    ClosedArchiveTag      = ArchiveMask | PinnedMask | OldMask + 1
+    OpenArchiveTag        = ArchiveMask | PinnedMask,
+    ClosedArchiveTag      = ArchiveMask | PinnedMask + 1
   } Tag;
 
   volatile Tag _tag;
@@ -139,6 +139,8 @@
 
   bool is_old_or_humongous() const { return (get() & (OldMask | HumongousMask)) != 0; }
 
+  bool is_old_or_humongous_or_archive() const { return (get() & (OldMask | HumongousMask | ArchiveMask)) != 0; }
+
   // is_pinned regions may be archive or humongous
   bool is_pinned() const { return (get() & PinnedMask) != 0; }
 
--- a/src/hotspot/share/gc/g1/ptrQueue.cpp	Thu Aug 16 22:07:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/ptrQueue.hpp"
-#include "memory/allocation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/thread.inline.hpp"
-
-#include <new>
-
-PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
-  _qset(qset),
-  _active(active),
-  _permanent(permanent),
-  _index(0),
-  _capacity_in_bytes(0),
-  _buf(NULL),
-  _lock(NULL)
-{}
-
-PtrQueue::~PtrQueue() {
-  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
-}
-
-void PtrQueue::flush_impl() {
-  if (_buf != NULL) {
-    BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
-    if (is_empty()) {
-      // No work to do.
-      qset()->deallocate_buffer(node);
-    } else {
-      qset()->enqueue_complete_buffer(node);
-    }
-    _buf = NULL;
-    set_index(0);
-  }
-}
-
-
-void PtrQueue::enqueue_known_active(void* ptr) {
-  while (_index == 0) {
-    handle_zero_index();
-  }
-
-  assert(_buf != NULL, "postcondition");
-  assert(index() > 0, "postcondition");
-  assert(index() <= capacity(), "invariant");
-  _index -= _element_size;
-  _buf[index()] = ptr;
-}
-
-void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
-  assert(_lock->owned_by_self(), "Required.");
-  qset()->enqueue_complete_buffer(node);
-}
-
-
-BufferNode* BufferNode::allocate(size_t size) {
-  size_t byte_size = size * sizeof(void*);
-  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
-  return new (data) BufferNode;
-}
-
-void BufferNode::deallocate(BufferNode* node) {
-  node->~BufferNode();
-  FREE_C_HEAP_ARRAY(char, node);
-}
-
-PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
-  _buffer_size(0),
-  _cbl_mon(NULL),
-  _completed_buffers_head(NULL),
-  _completed_buffers_tail(NULL),
-  _n_completed_buffers(0),
-  _process_completed_threshold(0),
-  _process_completed(false),
-  _fl_lock(NULL),
-  _buf_free_list(NULL),
-  _buf_free_list_sz(0),
-  _fl_owner(NULL),
-  _all_active(false),
-  _notify_when_complete(notify_when_complete),
-  _max_completed_queue(0),
-  _completed_queue_padding(0)
-{
-  _fl_owner = this;
-}
-
-PtrQueueSet::~PtrQueueSet() {
-  // There are presently only a couple (derived) instances ever
-  // created, and they are permanent, so no harm currently done by
-  // doing nothing here.
-}
-
-void PtrQueueSet::initialize(Monitor* cbl_mon,
-                             Mutex* fl_lock,
-                             int process_completed_threshold,
-                             int max_completed_queue,
-                             PtrQueueSet *fl_owner) {
-  _max_completed_queue = max_completed_queue;
-  _process_completed_threshold = process_completed_threshold;
-  _completed_queue_padding = 0;
-  assert(cbl_mon != NULL && fl_lock != NULL, "Init order issue?");
-  _cbl_mon = cbl_mon;
-  _fl_lock = fl_lock;
-  _fl_owner = (fl_owner != NULL) ? fl_owner : this;
-}
-
-void** PtrQueueSet::allocate_buffer() {
-  BufferNode* node = NULL;
-  {
-    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
-    node = _fl_owner->_buf_free_list;
-    if (node != NULL) {
-      _fl_owner->_buf_free_list = node->next();
-      _fl_owner->_buf_free_list_sz--;
-    }
-  }
-  if (node == NULL) {
-    node = BufferNode::allocate(buffer_size());
-  } else {
-    // Reinitialize buffer obtained from free list.
-    node->set_index(0);
-    node->set_next(NULL);
-  }
-  return BufferNode::make_buffer_from_node(node);
-}
-
-void PtrQueueSet::deallocate_buffer(BufferNode* node) {
-  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
-  node->set_next(_fl_owner->_buf_free_list);
-  _fl_owner->_buf_free_list = node;
-  _fl_owner->_buf_free_list_sz++;
-}
-
-void PtrQueueSet::reduce_free_list() {
-  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
-  // For now we'll adopt the strategy of deleting half.
-  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
-  size_t n = _buf_free_list_sz / 2;
-  for (size_t i = 0; i < n; ++i) {
-    assert(_buf_free_list != NULL,
-           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
-    BufferNode* node = _buf_free_list;
-    _buf_free_list = node->next();
-    _buf_free_list_sz--;
-    BufferNode::deallocate(node);
-  }
-}
-
-void PtrQueue::handle_zero_index() {
-  assert(index() == 0, "precondition");
-
-  // This thread records the full buffer and allocates a new one (while
-  // holding the lock if there is one).
-  if (_buf != NULL) {
-    if (!should_enqueue_buffer()) {
-      assert(index() > 0, "the buffer can only be re-used if it's not full");
-      return;
-    }
-
-    if (_lock) {
-      assert(_lock->owned_by_self(), "Required.");
-
-      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
-      _buf = NULL;         // clear shared _buf field
-
-      locking_enqueue_completed_buffer(node); // enqueue completed buffer
-      assert(_buf == NULL, "multiple enqueuers appear to be racing");
-    } else {
-      BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
-      if (qset()->process_or_enqueue_complete_buffer(node)) {
-        // Recycle the buffer. No allocation.
-        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
-        assert(capacity() == qset()->buffer_size(), "invariant");
-        reset();
-        return;
-      }
-    }
-  }
-  // Set capacity in case this is the first allocation.
-  set_capacity(qset()->buffer_size());
-  // Allocate a new buffer.
-  _buf = qset()->allocate_buffer();
-  reset();
-}
-
-bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
-  if (Thread::current()->is_Java_thread()) {
-    // We don't lock. It is fine to be epsilon-precise here.
-    if (_max_completed_queue == 0 ||
-        (_max_completed_queue > 0 &&
-          _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
-      bool b = mut_process_buffer(node);
-      if (b) {
-        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
-        return true;
-      }
-    }
-  }
-  // The buffer will be enqueued. The caller will have to get a new one.
-  enqueue_complete_buffer(node);
-  return false;
-}
-
-void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
-  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  cbn->set_next(NULL);
-  if (_completed_buffers_tail == NULL) {
-    assert(_completed_buffers_head == NULL, "Well-formedness");
-    _completed_buffers_head = cbn;
-    _completed_buffers_tail = cbn;
-  } else {
-    _completed_buffers_tail->set_next(cbn);
-    _completed_buffers_tail = cbn;
-  }
-  _n_completed_buffers++;
-
-  if (!_process_completed && _process_completed_threshold >= 0 &&
-      _n_completed_buffers >= (size_t)_process_completed_threshold) {
-    _process_completed = true;
-    if (_notify_when_complete) {
-      _cbl_mon->notify();
-    }
-  }
-  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
-}
-
-size_t PtrQueueSet::completed_buffers_list_length() {
-  size_t n = 0;
-  BufferNode* cbn = _completed_buffers_head;
-  while (cbn != NULL) {
-    n++;
-    cbn = cbn->next();
-  }
-  return n;
-}
-
-void PtrQueueSet::assert_completed_buffer_list_len_correct() {
-  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  assert_completed_buffer_list_len_correct_locked();
-}
-
-void PtrQueueSet::assert_completed_buffer_list_len_correct_locked() {
-  guarantee(completed_buffers_list_length() ==  _n_completed_buffers,
-            "Completed buffer length is wrong.");
-}
-
-void PtrQueueSet::set_buffer_size(size_t sz) {
-  assert(_buffer_size == 0 && sz > 0, "Should be called only once.");
-  _buffer_size = sz;
-}
-
-// Merge lists of buffers. Notify the processing threads.
-// The source queue is emptied as a result. The queues
-// must share the monitor.
-void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
-  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
-  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  if (_completed_buffers_tail == NULL) {
-    assert(_completed_buffers_head == NULL, "Well-formedness");
-    _completed_buffers_head = src->_completed_buffers_head;
-    _completed_buffers_tail = src->_completed_buffers_tail;
-  } else {
-    assert(_completed_buffers_head != NULL, "Well formedness");
-    if (src->_completed_buffers_head != NULL) {
-      _completed_buffers_tail->set_next(src->_completed_buffers_head);
-      _completed_buffers_tail = src->_completed_buffers_tail;
-    }
-  }
-  _n_completed_buffers += src->_n_completed_buffers;
-
-  src->_n_completed_buffers = 0;
-  src->_completed_buffers_head = NULL;
-  src->_completed_buffers_tail = NULL;
-
-  assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
-         _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
-         "Sanity");
-}
-
-void PtrQueueSet::notify_if_necessary() {
-  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  assert(_process_completed_threshold >= 0, "_process_completed is negative");
-  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
-    _process_completed = true;
-    if (_notify_when_complete)
-      _cbl_mon->notify();
-  }
-}
--- a/src/hotspot/share/gc/g1/ptrQueue.hpp	Thu Aug 16 22:07:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,371 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_PTRQUEUE_HPP
-#define SHARE_VM_GC_G1_PTRQUEUE_HPP
-
-#include "utilities/align.hpp"
-#include "utilities/sizes.hpp"
-
-// There are various techniques that require threads to be able to log
-// addresses.  For example, a generational write barrier might log
-// the addresses of modified old-generation objects.  This type supports
-// this operation.
-
-class BufferNode;
-class PtrQueueSet;
-class PtrQueue {
-  friend class VMStructs;
-
-  // Noncopyable - not defined.
-  PtrQueue(const PtrQueue&);
-  PtrQueue& operator=(const PtrQueue&);
-
-  // The ptr queue set to which this queue belongs.
-  PtrQueueSet* const _qset;
-
-  // Whether updates should be logged.
-  bool _active;
-
-  // If true, the queue is permanent, and doesn't need to deallocate
-  // its buffer in the destructor (since that obtains a lock which may not
-  // be legally locked by then.
-  const bool _permanent;
-
-  // The (byte) index at which an object was last enqueued.  Starts at
-  // capacity_in_bytes (indicating an empty buffer) and goes towards zero.
-  // Value is always pointer-size aligned.
-  size_t _index;
-
-  // Size of the current buffer, in bytes.
-  // Value is always pointer-size aligned.
-  size_t _capacity_in_bytes;
-
-  static const size_t _element_size = sizeof(void*);
-
-  // Get the capacity, in bytes.  The capacity must have been set.
-  size_t capacity_in_bytes() const {
-    assert(_capacity_in_bytes > 0, "capacity not set");
-    return _capacity_in_bytes;
-  }
-
-  void set_capacity(size_t entries) {
-    size_t byte_capacity = index_to_byte_index(entries);
-    assert(_capacity_in_bytes == 0 || _capacity_in_bytes == byte_capacity,
-           "changing capacity " SIZE_FORMAT " -> " SIZE_FORMAT,
-           _capacity_in_bytes, byte_capacity);
-    _capacity_in_bytes = byte_capacity;
-  }
-
-  static size_t byte_index_to_index(size_t ind) {
-    assert(is_aligned(ind, _element_size), "precondition");
-    return ind / _element_size;
-  }
-
-  static size_t index_to_byte_index(size_t ind) {
-    return ind * _element_size;
-  }
-
-protected:
-  // The buffer.
-  void** _buf;
-
-  size_t index() const {
-    return byte_index_to_index(_index);
-  }
-
-  void set_index(size_t new_index) {
-    size_t byte_index = index_to_byte_index(new_index);
-    assert(byte_index <= capacity_in_bytes(), "precondition");
-    _index = byte_index;
-  }
-
-  size_t capacity() const {
-    return byte_index_to_index(capacity_in_bytes());
-  }
-
-  // If there is a lock associated with this buffer, this is that lock.
-  Mutex* _lock;
-
-  PtrQueueSet* qset() { return _qset; }
-  bool is_permanent() const { return _permanent; }
-
-  // Process queue entries and release resources.
-  void flush_impl();
-
-  // Initialize this queue to contain a null buffer, and be part of the
-  // given PtrQueueSet.
-  PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);
-
-  // Requires queue flushed or permanent.
-  ~PtrQueue();
-
-public:
-
-  // Associate a lock with a ptr queue.
-  void set_lock(Mutex* lock) { _lock = lock; }
-
-  // Forcibly set empty.
-  void reset() {
-    if (_buf != NULL) {
-      _index = capacity_in_bytes();
-    }
-  }
-
-  void enqueue(volatile void* ptr) {
-    enqueue((void*)(ptr));
-  }
-
-  // Enqueues the given "obj".
-  void enqueue(void* ptr) {
-    if (!_active) return;
-    else enqueue_known_active(ptr);
-  }
-
-  // This method is called when we're doing the zero index handling
-  // and gives a chance to the queues to do any pre-enqueueing
-  // processing they might want to do on the buffer. It should return
-  // true if the buffer should be enqueued, or false if enough
-  // entries were cleared from it so that it can be re-used. It should
-  // not return false if the buffer is still full (otherwise we can
-  // get into an infinite loop).
-  virtual bool should_enqueue_buffer() { return true; }
-  void handle_zero_index();
-  void locking_enqueue_completed_buffer(BufferNode* node);
-
-  void enqueue_known_active(void* ptr);
-
-  // Return the size of the in-use region.
-  size_t size() const {
-    size_t result = 0;
-    if (_buf != NULL) {
-      assert(_index <= capacity_in_bytes(), "Invariant");
-      result = byte_index_to_index(capacity_in_bytes() - _index);
-    }
-    return result;
-  }
-
-  bool is_empty() const {
-    return _buf == NULL || capacity_in_bytes() == _index;
-  }
-
-  // Set the "active" property of the queue to "b".  An enqueue to an
-  // inactive thread is a no-op.  Setting a queue to inactive resets its
-  // log to the empty state.
-  void set_active(bool b) {
-    _active = b;
-    if (!b && _buf != NULL) {
-      reset();
-    } else if (b && _buf != NULL) {
-      assert(index() == capacity(),
-             "invariant: queues are empty when activated.");
-    }
-  }
-
-  bool is_active() const { return _active; }
-
-  // To support compiler.
-
-protected:
-  template<typename Derived>
-  static ByteSize byte_offset_of_index() {
-    return byte_offset_of(Derived, _index);
-  }
-
-  static ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
-
-  template<typename Derived>
-  static ByteSize byte_offset_of_buf() {
-    return byte_offset_of(Derived, _buf);
-  }
-
-  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
-
-  template<typename Derived>
-  static ByteSize byte_offset_of_active() {
-    return byte_offset_of(Derived, _active);
-  }
-
-  static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
-
-};
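
The PtrQueue class above (removed from gc/g1 in this changeset) maintains a byte index that starts at the buffer capacity when the queue is empty and is decremented toward zero as entries are enqueued; size() and is_empty() are derived from that index. A small standalone illustration of the convention:

#include <cassert>
#include <cstddef>

int main() {
  const size_t element_size      = sizeof(void*);
  const size_t capacity_in_bytes = 8 * element_size;  // an 8-entry buffer
  size_t index = capacity_in_bytes;                   // empty queue

  // Enqueue three entries the way enqueue_known_active() does.
  for (int i = 0; i < 3; i++) {
    index -= element_size;                            // _index -= _element_size
  }

  size_t used_entries = (capacity_in_bytes - index) / element_size;
  assert(used_entries == 3);
  assert(index > 0);  // not full yet, so no zero-index handling is needed
  return 0;
}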
-
-class BufferNode {
-  size_t _index;
-  BufferNode* _next;
-  void* _buffer[1];             // Pseudo flexible array member.
-
-  BufferNode() : _index(0), _next(NULL) { }
-  ~BufferNode() { }
-
-  static size_t buffer_offset() {
-    return offset_of(BufferNode, _buffer);
-  }
-
-public:
-  BufferNode* next() const     { return _next;  }
-  void set_next(BufferNode* n) { _next = n;     }
-  size_t index() const         { return _index; }
-  void set_index(size_t i)     { _index = i; }
-
-  // Allocate a new BufferNode with the "buffer" having size elements.
-  static BufferNode* allocate(size_t size);
-
-  // Free a BufferNode.
-  static void deallocate(BufferNode* node);
-
-  // Return the BufferNode containing the buffer, after setting its index.
-  static BufferNode* make_node_from_buffer(void** buffer, size_t index) {
-    BufferNode* node =
-      reinterpret_cast<BufferNode*>(
-        reinterpret_cast<char*>(buffer) - buffer_offset());
-    node->set_index(index);
-    return node;
-  }
-
-  // Return the buffer for node.
-  static void** make_buffer_from_node(BufferNode *node) {
-    // &_buffer[0] might lead to index out of bounds warnings.
-    return reinterpret_cast<void**>(
-      reinterpret_cast<char*>(node) + buffer_offset());
-  }
-};
-
-// A PtrQueueSet represents resources common to a set of pointer queues.
-// In particular, the individual queues allocate buffers from this shared
-// set, and return completed buffers to the set.
-// All these variables are are protected by the TLOQ_CBL_mon. XXX ???
-class PtrQueueSet {
-  // The size of all buffers in the set.
-  size_t _buffer_size;
-
-protected:
-  Monitor* _cbl_mon;  // Protects the fields below.
-  BufferNode* _completed_buffers_head;
-  BufferNode* _completed_buffers_tail;
-  size_t _n_completed_buffers;
-  int _process_completed_threshold;
-  volatile bool _process_completed;
-
-  // This (and the interpretation of the first element as a "next"
-  // pointer) are protected by the TLOQ_FL_lock.
-  Mutex* _fl_lock;
-  BufferNode* _buf_free_list;
-  size_t _buf_free_list_sz;
-  // Queue set can share a freelist. The _fl_owner variable
-  // specifies the owner. It is set to "this" by default.
-  PtrQueueSet* _fl_owner;
-
-  bool _all_active;
-
-  // If true, notify_all on _cbl_mon when the threshold is reached.
-  bool _notify_when_complete;
-
-  // Maximum number of elements allowed on completed queue: after that,
-  // enqueuer does the work itself.  Zero indicates no maximum.
-  int _max_completed_queue;
-  size_t _completed_queue_padding;
-
-  size_t completed_buffers_list_length();
-  void assert_completed_buffer_list_len_correct_locked();
-  void assert_completed_buffer_list_len_correct();
-
-protected:
-  // A mutator thread does the the work of processing a buffer.
-  // Returns "true" iff the work is complete (and the buffer may be
-  // deallocated).
-  virtual bool mut_process_buffer(BufferNode* node) {
-    ShouldNotReachHere();
-    return false;
-  }
-
-  // Create an empty ptr queue set.
-  PtrQueueSet(bool notify_when_complete = false);
-  ~PtrQueueSet();
-
-  // Because of init-order concerns, we can't pass these