changeset 52072:ca29d5cf9e8f switch

Automatic merge with default
author mcimadamore
date Thu, 09 Aug 2018 22:06:11 +0200
parents 1bdd7722a048 d9439d4b15e2
children 7afd61192cd4
files src/java.base/share/classes/module-info.java test/hotspot/jtreg/vmTestbase/vm/mlvm/meth/share/transform/v2/MHSamTF.java
diffstat 695 files changed, 16802 insertions(+), 3695 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Thu Aug 02 22:06:18 2018 +0200
+++ b/.hgtags	Thu Aug 09 22:06:11 2018 +0200
@@ -498,7 +498,10 @@
 9937ef7499dcd7673714517fd5e450410c14ba4e jdk-11+22
 1edcf36fe15f79d6228d1a63eb680878e2386480 jdk-11+23
 ea900a7dc7d77dee30865c60eabd87fc24b1037c jdk-11+24
+331888ea4a788df801b1edf8836646cd25fc758b jdk-11+25
+945ba9278a272a5477ffb1b3ea1b04174fed8036 jdk-11+26
 69b438908512d3dfef5852c6a843a5778333a309 jdk-12+2
 990db216e7199b2ba9989d8fa20b657e0ca7d969 jdk-12+3
 499b873761d8e8a1cc4aa649daf04cbe98cbce77 jdk-12+4
 f8696e0ab9b795030429fc3374ec03e378fd9ed7 jdk-12+5
+7939b3c4e4088bf4f70ec5bbd8030393b653372f jdk-12+6
--- a/make/autoconf/flags-cflags.m4	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/autoconf/flags-cflags.m4	Thu Aug 09 22:06:11 2018 +0200
@@ -543,14 +543,14 @@
   fi
   if test "x$TOOLCHAIN_TYPE" = xgcc; then
     WARNING_CFLAGS_JDK="-Wall -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2"
-    WARNING_CFLAGS_JVM="$WARNING_CFLAGS_JVM -Wunused-value -Woverloaded-virtual"
+    WARNING_CFLAGS_JVM="$WARNING_CFLAGS_JVM -Wunused-value -Woverloaded-virtual -Wreorder"
 
     if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
       # Non-zero builds have stricter warnings
       WARNING_CFLAGS_JVM="$WARNING_CFLAGS_JVM -Wreturn-type"
     fi
   elif test "x$TOOLCHAIN_TYPE" = xclang; then
-    WARNING_CFLAGS_JVM="$WARNING_CFLAGS_JVM -Wno-deprecated"
+    WARNING_CFLAGS_JVM="$WARNING_CFLAGS_JVM -Wno-deprecated -Wreorder"
     if test "x$OPENJDK_TARGET_OS" = xlinux; then
       WARNING_CFLAGS_JVM="$WARNING_CFLAGS_JVM -Wno-sometimes-uninitialized"
       WARNING_CFLAGS_JDK="-Wall -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/charsetmapping/IBM1129.c2b	Thu Aug 09 22:06:11 2018 +0200
@@ -0,0 +1,94 @@
+0x21	U+ff01
+0x22	U+ff02
+0x23	U+ff03
+0x24	U+ff04
+0x25	U+ff05
+0x26	U+ff06
+0x27	U+ff07
+0x28	U+ff08
+0x29	U+ff09
+0x2a	U+ff0a
+0x2b	U+ff0b
+0x2c	U+ff0c
+0x2d	U+ff0d
+0x2e	U+ff0e
+0x2f	U+ff0f
+0x30	U+ff10
+0x31	U+ff11
+0x32	U+ff12
+0x33	U+ff13
+0x34	U+ff14
+0x35	U+ff15
+0x36	U+ff16
+0x37	U+ff17
+0x38	U+ff18
+0x39	U+ff19
+0x3a	U+ff1a
+0x3b	U+ff1b
+0x3c	U+ff1c
+0x3d	U+ff1d
+0x3e	U+ff1e
+0x3f	U+ff1f
+0x40	U+ff20
+0x41	U+ff21
+0x42	U+ff22
+0x43	U+ff23
+0x44	U+ff24
+0x45	U+ff25
+0x46	U+ff26
+0x47	U+ff27
+0x48	U+ff28
+0x49	U+ff29
+0x4a	U+ff2a
+0x4b	U+ff2b
+0x4c	U+ff2c
+0x4d	U+ff2d
+0x4e	U+ff2e
+0x4f	U+ff2f
+0x50	U+ff30
+0x51	U+ff31
+0x52	U+ff32
+0x53	U+ff33
+0x54	U+ff34
+0x55	U+ff35
+0x56	U+ff36
+0x57	U+ff37
+0x58	U+ff38
+0x59	U+ff39
+0x5a	U+ff3a
+0x5b	U+ff3b
+0x5c	U+ff3c
+0x5d	U+ff3d
+0x5e	U+ff3e
+0x5f	U+ff3f
+0x60	U+ff40
+0x61	U+ff41
+0x62	U+ff42
+0x63	U+ff43
+0x64	U+ff44
+0x65	U+ff45
+0x66	U+ff46
+0x67	U+ff47
+0x68	U+ff48
+0x69	U+ff49
+0x6a	U+ff4a
+0x6b	U+ff4b
+0x6c	U+ff4c
+0x6d	U+ff4d
+0x6e	U+ff4e
+0x6f	U+ff4f
+0x70	U+ff50
+0x71	U+ff51
+0x72	U+ff52
+0x73	U+ff53
+0x74	U+ff54
+0x75	U+ff55
+0x76	U+ff56
+0x77	U+ff57
+0x78	U+ff58
+0x79	U+ff59
+0x7a	U+ff5a
+0x7b	U+ff5b
+0x7c	U+ff5c
+0x7d	U+ff5d
+0x7e	U+ff5e
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/charsetmapping/IBM1129.map	Thu Aug 09 22:06:11 2018 +0200
@@ -0,0 +1,256 @@
+0x00	U+0000
+0x01	U+0001
+0x02	U+0002
+0x03	U+0003
+0x04	U+0004
+0x05	U+0005
+0x06	U+0006
+0x07	U+0007
+0x08	U+0008
+0x09	U+0009
+0x0a	U+000a
+0x0b	U+000b
+0x0c	U+000c
+0x0d	U+000d
+0x0e	U+000e
+0x0f	U+000f
+0x10	U+0010
+0x11	U+0011
+0x12	U+0012
+0x13	U+0013
+0x14	U+0014
+0x15	U+0015
+0x16	U+0016
+0x17	U+0017
+0x18	U+0018
+0x19	U+0019
+0x1a	U+001a
+0x1b	U+001b
+0x1c	U+001c
+0x1d	U+001d
+0x1e	U+001e
+0x1f	U+001f
+0x20	U+0020
+0x21	U+0021
+0x22	U+0022
+0x23	U+0023
+0x24	U+0024
+0x25	U+0025
+0x26	U+0026
+0x27	U+0027
+0x28	U+0028
+0x29	U+0029
+0x2a	U+002a
+0x2b	U+002b
+0x2c	U+002c
+0x2d	U+002d
+0x2e	U+002e
+0x2f	U+002f
+0x30	U+0030
+0x31	U+0031
+0x32	U+0032
+0x33	U+0033
+0x34	U+0034
+0x35	U+0035
+0x36	U+0036
+0x37	U+0037
+0x38	U+0038
+0x39	U+0039
+0x3a	U+003a
+0x3b	U+003b
+0x3c	U+003c
+0x3d	U+003d
+0x3e	U+003e
+0x3f	U+003f
+0x40	U+0040
+0x41	U+0041
+0x42	U+0042
+0x43	U+0043
+0x44	U+0044
+0x45	U+0045
+0x46	U+0046
+0x47	U+0047
+0x48	U+0048
+0x49	U+0049
+0x4a	U+004a
+0x4b	U+004b
+0x4c	U+004c
+0x4d	U+004d
+0x4e	U+004e
+0x4f	U+004f
+0x50	U+0050
+0x51	U+0051
+0x52	U+0052
+0x53	U+0053
+0x54	U+0054
+0x55	U+0055
+0x56	U+0056
+0x57	U+0057
+0x58	U+0058
+0x59	U+0059
+0x5a	U+005a
+0x5b	U+005b
+0x5c	U+005c
+0x5d	U+005d
+0x5e	U+005e
+0x5f	U+005f
+0x60	U+0060
+0x61	U+0061
+0x62	U+0062
+0x63	U+0063
+0x64	U+0064
+0x65	U+0065
+0x66	U+0066
+0x67	U+0067
+0x68	U+0068
+0x69	U+0069
+0x6a	U+006a
+0x6b	U+006b
+0x6c	U+006c
+0x6d	U+006d
+0x6e	U+006e
+0x6f	U+006f
+0x70	U+0070
+0x71	U+0071
+0x72	U+0072
+0x73	U+0073
+0x74	U+0074
+0x75	U+0075
+0x76	U+0076
+0x77	U+0077
+0x78	U+0078
+0x79	U+0079
+0x7a	U+007a
+0x7b	U+007b
+0x7c	U+007c
+0x7d	U+007d
+0x7e	U+007e
+0x7f	U+007f
+0x80	U+0080
+0x81	U+0081
+0x82	U+0082
+0x83	U+0083
+0x84	U+0084
+0x85	U+0085
+0x86	U+0086
+0x87	U+0087
+0x88	U+0088
+0x89	U+0089
+0x8a	U+008a
+0x8b	U+008b
+0x8c	U+008c
+0x8d	U+008d
+0x8e	U+008e
+0x8f	U+008f
+0x90	U+0090
+0x91	U+0091
+0x92	U+0092
+0x93	U+0093
+0x94	U+0094
+0x95	U+0095
+0x96	U+0096
+0x97	U+0097
+0x98	U+0098
+0x99	U+0099
+0x9a	U+009a
+0x9b	U+009b
+0x9c	U+009c
+0x9d	U+009d
+0x9e	U+009e
+0x9f	U+009f
+0xa0	U+00a0
+0xa1	U+00a1
+0xa2	U+00a2
+0xa3	U+00a3
+0xa4	U+00a4
+0xa5	U+00a5
+0xa6	U+00a6
+0xa7	U+00a7
+0xa8	U+0153
+0xa9	U+00a9
+0xaa	U+00aa
+0xab	U+00ab
+0xac	U+00ac
+0xad	U+00ad
+0xae	U+00ae
+0xaf	U+00af
+0xb0	U+00b0
+0xb1	U+00b1
+0xb2	U+00b2
+0xb3	U+00b3
+0xb4	U+0178
+0xb5	U+00b5
+0xb6	U+00b6
+0xb7	U+00b7
+0xb8	U+0152
+0xb9	U+00b9
+0xba	U+00ba
+0xbb	U+00bb
+0xbc	U+00bc
+0xbd	U+00bd
+0xbe	U+00be
+0xbf	U+00bf
+0xc0	U+00c0
+0xc1	U+00c1
+0xc2	U+00c2
+0xc3	U+0102
+0xc4	U+00c4
+0xc5	U+00c5
+0xc6	U+00c6
+0xc7	U+00c7
+0xc8	U+00c8
+0xc9	U+00c9
+0xca	U+00ca
+0xcb	U+00cb
+0xcc	U+0300
+0xcd	U+00cd
+0xce	U+00ce
+0xcf	U+00cf
+0xd0	U+0110
+0xd1	U+00d1
+0xd2	U+0309
+0xd3	U+00d3
+0xd4	U+00d4
+0xd5	U+01a0
+0xd6	U+00d6
+0xd7	U+00d7
+0xd8	U+00d8
+0xd9	U+00d9
+0xda	U+00da
+0xdb	U+00db
+0xdc	U+00dc
+0xdd	U+01af
+0xde	U+0303
+0xdf	U+00df
+0xe0	U+00e0
+0xe1	U+00e1
+0xe2	U+00e2
+0xe3	U+0103
+0xe4	U+00e4
+0xe5	U+00e5
+0xe6	U+00e6
+0xe7	U+00e7
+0xe8	U+00e8
+0xe9	U+00e9
+0xea	U+00ea
+0xeb	U+00eb
+0xec	U+0301
+0xed	U+00ed
+0xee	U+00ee
+0xef	U+00ef
+0xf0	U+0111
+0xf1	U+00f1
+0xf2	U+0323
+0xf3	U+00f3
+0xf4	U+00f4
+0xf5	U+01a1
+0xf6	U+00f6
+0xf7	U+00f7
+0xf8	U+00f8
+0xf9	U+00f9
+0xfa	U+00fa
+0xfb	U+00fb
+0xfc	U+00fc
+0xfd	U+01b0
+0xfe	U+20ab
+0xff	U+00ff
--- a/make/data/charsetmapping/charsets	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/data/charsetmapping/charsets	Thu Aug 09 22:06:11 2018 +0200
@@ -1241,6 +1241,16 @@
     alias   ibm-1124
     alias   1124
 
+charset x-IBM1129 IBM1129
+    package sun.nio.cs.ext
+    type    sbcs
+    hisname Cp1129
+    ascii   false
+    alias   cp1129               # JDK historical
+    alias   ibm1129
+    alias   ibm-1129
+    alias   1129
+
 charset x-IBM1364 IBM1364
     package sun.nio.cs.ext
     type    ebcdic
--- a/make/data/charsetmapping/stdcs-aix	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/data/charsetmapping/stdcs-aix	Thu Aug 09 22:06:11 2018 +0200
@@ -19,6 +19,7 @@
 IBM970
 IBM1046
 IBM1124
+IBM1129
 IBM1383
 ISO_8859_6
 ISO_8859_8
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Thu Aug 09 22:06:11 2018 +0200
@@ -123,6 +123,7 @@
 $(GENSRC_DIR)/module-info.java.extra: $(GENSRC_DIR)/_gensrc_proc_done
 	($(CD) $(GENSRC_DIR)/META-INF/providers && \
 	    p=""; \
+	    impl=""; \
 	    for i in $$($(LS) | $(SORT)); do \
 	      c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
 	      if test x$$p != x$$c; then \
@@ -131,15 +132,27 @@
 	        fi; \
 	        $(ECHO) "provides $$c with" >> $@; \
                 p=$$c; \
+	        impl=""; \
 	      fi; \
-	      $(ECHO) "    $$i," >> $@; \
+              if test x$$impl != x; then \
+	        $(ECHO) "  , $$i" >> $@; \
+              else \
+	        $(ECHO) "    $$i" >> $@; \
+              fi; \
+              impl=$$i; \
 	    done); \
             $(ECHO) "    ;" >> $@; \
 	$(ECHO) "uses org.graalvm.compiler.options.OptionDescriptors;" >> $@; \
 	$(ECHO) "provides org.graalvm.compiler.options.OptionDescriptors with" >> $@; \
+        impl=""; \
 	for i in $$($(FIND) $(GENSRC_DIR) -name '*_OptionDescriptors.java' | $(SORT)); do \
 	    c=$$($(ECHO) $$i | $(SED) 's:.*/jdk\.internal\.vm\.compiler/\(.*\)\.java:\1:' | $(TR) '/' '.'); \
-	    $(ECHO) "    $$c," >> $@; \
+            if test x$$impl != x; then \
+	      $(ECHO) "  , $$c" >> $@; \
+            else \
+	      $(ECHO) "    $$c" >> $@; \
+            fi; \
+            impl=$$c; \
 	done; \
 	$(ECHO) "    ;" >> $@;
 
--- a/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -40,6 +40,7 @@
 
 #include <proc_service.h>
 #include "gc/shared/collectedHeap.hpp"
+#include "memory/heap.hpp"
 #include "runtime/vmStructs.hpp"
 
 typedef enum GEN_variant {
--- a/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java	Thu Aug 09 22:06:11 2018 +0200
@@ -37,7 +37,9 @@
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import static java.util.stream.Collectors.*;
 
@@ -131,7 +133,7 @@
         // parse module-info.java.extra
         this.extras = new ModuleInfo();
         for (Path file : extraFiles) {
-            extras.parse(file);
+            extras.parseExtra(file);
         }
 
         // merge with module-info.java.extra
@@ -177,6 +179,7 @@
         final Map<String, Statement> provides = new HashMap<>();
 
         Statement getStatement(String directive, String name) {
+            Objects.requireNonNull(name);
             switch (directive) {
                 case "exports":
                     if (moduleInfo.exports.containsKey(name) &&
@@ -223,49 +226,49 @@
             extraFiles.exports.entrySet()
                 .stream()
                 .filter(e -> exports.containsKey(e.getKey()) &&
-                                e.getValue().filter(modules))
+                    e.getValue().filter(modules))
                 .forEach(e -> mergeExportsOrOpens(exports.get(e.getKey()),
-                                                  e.getValue(),
-                                                  modules));
+                    e.getValue(),
+                    modules));
 
             // add exports that are not defined in the original module-info.java
             extraFiles.exports.entrySet()
                 .stream()
                 .filter(e -> !exports.containsKey(e.getKey()) &&
-                                e.getValue().filter(modules))
+                    e.getValue().filter(modules))
                 .forEach(e -> addTargets(getStatement("exports", e.getKey()),
-                                         e.getValue(),
-                                         modules));
+                    e.getValue(),
+                    modules));
 
             // API package opened in the original module-info.java
             extraFiles.opens.entrySet()
                 .stream()
                 .filter(e -> opens.containsKey(e.getKey()) &&
-                                e.getValue().filter(modules))
+                    e.getValue().filter(modules))
                 .forEach(e -> mergeExportsOrOpens(opens.get(e.getKey()),
-                                                  e.getValue(),
-                                                  modules));
+                    e.getValue(),
+                    modules));
 
             // add opens that are not defined in the original module-info.java
             extraFiles.opens.entrySet()
                 .stream()
                 .filter(e -> !opens.containsKey(e.getKey()) &&
-                                e.getValue().filter(modules))
+                    e.getValue().filter(modules))
                 .forEach(e -> addTargets(getStatement("opens", e.getKey()),
-                                         e.getValue(),
-                                         modules));
+                    e.getValue(),
+                    modules));
 
             // provides
             extraFiles.provides.keySet()
                 .stream()
                 .filter(service -> provides.containsKey(service))
                 .forEach(service -> mergeProvides(service,
-                                                  extraFiles.provides.get(service)));
+                    extraFiles.provides.get(service)));
             extraFiles.provides.keySet()
                 .stream()
                 .filter(service -> !provides.containsKey(service))
                 .forEach(service -> provides.put(service,
-                                                 extraFiles.provides.get(service)));
+                    extraFiles.provides.get(service)));
 
             // uses
             extraFiles.uses.keySet()
@@ -280,8 +283,8 @@
                                 Set<String> modules)
         {
             extra.targets.stream()
-                 .filter(mn -> modules.contains(mn))
-                 .forEach(mn -> statement.addTarget(mn));
+                .filter(mn -> modules.contains(mn))
+                .forEach(mn -> statement.addTarget(mn));
         }
 
         private void mergeExportsOrOpens(Statement statement,
@@ -319,7 +322,7 @@
             }
 
             extra.targets.stream()
-                 .forEach(mn -> statement.addTarget(mn));
+                .forEach(mn -> statement.addTarget(mn));
         }
 
 
@@ -358,189 +361,173 @@
                 .forEach(e -> writer.println(e.getValue()));
         }
 
-        private void parse(Path sourcefile) throws IOException {
-            List<String> lines = Files.readAllLines(sourcefile);
-            Statement statement = null;
-            boolean hasTargets = false;
 
-            for (int lineNumber = 1; lineNumber <= lines.size(); ) {
-                String l = lines.get(lineNumber-1).trim();
-                int index = 0;
+        private void parse(Path file) throws IOException {
+            Parser parser = new Parser(file);
+            parser.run();
+            if (verbose) {
+                parser.dump();
+            }
+            process(parser, false);
+        }
 
-                if (l.isEmpty()) {
-                    lineNumber++;
+        private void parseExtra(Path file) throws IOException {
+            Parser parser = new Parser(file);
+            parser.run();
+            if (verbose) {
+                parser.dump();
+            }
+            process(parser, true);
+        }
+
+
+        private void process(Parser parser, boolean extraFile) throws IOException {
+            // no duplicate statement local in each file
+            Map<String, Statement> exports = new HashMap<>();
+            Map<String, Statement> opens = new HashMap<>();
+            Map<String, Statement> uses = new HashMap<>();
+            Map<String, Statement> provides = new HashMap<>();
+
+            String token = null;
+            boolean hasCurlyBracket = false;
+            while ((token = parser.nextToken()) != null) {
+                if (token.equals("module")) {
+                    String modulename = nextIdentifier(parser);
+                    if (extraFile) {
+                        throw parser.newError("cannot declare module in " + parser.sourceFile);
+                    }
+                    skipTokenOrThrow(parser, "{", "missing {");
+                    hasCurlyBracket = true;
+                } else if (token.equals("requires")) {
+                    token = nextIdentifier(parser);
+                    if (token.equals("transitive")) {
+                        token = nextIdentifier(parser);
+                    }
+                    if (extraFile) {
+                        throw parser.newError("cannot declare requires in " + parser.sourceFile);
+                    }
+                    skipTokenOrThrow(parser, ";", "missing semicolon");
+                } else if (isExportsOpensProvidesUses(token)) {
+                    // new statement
+                    String keyword = token;
+                    String name = nextIdentifier(parser);
+                    Statement statement = getStatement(keyword, name);
+                    switch (keyword) {
+                        case "exports":
+                            if (exports.containsKey(name)) {
+                                throw parser.newError("multiple " + keyword + " " + name);
+                            }
+                            exports.put(name, statement);
+                            break;
+                        case "opens":
+                            if (opens.containsKey(name)) {
+                                throw parser.newError("multiple " + keyword + " " + name);
+                            }
+                            opens.put(name, statement);
+                            break;
+                        case "uses":
+                            if (uses.containsKey(name)) {
+                                throw parser.newError("multiple " + keyword + " " + name);
+                            }
+                            uses.put(name, statement);
+                            break;
+                        /*  Disable this check until jdk.internal.vm.compiler generated file is fixed.
+                        case "provides":
+                            if (provides.containsKey(name)) {
+                                throw parser.newError("multiple " + keyword + " " + name);
+                            }
+                            provides.put(name, statement);
+                            break;
+                        */
+                    }
+                    String lookAhead = lookAhead(parser);
+                    if (lookAhead.equals(statement.qualifier)) {
+                        parser.nextToken(); // skip qualifier
+                        while ((lookAhead = parser.peekToken()) != null) {
+                            // add target name
+                            name = nextIdentifier(parser);
+                            statement.addTarget(name);
+                            lookAhead = lookAhead(parser);
+                            if (lookAhead.equals(",") || lookAhead.equals(";")) {
+                                parser.nextToken();
+                            } else {
+                                throw parser.newError("missing semicolon");
+                            }
+                            if (lookAhead.equals(";")) {
+                                break;
+                            }
+                        }
+                    } else {
+                        skipTokenOrThrow(parser, ";", "missing semicolon");
+                    }
+                } else if (token.equals(";")) {
                     continue;
+                } else if (hasCurlyBracket && token.equals("}")) {
+                    hasCurlyBracket = false;
+                    if (parser.peekToken() != null) {  // must be EOF
+                        throw parser.newError("is malformed");
+                    }
+                } else {
+                    throw parser.newError("missing keyword");
                 }
+            }
+            if (hasCurlyBracket) {
+                parser.newError("missing }");
+            }
+        }
 
-                // comment block starts
-                if (l.startsWith("/*")) {
-                    while (l.indexOf("*/") == -1) { // end comment block
-                        l = lines.get(lineNumber++).trim();
-                    }
-                    index = l.indexOf("*/") + 2;
-                    if (index >= l.length()) {
-                        lineNumber++;
-                        continue;
-                    } else {
-                        // rest of the line
-                        l = l.substring(index, l.length()).trim();
-                        index = 0;
-                    }
-                }
+        private boolean isExportsOpensProvidesUses(String word) {
+            switch (word) {
+                case "exports":
+                case "opens":
+                case "provides":
+                case "uses":
+                    return true;
+                default:
+                    return false;
+            }
+        }
 
-                // skip comment and annotations
-                if (l.startsWith("//") || l.startsWith("@")) {
-                    lineNumber++;
-                    continue;
-                }
+        private String lookAhead(Parser parser) {
+            String lookAhead = parser.peekToken();
+            if (lookAhead == null) { // EOF
+                throw parser.newError("reach end of file");
+            }
+            return lookAhead;
+        }
 
-                int current = lineNumber;
-                int count = 0;
-                while (index < l.length()) {
-                    if (current == lineNumber && ++count > 20)
-                        throw new Error("Fail to parse line " + lineNumber + " " + sourcefile);
+        private String nextIdentifier(Parser parser) {
+            String lookAhead = parser.peekToken();
+            boolean maybeIdentifier = true;
+            switch (lookAhead) {
+                case "module":
+                case "requires":
+                case "exports":
+                case "opens":
+                case "provides":
+                case "uses":
+                case "to":
+                case "with":
+                case ",":
+                case ";":
+                case "{":
+                case "}":
+                    maybeIdentifier = false;
+            }
+            if (lookAhead == null || !maybeIdentifier) {
+                throw parser.newError("<identifier> missing");
+            }
 
-                    int end = l.indexOf(';');
-                    if (end == -1)
-                        end = l.length();
-                    String content = l.substring(0, end).trim();
-                    if (content.isEmpty()) {
-                        index = end+1;
-                        if (index < l.length()) {
-                            // rest of the line
-                            l = l.substring(index, l.length()).trim();
-                            index = 0;
-                        }
-                        continue;
-                    }
+            return parser.nextToken();
+        }
 
-                    String[] s = content.split("\\s+");
-                    String keyword = s[0].trim();
-
-                    String name = s.length > 1 ? s[1].trim() : null;
-                    trace("%d: %s index=%d len=%d%n", lineNumber, l, index, l.length());
-                    switch (keyword) {
-                        case "module":
-                        case "requires":
-                        case "}":
-                            index = l.length();  // skip to the end
-                            continue;
-
-                        case "exports":
-                        case "opens":
-                        case "provides":
-                        case "uses":
-                            // assume name immediately after exports, opens, provides, uses
-                            statement = getStatement(keyword, name);
-                            hasTargets = false;
-
-                            int i = l.indexOf(name, keyword.length()+1) + name.length() + 1;
-                            l = i < l.length() ? l.substring(i, l.length()).trim() : "";
-                            index = 0;
-
-                            if (s.length >= 3) {
-                                if (!s[2].trim().equals(statement.qualifier)) {
-                                    throw new RuntimeException(sourcefile + ", line " +
-                                        lineNumber + ", is malformed: " + s[2]);
-                                }
-                            }
-
-                            break;
-
-                        case "to":
-                        case "with":
-                            if (statement == null) {
-                                throw new RuntimeException(sourcefile + ", line " +
-                                    lineNumber + ", is malformed");
-                            }
-
-                            hasTargets = true;
-                            String qualifier = statement.qualifier;
-                            i = l.indexOf(qualifier, index) + qualifier.length() + 1;
-                            l = i < l.length() ? l.substring(i, l.length()).trim() : "";
-                            index = 0;
-                            break;
-                    }
-
-                    if (index >= l.length()) {
-                        // skip to next line
-                        continue;
-                    }
-
-                        // comment block starts
-                    if (l.startsWith("/*")) {
-                        while (l.indexOf("*/") == -1) { // end comment block
-                            l = lines.get(lineNumber++).trim();
-                        }
-                        index = l.indexOf("*/") + 2;
-                        if (index >= l.length()) {
-                            continue;
-                        } else {
-                            // rest of the line
-                            l = l.substring(index, l.length()).trim();
-                            index = 0;
-                        }
-                    }
-
-                    if (l.startsWith("//")) {
-                        index = l.length();
-                        continue;
-                    }
-
-                    if (statement == null) {
-                        throw new RuntimeException(sourcefile + ", line " +
-                            lineNumber + ": missing keyword?");
-                    }
-
-                    if (!hasTargets) {
-                        continue;
-                    }
-
-                    if (index >= l.length()) {
-                        throw new RuntimeException(sourcefile + ", line " +
-                            lineNumber + ": " + l);
-                    }
-
-                    // parse the target module of exports, opens, or provides
-                    Statement stmt = statement;
-
-                    int terminal = l.indexOf(';', index);
-                    // determine up to which position to parse
-                    int pos = terminal != -1 ? terminal : l.length();
-                    // parse up to comments
-                    int pos1 = l.indexOf("//", index);
-                    if (pos1 != -1 && pos1 < pos) {
-                        pos = pos1;
-                    }
-                    int pos2 = l.indexOf("/*", index);
-                    if (pos2 != -1 && pos2 < pos) {
-                        pos = pos2;
-                    }
-                    // target module(s) for qualitifed exports or opens
-                    // or provider implementation class(es)
-                    String rhs = l.substring(index, pos).trim();
-                    index += rhs.length();
-                    trace("rhs: index=%d [%s] [line: %s]%n", index, rhs, l);
-
-                    String[] targets = rhs.split(",");
-                    for (String t : targets) {
-                        String n = t.trim();
-                        if (n.length() > 0)
-                            stmt.addTarget(n);
-                    }
-
-                    // start next statement
-                    if (pos == terminal) {
-                        statement = null;
-                        hasTargets = false;
-                        index = terminal + 1;
-                    }
-                    l = index < l.length() ? l.substring(index, l.length()).trim() : "";
-                    index = 0;
-                }
-
-                lineNumber++;
+        private String skipTokenOrThrow(Parser parser, String token, String msg) {
+            // look ahead to report the proper line number
+            String lookAhead = parser.peekToken();
+            if (!token.equals(lookAhead)) {
+                throw parser.newError(msg);
             }
+            return parser.nextToken();
         }
     }
 
@@ -620,4 +607,175 @@
             System.out.format(fmt, params);
         }
     }
+
+    static class Parser {
+        private static final List<String> EMPTY = List.of();
+
+        private final Path sourceFile;
+        private boolean inCommentBlock = false;
+        private List<List<String>> tokens = new ArrayList<>();
+        private int lineNumber = 1;
+        private int index = 0;
+
+        Parser(Path file) {
+            this.sourceFile = file;
+        }
+
+        void run() throws IOException {
+            List<String> lines = Files.readAllLines(sourceFile);
+            for (int lineNumber = 1; lineNumber <= lines.size(); lineNumber++) {
+                String l = lines.get(lineNumber - 1).trim();
+                tokenize(l);
+            }
+        }
+
+        /*
+         * Tokenize the given string.  Comments are skipped.
+         */
+        List<String> tokenize(String l) {
+            while (!l.isEmpty()) {
+                if (inCommentBlock) {
+                    int comment = l.indexOf("*/");
+                    if (comment == -1)
+                        return emptyTokens();
+
+                    // end comment block
+                    inCommentBlock = false;
+                    if ((comment + 2) >= l.length()) {
+                        return emptyTokens();
+                    }
+                    l = l.substring(comment + 2, l.length()).trim();
+                }
+
+                // skip comment
+                int comment = l.indexOf("//");
+                if (comment >= 0) {
+                    l = l.substring(0, comment).trim();
+                    if (l.isEmpty()) return emptyTokens();
+                }
+
+                if (l.isEmpty()) {
+                    return emptyTokens();
+                }
+
+                int beginComment = l.indexOf("/*");
+                int endComment = l.indexOf("*/");
+                if (beginComment == -1)
+                    return tokens(l);
+
+                String s1 = l.substring(0, beginComment).trim();
+                if (endComment > 0) {
+                    String s2 = l.substring(endComment + 2, l.length()).trim();
+                    if (s1.isEmpty()) {
+                        l = s2;
+                    } else if (s2.isEmpty()) {
+                        l = s1;
+                    } else {
+                        l = s1 + " " + s2;
+                    }
+                } else {
+                    inCommentBlock = true;
+                    return tokens(s1);
+                }
+            }
+            return tokens(l);
+        }
+
+        private List<String> emptyTokens() {
+            this.tokens.add(EMPTY);
+            return EMPTY;
+        }
+        private List<String> tokens(String l) {
+            List<String> tokens = new ArrayList<>();
+            for (String s : l.split("\\s+")) {
+                int pos=0;
+                s = s.trim();
+                if (s.isEmpty())
+                     continue;
+
+                int i = s.indexOf(',', pos);
+                int j = s.indexOf(';', pos);
+                while ((i >= 0 && i < s.length()) || (j >= 0 && j < s.length())) {
+                    if (j == -1 || (i >= 0 && i < j)) {
+                        String n = s.substring(pos, i).trim();
+                        if (!n.isEmpty()) {
+                            tokens.add(n);
+                        }
+                        tokens.add(s.substring(i, i + 1));
+                        pos = i + 1;
+                        i = s.indexOf(',', pos);
+                    } else {
+                        String n = s.substring(pos, j).trim();
+                        if (!n.isEmpty()) {
+                            tokens.add(n);
+                        }
+                        tokens.add(s.substring(j, j + 1));
+                        pos = j + 1;
+                        j = s.indexOf(';', pos);
+                    }
+                }
+
+                String n = s.substring(pos).trim();
+                if (!n.isEmpty()) {
+                    tokens.add(n);
+                }
+            }
+            this.tokens.add(tokens);
+            return tokens;
+        }
+
+        /*
+         * Returns next token.
+         */
+        String nextToken() {
+            while (lineNumber <= tokens.size()) {
+                List<String> l = tokens.get(lineNumber-1);
+                if (index < l.size()) {
+                    return l.get(index++);
+                } else {
+                    lineNumber++;
+                    index = 0;
+                }
+            }
+            return null;
+        }
+
+        /*
+         * Peeks next token.
+         */
+        String peekToken() {
+            int ln = lineNumber;
+            int i = index;
+            while (ln <= tokens.size()) {
+                List<String> l = tokens.get(ln-1);
+                if (i < l.size()) {
+                    return l.get(i++);
+                } else {
+                    ln++;
+                    i = 0;
+                }
+            }
+            return null;
+        }
+
+        Error newError(String msg) {
+            if (lineNumber <= tokens.size()) {
+                throw new Error(sourceFile + ", line " +
+                    lineNumber + ", " + msg + " \"" + lineAt(lineNumber) + "\"");
+            } else {
+                throw new Error(sourceFile + ", line " + lineNumber + ", " + msg);
+            }
+        }
+
+        void dump() {
+            for (int i = 1; i <= tokens.size(); i++) {
+                System.out.format("%d: %s%n", i, lineAt(i));
+            }
+        }
+
+        private String lineAt(int i) {
+            return tokens.get(i-1).stream().collect(Collectors.joining(" "));
+        }
+    }
 }
+
--- a/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java	Thu Aug 09 22:06:11 2018 +0200
@@ -28,6 +28,7 @@
 import java.io.BufferedWriter;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -35,6 +36,7 @@
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import build.tools.module.GenModuleInfoSource.Statement;
 
@@ -42,29 +44,36 @@
  * Sanity test for GenModuleInfoSource tool
  */
 public class ModuleInfoExtraTest {
-    private static final Path DIR = Paths.get("test");
+    private static final Path DIR = Paths.get("gen-module-info-test");
+    private static boolean verbose = false;
     public static void main(String... args) throws Exception {
         if (args.length != 0)
-            GenModuleInfoSource.verbose = true;
+            verbose = true;
 
+        GenModuleInfoSource.verbose = verbose;
         ModuleInfoExtraTest test = new ModuleInfoExtraTest("m", "m1", "m2", "m3");
-        test.run();
+        test.testModuleInfo();
+        test.errorCases();
     }
 
     String[] moduleInfo = new String[] {
-        "exports p",
-        "to",
-        "   // comment",
-        "   /* comment */ m1",
+        "module m {",
+        "    requires m1;",
+        "    requires transitive m2;",
+        "    exports p",
+        "    to",
+        "               // comment ... ",
+        "    /* comment */ m1",
         ",",
-        "m2,m3",
-        "   ;",
-        "exports q to m1;",
-        "provides s with /* ",
-        "  comment */ impl     ;    // comment",
-        "provides s1",
-        "    with  ",
-        "    impl1, impl2;"
+        "       m2,m3",
+        "  ;",
+        "    exports q to m1;",
+        "    provides s with /*   ",
+        "    comment */ impl     ;    // comment",
+        "    provides s1",
+        "       with  ",
+        "       impl1, impl2;",
+        "}"
     };
 
     String[] moduleInfoExtra = new String[] {
@@ -76,33 +85,8 @@
         "opens p.q ",
         "   to /* comment */ m3",
         "   , // m1",
-        "   /* comment */, m4;",
-        "provides s1 with impl3;"
-    };
-
-    String[] test1 = new String[] {
-        "exports p1 to m1;",
-        "exports p2"
-    };
-
-    String[] test2 = new String[] {
-        "exports to m1;"
-    };
-
-    String[] test3 = new String[]{
-        "exports p3 to m1;",
-        "    m2, m3;"
-    };
-
-    String[] test4 = new String[]{
-        "provides s with impl1;",   // typo ; should be ,
-        "   impl2, impl3;"
-    };
-
-    String[] test5 = new String[]{
-        "uses s3",
-        "provides s3 with impl1,",
-        "   impl2, impl3;"
+        "   /* comment */ m4; uses p.I",
+        ";   provides s1 with impl3;"
     };
 
     final Builder builder;
@@ -110,11 +94,6 @@
         this.builder = new Builder(name).modules(modules);
     }
 
-    void run() throws IOException {
-        testModuleInfo();
-        errorCases();
-    }
-
 
     void testModuleInfo() throws IOException {
         GenModuleInfoSource source = builder.sourceFile(moduleInfo).build();
@@ -155,7 +134,9 @@
                Set<String> opensPQ,
                Set<String> providerS,
                Set<String> providerS1) {
-        source.moduleInfo.print(new PrintWriter(System.out, true));
+        if (verbose)
+            source.moduleInfo.print(new PrintWriter(System.out, true));
+
         Statement export = source.moduleInfo.exports.get("p");
         if (!export.targets.equals(targetsP)) {
             throw new Error("unexpected: " + export);
@@ -177,24 +158,112 @@
         }
     }
 
+    final Map<String[], String> badModuleInfos = Map.of(
+        new String[] {
+            "module x {",
+            "   exports p1 to ",
+            "           m1",
+            "}"
+        },                      ".*, line .*, missing semicolon.*",
+        new String[] {
+            "module x ",
+            "   exports p1;"
+        },                      ".*, line .*, missing \\{.*",
+        new String[] {
+            "module x {",
+            "   requires m1;",
+            "   requires",
+            "}"
+        },                      ".*, line .*, <identifier> missing.*",
+        new String[] {
+            "module x {",
+            "   requires transitive m1",
+            "}"
+        },                      ".*, line .*, missing semicolon.*",
+        new String[] {
+            "module x {",
+            "   exports p1 to m1;",
+            "   exports p1 to m2;",
+            "}"
+        },                      ".*, line .*, multiple exports p1.*"
+    );
 
+    final Map<String[], String> badExtraFiles = Map.of(
+            new String[] {
+                "requires m2;"     // not allowed
+            },                      ".*, line .*, cannot declare requires .*",
+            new String[] {
+                "exports p1 to m1;",
+                "exports p2"            // missing semicolon
+            },                      ".*, line .*, reach end of file.*",
+            new String[] {
+                "exports to m1;"        // missing <identifier>
+            },                      ".*, line .*, <identifier> missing.*",
+            new String[] {
+                "exports p3 to m1;",
+                "    m2, m3;"           // missing keyword
+            },                      ".*, line .*, missing keyword.*",
+            new String[] {
+                "provides s with impl1;",   // typo ; should be ,
+                "   impl2, impl3;"
+            },                      ".*, line .*, missing keyword.*",
+            new String[] {
+                "uses s3",                  // missing semicolon
+                "provides s3 with impl1,",
+                "   impl2, impl3;"
+            },                      ".*, line .*, missing semicolon.*",
+            new String[] {
+                "opens p1 to m1,, m2;"     // missing identifier
+            },                      ".*, line .*, <identifier> missing.*"
+    );
 
-    void errorCases() throws IOException {
-        fail(test1);
-        fail(test2);
-        fail(test3);
-        fail(test4);
-        fail(test5);
+    final Map<String[], String> duplicates = Map.of(
+            new String[] {
+                "   exports p1 to m1, m2;",
+                "   exports p1 to m3;",
+            },                      ".*, line .*, multiple exports p1.*",
+            new String[] {
+                "   opens p1 to m1, m2;",
+                "   exports p1 to m3;",
+                "   opens p1 to m3;"
+            },                      ".*, line .*, multiple opens p1.*",
+            new String[] {
+                "   uses s;",
+                "   uses s;"
+            },                      ".*, line .*, multiple uses s.*"
+    );
+
+    void errorCases() {
+        badModuleInfos.entrySet().stream().forEach(e -> badModuleInfoFile(e.getKey(), e.getValue()));
+        badExtraFiles.entrySet().stream().forEach(e -> badExtraFile(e.getKey(), e.getValue()));
+        duplicates.entrySet().stream().forEach(e -> badExtraFile(e.getKey(), e.getValue()));
     }
 
-    void fail(String... extras) throws IOException {
+    void badModuleInfoFile(String[] lines, String regex)  {
+        Builder builder = new Builder("x").modules("m1", "m2", "m3");
+        try {
+            GenModuleInfoSource source = builder.sourceFile(lines).build();
+            throw new RuntimeException("Expected error: " + Arrays.toString(lines));
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        } catch (Error e) {
+            if (!e.getMessage().matches(regex)) {
+                throw e;
+            }
+        }
+    }
+
+    void badExtraFile(String[] extras, String regex)  {
         Path file = DIR.resolve("test1");
-        Files.write(file, Arrays.asList(extras));
         try {
+            Files.write(file, Arrays.asList(extras));
             builder.build(file);
-        } catch (RuntimeException e) {
-            if (!e.getMessage().matches("test/test1, line .* is malformed.*") &&
-                !e.getMessage().matches("test/test1, line .* missing keyword.*")) {
+            Files.deleteIfExists(file);
+            throw new RuntimeException("Expected error: " + Arrays.toString(extras));
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        } catch (Error e) {
+            if (!e.getMessage().matches(regex)) {
                 throw e;
             }
         }
@@ -218,11 +287,9 @@
             Files.createDirectories(sourceFile.getParent());
             try (BufferedWriter bw = Files.newBufferedWriter(sourceFile);
                  PrintWriter writer = new PrintWriter(bw)) {
-                writer.format("module %s {%n", moduleName);
                 for (String l : lines) {
                     writer.println(l);
                 }
-                writer.println("}");
             }
             return this;
         }
--- a/make/lib/Lib-jdk.jdi.gmk	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/lib/Lib-jdk.jdi.gmk	Thu Aug 09 22:06:11 2018 +0200
@@ -32,7 +32,7 @@
   $(eval $(call SetupJdkLibrary, BUILD_LIBDT_SHMEM, \
       NAME := dt_shmem, \
       OPTIMIZATION := LOW, \
-      CFLAGS := $(CFLAGS_JDKLIB) -DUSE_MMAP, \
+      CFLAGS := $(CFLAGS_JDKLIB), \
       EXTRA_HEADER_DIRS := \
           jdk.jdwp.agent:include \
           jdk.jdwp.agent:libjdwp/export, \
--- a/make/lib/Lib-jdk.jdwp.agent.gmk	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/lib/Lib-jdk.jdwp.agent.gmk	Thu Aug 09 22:06:11 2018 +0200
@@ -30,8 +30,7 @@
 $(eval $(call SetupJdkLibrary, BUILD_LIBDT_SOCKET, \
     NAME := dt_socket, \
     OPTIMIZATION := LOW, \
-    CFLAGS := $(CFLAGS_JDKLIB) -DUSE_MMAP \
-        $(LIBDT_SOCKET_CPPFLAGS), \
+    CFLAGS := $(CFLAGS_JDKLIB) $(LIBDT_SOCKET_CPPFLAGS), \
     EXTRA_HEADER_DIRS := \
         include \
         libjdwp/export, \
--- a/make/nb_native/nbproject/configurations.xml	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/nb_native/nbproject/configurations.xml	Thu Aug 09 22:06:11 2018 +0200
@@ -116,6 +116,7 @@
             <in>IBM1122.map</in>
             <in>IBM1123.map</in>
             <in>IBM1124.map</in>
+            <in>IBM1129.map</in>
             <in>IBM1140.map</in>
             <in>IBM1141.map</in>
             <in>IBM1142.map</in>
@@ -16285,6 +16286,11 @@
             tool="3"
             flavor2="0">
       </item>
+      <item path="../../make/data/charsetmapping/IBM1129.map"
+            ex="false"
+            tool="3"
+            flavor2="0">
+      </item>
       <item path="../../make/data/charsetmapping/IBM1140.map"
             ex="false"
             tool="3"
--- a/make/test/JtregGraalUnit.gmk	Thu Aug 02 22:06:18 2018 +0200
+++ b/make/test/JtregGraalUnit.gmk	Thu Aug 09 22:06:11 2018 +0200
@@ -46,6 +46,20 @@
     COMPILE_OUTPUTDIR := $(SUPPORT_OUTPUTDIR)/test/graalunit
     LIB_OUTPUTDIR := $(TEST_IMAGE_DIR)/hotspot/jtreg/graal
 
+    TEST_COMPILE_CP := \
+        $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.compiler \
+        $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.ci \
+        $(LIB_OUTPUTDIR)/junit-4.12.jar \
+        $(LIB_OUTPUTDIR)/asm-5.0.4.jar \
+        $(LIB_OUTPUTDIR)/asm-tree-5.0.4.jar \
+        $(LIB_OUTPUTDIR)/java-allocation-instrumenter.jar \
+        $(LIB_OUTPUTDIR)/hamcrest-core-1.3.jar
+
+    TEST_JAVAC_FLAGS := \
+        -Xlint:none \
+        -processorpath $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar \
+        --add-exports jdk.unsupported/sun.misc=ALL-UNNAMED \
+
     ### Copy 3rd party libs
     $(eval $(call SetupCopyFiles, COPY_GRAALUNIT_LIBS, \
         FILES := $(wildcard $(GRAALUNIT_LIB)/*.jar), \
@@ -54,7 +68,7 @@
 
     TARGETS_EXTRA_LIB += $(COPY_GRAALUNIT_LIBS)
 
-    ### Compile and build graalunit tests
+    ### Compile graalunit tests
     $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_TESTS, \
         SETUP := GENERATE_USINGJDKBYTECODE, \
         SRC := \
@@ -84,26 +98,41 @@
             $(SRC_DIR)/org.graalvm.compiler.jtt/src \
             $(SRC_DIR)/org.graalvm.compiler.lir.jtt/src \
             , \
+        EXCLUDE_FILES := org/graalvm/compiler/core/test/VerifyDebugUsageTest.java, \
         BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
-        JAR := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests.jar, \
-        CLASSPATH := \
-            $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.compiler \
-            $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.ci \
-            $(LIB_OUTPUTDIR)/junit-4.12.jar \
-            $(LIB_OUTPUTDIR)/asm-5.0.4.jar \
-            $(LIB_OUTPUTDIR)/asm-tree-5.0.4.jar \
-            $(LIB_OUTPUTDIR)/java-allocation-instrumenter.jar \
-            $(LIB_OUTPUTDIR)/hamcrest-core-1.3.jar \
-            , \
-        ADD_JAVAC_FLAGS := \
-            -Xlint:none -processorpath \
-            $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar \
-            --add-exports jdk.unsupported/sun.misc=ALL-UNNAMED \
-            , \
+        CLASSPATH := $(TEST_COMPILE_CP), \
+        ADD_JAVAC_FLAGS := $(TEST_JAVAC_FLAGS), \
     ))
 
     TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS)
 
+    ### Compile graalunit tests which require -XDstringConcat=inline
+    $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_TESTS_SET2, \
+        SETUP := GENERATE_USINGJDKBYTECODE, \
+        DEPENDS := $(BUILD_VM_COMPILER_TESTS), \
+        SRC := $(SRC_DIR)/org.graalvm.compiler.core.test/src, \
+        INCLUDE_FILES := org/graalvm/compiler/core/test/VerifyDebugUsageTest.java, \
+        BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
+        CLASSPATH := \
+            $(TEST_COMPILE_CP) \
+            $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests \
+            , \
+        ADD_JAVAC_FLAGS := \
+            $(TEST_JAVAC_FLAGS) \
+            -XDstringConcat=inline \
+            , \
+    ))
+
+    TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS_SET2)
+
+    ### Generate jdk.vm.compiler.tests.jar
+    $(eval $(call SetupJarArchive, BUILD_VM_COMPILER_TESTS_JAR, \
+        DEPENDENCIES := $(BUILD_VM_COMPILER_TESTS) $(BUILD_VM_COMPILER_TESTS_SET2), \
+        SRCS := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
+        JAR := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests.jar, \
+    ))
+
+    TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS_JAR)
 
     ### Compile and build mxtool
     $(eval $(call SetupJavaCompilation, BUILD_MXTOOL, \
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -48,6 +48,10 @@
   virtual void obj_equals(MacroAssembler* masm,
                           Register obj1, Register obj2);
 
+  virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
+    // Default implementation does not need to do anything.
+  }
+
   virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                              Register obj, Register tmp, Label& slowpath);
 
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -3990,6 +3990,15 @@
   }
 }
 
+void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
+  // Use stronger ACCESS_WRITE|ACCESS_READ by default.
+  if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
+    decorators |= ACCESS_READ | ACCESS_WRITE;
+  }
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  return bs->resolve(this, decorators, obj);
+}
+
 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                    Register thread_tmp, DecoratorSet decorators) {
   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -795,6 +795,10 @@
   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                        Register tmp1, Register tmp_thread);
 
+  // Resolves obj for access. Result is placed in the same register.
+  // All other registers are preserved.
+  void resolve(DecoratorSet decorators, Register obj);
+
   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1839,6 +1839,8 @@
     // Load the oop from the handle
     __ ldr(obj_reg, Address(oop_handle_reg, 0));
 
+    __ resolve(IS_NOT_NULL, obj_reg);
+
     if (UseBiasedLocking) {
       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, lock_done, &slow_path_lock);
     }
@@ -2001,6 +2003,8 @@
     // Get locked oop from the handle we passed to jni
     __ ldr(obj_reg, Address(oop_handle_reg, 0));
 
+    __ resolve(IS_NOT_NULL, obj_reg);
+
     Label done;
 
     if (UseBiasedLocking) {
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -836,6 +836,7 @@
 #endif // ASSERT
 
     __ bind(done);
+    __ resolve(IS_NOT_NULL, r0);
   }
 
   // add space for monitor & lock
@@ -1062,6 +1063,7 @@
       __ ldrw(crc,   Address(esp, 4*wordSize)); // Initial CRC
     } else {
       __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
+      __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
       __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
       __ ldrw(off, Address(esp, wordSize)); // offset
       __ add(buf, buf, off); // + offset
@@ -1106,6 +1108,9 @@
     __ ldrw(off, Address(esp, wordSize)); // int offset
     __ sub(len, end, off);
     __ ldr(buf, Address(esp, 2*wordSize)); // byte[] buf | long buf
+    if (kind == Interpreter::java_util_zip_CRC32C_updateBytes) {
+      __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
+    }
     __ add(buf, buf, off); // + offset
     if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
       __ ldrw(crc, Address(esp, 4*wordSize)); // long crc
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -3840,6 +3840,8 @@
   // check for NULL object
   __ null_check(r0);
 
+  __ resolve(IS_NOT_NULL, r0);
+
   const Address monitor_block_top(
         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
   const Address monitor_block_bot(
@@ -3939,6 +3941,8 @@
   // check for NULL object
   __ null_check(r0);
 
+  __ resolve(IS_NOT_NULL, r0);
+
   const Address monitor_block_top(
         rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
   const Address monitor_block_bot(
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1778,8 +1778,12 @@
 #else
   // FIXME: membar_release
   __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
+  Register addr = op->addr()->is_register() ?
+    op->addr()->as_pointer_register() :
+    op->addr()->as_address_ptr()->base()->as_pointer_register();
+  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
+  assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_OprDesc::illegalOpr(), "unexpected index");
   if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
-    Register addr = op->addr()->as_register();
     Register cmpval = op->cmp_value()->as_register();
     Register newval = op->new_value()->as_register();
     Register dest = op->result_opr()->as_register();
@@ -1790,7 +1794,6 @@
     __ mov(dest, 0, ne);
   } else if (op->code() == lir_cas_long) {
     assert(VM_Version::supports_cx8(), "wrong machine");
-    Register addr = op->addr()->as_pointer_register();
     Register cmp_value_lo = op->cmp_value()->as_register_lo();
     Register cmp_value_hi = op->cmp_value()->as_register_hi();
     Register new_value_lo = op->new_value()->as_register_lo();
@@ -3468,7 +3471,12 @@
 }
 
 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
+#ifdef AARCH64
   Register ptr = src->as_pointer_register();
+#else
+  assert(src->is_address(), "sanity");
+  Address addr = as_Address(src->as_address_ptr());
+#endif
 
   if (code == lir_xchg) {
 #ifdef AARCH64
@@ -3493,15 +3501,15 @@
 #ifdef AARCH64
     __ ldaxr_w(dst, ptr);
 #else
-    __ ldrex(dst, Address(ptr));
+    __ ldrex(dst, addr);
 #endif
     if (code == lir_xadd) {
       Register tmp_reg = tmp->as_register();
       if (data->is_constant()) {
-        assert_different_registers(dst, ptr, tmp_reg);
+        assert_different_registers(dst, tmp_reg);
         __ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
       } else {
-        assert_different_registers(dst, ptr, tmp_reg, data->as_register());
+        assert_different_registers(dst, tmp_reg, data->as_register());
         __ add_32(tmp_reg, dst, data->as_register());
       }
       new_val = tmp_reg;
@@ -3511,12 +3519,12 @@
       } else {
         new_val = data->as_register();
       }
-      assert_different_registers(dst, ptr, new_val);
+      assert_different_registers(dst, new_val);
     }
 #ifdef AARCH64
     __ stlxr_w(Rtemp, new_val, ptr);
 #else
-    __ strex(Rtemp, new_val, Address(ptr));
+    __ strex(Rtemp, new_val, addr);
 #endif // AARCH64
 
 #ifdef AARCH64
@@ -3551,7 +3559,7 @@
     assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");
 
     __ bind(retry);
-    __ ldrexd(dst_lo, Address(ptr));
+    __ ldrexd(dst_lo, addr);
     if (code == lir_xadd) {
       Register tmp_lo = tmp->as_register_lo();
       Register tmp_hi = tmp->as_register_hi();
@@ -3562,7 +3570,7 @@
       if (data->is_constant()) {
         jlong c = data->as_constant_ptr()->as_jlong();
         assert((jlong)((jint)c) == c, "overflow");
-        assert_different_registers(dst_lo, dst_hi, ptr, tmp_lo, tmp_hi);
+        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
         __ adds(tmp_lo, dst_lo, (jint)c);
         __ adc(tmp_hi, dst_hi, 0);
       } else {
@@ -3570,18 +3578,18 @@
         Register new_val_hi = data->as_register_hi();
         __ adds(tmp_lo, dst_lo, new_val_lo);
         __ adc(tmp_hi, dst_hi, new_val_hi);
-        assert_different_registers(dst_lo, dst_hi, ptr, tmp_lo, tmp_hi, new_val_lo, new_val_hi);
+        assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi);
       }
       new_val_lo = tmp_lo;
     } else {
       new_val_lo = data->as_register_lo();
       Register new_val_hi = data->as_register_hi();
 
-      assert_different_registers(dst_lo, dst_hi, ptr, new_val_lo, new_val_hi);
+      assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
       assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
       assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
     }
-    __ strexd(Rtemp, new_val_lo, Address(ptr));
+    __ strexd(Rtemp, new_val_lo, addr);
 #endif // AARCH64
   } else {
     ShouldNotReachHere();
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -2219,8 +2219,8 @@
       _evex_encoding(0),
       _is_clear_context(true),
       _is_extended_context(false),
-      _current_assembler(NULL),
-      _embedded_opmask_register_specifier(1) { // hard code k1, it will be initialized for now
+      _embedded_opmask_register_specifier(1), // hard code k1, it will be initialized for now
+      _current_assembler(NULL) {
     if (UseAVX < 3) _legacy_mode = true;
   }
 
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -89,13 +89,13 @@
 }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
-  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
+  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
-  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
+  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
--- a/src/hotspot/cpu/x86/c1_LinearScan_x86.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/c1_LinearScan_x86.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -110,9 +110,9 @@
 
 FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator)
   : _compilation(compilation)
+  , _allocator(allocator)
   , _lir(NULL)
   , _pos(-1)
-  , _allocator(allocator)
   , _sim(compilation)
   , _temp_sim(compilation)
 {}
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -61,6 +61,10 @@
   virtual void obj_equals(MacroAssembler* masm,
                           Register obj1, Address obj2);
 
+  virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) {
+    // Default implementation does not need to do anything.
+  }
+
   // Support for jniFastGetField to try resolving a jobject/jweak in native
   virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                              Register obj, Register tmp, Label& slowpath);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -6287,6 +6287,15 @@
   }
 }
 
+void MacroAssembler::resolve(DecoratorSet decorators, Register obj) {
+  // Use stronger ACCESS_WRITE|ACCESS_READ by default.
+  if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
+    decorators |= ACCESS_READ | ACCESS_WRITE;
+  }
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  return bs->resolve(this, decorators, obj);
+}
+
 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1,
                                    Register thread_tmp, DecoratorSet decorators) {
   access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -319,6 +319,10 @@
   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register src,
                        Register tmp1, Register tmp2);
 
+  // Resolves obj access. Result is placed in the same register.
+  // All other registers are preserved.
+  void resolve(DecoratorSet decorators, Register obj);
+
   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1633,12 +1633,12 @@
    public:
     MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
       _src(src)
+    , _dst(dst)
     , _src_index(src_index)
-    , _dst(dst)
     , _dst_index(dst_index)
+    , _processed(false)
     , _next(NULL)
-    , _prev(NULL)
-    , _processed(false) {
+    , _prev(NULL) {
     }
 
     VMRegPair src() const              { return _src; }
@@ -2450,6 +2450,7 @@
     // Load the oop from the handle
     __ movptr(obj_reg, Address(oop_handle_reg, 0));
 
+    __ resolve(IS_NOT_NULL, obj_reg);
     if (UseBiasedLocking) {
       __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
     }
@@ -2635,6 +2636,7 @@
 
     // Get locked oop from the handle we passed to jni
     __ movptr(obj_reg, Address(oop_handle_reg, 0));
+    __ resolve(IS_NOT_NULL, obj_reg);
 
     Label done;
 
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -635,6 +635,7 @@
 #endif // ASSERT
 
     __ bind(done);
+    __ resolve(IS_NOT_NULL, rax);
   }
 
   // add space for monitor & lock
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86_64.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -257,6 +257,7 @@
       __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
     } else {
       __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
+      __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
       __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
       __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
       __ addq(buf, off); // + offset
@@ -312,6 +313,7 @@
       //    "When calculating operand stack length, values of type long and double have length two."
     } else {
       __ movptr(buf, Address(rsp, 3 * wordSize)); // byte[] array
+      __ resolve(IS_NOT_NULL | ACCESS_READ, buf);
       __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
       __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
       __ addq(buf, off); // + offset
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -4357,6 +4357,8 @@
   // check for NULL object
   __ null_check(rax);
 
+  __ resolve(IS_NOT_NULL, rax);
+
   const Address monitor_block_top(
         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
   const Address monitor_block_bot(
@@ -4454,6 +4456,8 @@
   // check for NULL object
   __ null_check(rax);
 
+  __ resolve(IS_NOT_NULL, rax);
+
   const Address monitor_block_top(
         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
   const Address monitor_block_bot(
--- a/src/hotspot/os/linux/os_perf_linux.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/os/linux/os_perf_linux.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1094,7 +1094,7 @@
 
   NetworkInterface* ret = NULL;
   for (cur_address = addresses; cur_address != NULL; cur_address = cur_address->ifa_next) {
-    if (cur_address->ifa_addr->sa_family != AF_PACKET) {
+    if ((cur_address->ifa_addr == NULL) || (cur_address->ifa_addr->sa_family != AF_PACKET)) {
       continue;
     }
 
@@ -1105,6 +1105,7 @@
     ret = cur;
   }
 
+  freeifaddrs(addresses);
   *network_interfaces = ret;
 
   return OS_OK;
--- a/src/hotspot/os/solaris/os_perf_solaris.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/os/solaris/os_perf_solaris.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -808,6 +808,7 @@
     }
   }
 
+  kstat_close(ctl);
   *network_interfaces = ret;
 
   return OS_OK;
--- a/src/hotspot/share/adlc/output_h.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/adlc/output_h.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1006,19 +1006,19 @@
   fprintf(fp_hpp, "           enum machPipelineStages * const stage,\n");
   fprintf(fp_hpp, "           uint                    * const cycles,\n");
   fprintf(fp_hpp, "           Pipeline_Use                    resource_use)\n");
-  fprintf(fp_hpp, "  : _write_stage(write_stage)\n");
-  fprintf(fp_hpp, "  , _read_stage_count(count)\n");
+  fprintf(fp_hpp, "  : _read_stage_count(count)\n");
+  fprintf(fp_hpp, "  , _write_stage(write_stage)\n");
+  fprintf(fp_hpp, "  , _fixed_latency(fixed_latency)\n");
+  fprintf(fp_hpp, "  , _instruction_count(instruction_count)\n");
   fprintf(fp_hpp, "  , _has_fixed_latency(has_fixed_latency)\n");
-  fprintf(fp_hpp, "  , _fixed_latency(fixed_latency)\n");
+  fprintf(fp_hpp, "  , _has_branch_delay(has_branch_delay)\n");
+  fprintf(fp_hpp, "  , _has_multiple_bundles(has_multiple_bundles)\n");
+  fprintf(fp_hpp, "  , _force_serialization(force_serialization)\n");
+  fprintf(fp_hpp, "  , _may_have_no_code(may_have_no_code)\n");
   fprintf(fp_hpp, "  , _read_stages(dst)\n");
   fprintf(fp_hpp, "  , _resource_stage(stage)\n");
   fprintf(fp_hpp, "  , _resource_cycles(cycles)\n");
   fprintf(fp_hpp, "  , _resource_use(resource_use)\n");
-  fprintf(fp_hpp, "  , _instruction_count(instruction_count)\n");
-  fprintf(fp_hpp, "  , _has_branch_delay(has_branch_delay)\n");
-  fprintf(fp_hpp, "  , _has_multiple_bundles(has_multiple_bundles)\n");
-  fprintf(fp_hpp, "  , _force_serialization(force_serialization)\n");
-  fprintf(fp_hpp, "  , _may_have_no_code(may_have_no_code)\n");
   fprintf(fp_hpp, "  {};\n");
   fprintf(fp_hpp, "\n");
   fprintf(fp_hpp, "  uint writeStage() const {\n");
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -714,7 +714,7 @@
 void AOTCodeHeap::sweep_method(AOTCompiledMethod *aot) {
   int indexes[] = {aot->method_index()};
   sweep_dependent_methods(indexes, 1);
-  vmassert(aot->method()->code() != aot && aot->method()->aot_code() == NULL, "method still active");
+  vmassert(aot->method()->code() != aot TIERED_ONLY( && aot->method()->aot_code() == NULL), "method still active");
 }
 
 
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -206,6 +206,7 @@
   return true;
 }
 
+#ifdef TIERED
 bool AOTCompiledMethod::make_entrant() {
   assert(!method()->is_old(), "reviving evolved method!");
   assert(*_state_adr != not_entrant, "%s", method()->has_aot_code() ? "has_aot_code() not cleared" : "caller didn't check has_aot_code()");
@@ -240,6 +241,7 @@
 
   return true;
 }
+#endif // TIERED
 
 // We don't have full dependencies for AOT methods, so flushing is
 // more conservative than for nmethods.
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -138,8 +138,8 @@
     _heap(heap),
     _name(name),
     _metadata_size(metadata_size),
-    _method_index(method_index),
-    _aot_id(aot_id) {
+    _aot_id(aot_id),
+    _method_index(method_index) {
 
     _is_far_code = CodeCache::is_far_target(code) ||
                    CodeCache::is_far_target(code + meta->code_size());
@@ -194,7 +194,7 @@
   virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); }
   virtual void log_identity(xmlStream* stream) const;
   virtual void log_state_change() const;
-  virtual bool make_entrant();
+  virtual bool make_entrant() NOT_TIERED({ ShouldNotReachHere(); return false; });
   virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); }
   virtual bool make_not_used() { return make_not_entrant_helper(not_used); }
   virtual address entry_point() const { return _code + _meta->entry_offset(); }
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -218,7 +218,7 @@
 
  public:
   ImplicitNullCheckStub(int offset, CodeEmitInfo* info)
-    : _offset(offset), _info(info) {
+    : _info(info), _offset(offset) {
   }
   virtual void emit_code(LIR_Assembler* e);
   virtual CodeEmitInfo* info() const             { return _info; }
@@ -479,7 +479,7 @@
 
  public:
   SimpleExceptionStub(Runtime1::StubID stub, LIR_Opr obj, CodeEmitInfo* info):
-    _obj(obj), _info(info), _stub(stub) {
+    _obj(obj), _stub(stub), _info(info) {
   }
 
   void set_obj(LIR_Opr obj) {
--- a/src/hotspot/share/c1/c1_Compilation.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_Compilation.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -538,12 +538,13 @@
   }
 }
 
-
 Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* method,
                          int osr_bci, BufferBlob* buffer_blob, DirectiveSet* directive)
-: _compiler(compiler)
+: _next_id(0)
+, _next_block_id(0)
+, _compiler(compiler)
+, _directive(directive)
 , _env(env)
-, _directive(directive)
 , _log(env->log())
 , _method(method)
 , _osr_bci(osr_bci)
@@ -553,19 +554,17 @@
 , _masm(NULL)
 , _has_exception_handlers(false)
 , _has_fpu_code(true)   // pessimistic assumption
+, _has_unsafe_access(false)
 , _would_profile(false)
-, _has_unsafe_access(false)
 , _has_method_handle_invokes(false)
 , _has_reserved_stack_access(method->has_reserved_stack_access())
 , _bailout_msg(NULL)
 , _exception_info_list(NULL)
 , _allocator(NULL)
-, _next_id(0)
-, _next_block_id(0)
 , _code(buffer_blob)
 , _has_access_indexed(false)
+, _interpreter_frame_size(0)
 , _current_instruction(NULL)
-, _interpreter_frame_size(0)
 #ifndef PRODUCT
 , _last_instruction_printed(NULL)
 , _cfg_printer_output(NULL)
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -102,11 +102,11 @@
  , _scope(scope)
  , _blocks(16)
  , _bci2block(new BlockList(scope->method()->code_size(), NULL))
- , _next_block_number(0)
  , _active()         // size not known yet
  , _visited()        // size not known yet
+ , _loop_map() // size not known yet
  , _next_loop_index(0)
- , _loop_map() // size not known yet
+ , _next_block_number(0)
 {
   set_entries(osr_bci);
   set_leaders();
@@ -680,10 +680,10 @@
   , _has_handler(false)
   , _stream(NULL)
   , _work_list(NULL)
+  , _caller_stack_size(-1)
+  , _continuation(NULL)
   , _parsing_jsr(false)
   , _jsr_xhandlers(NULL)
-  , _caller_stack_size(-1)
-  , _continuation(NULL)
   , _num_returns(0)
   , _cleanup_block(NULL)
   , _cleanup_return_prev(NULL)
@@ -3195,11 +3195,11 @@
 
 GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope)
   : _scope_data(NULL)
+  , _compilation(compilation)
+  , _memory(new MemoryBuffer())
+  , _inline_bailout_msg(NULL)
   , _instruction_count(0)
   , _osr_entry(NULL)
-  , _memory(new MemoryBuffer())
-  , _compilation(compilation)
-  , _inline_bailout_msg(NULL)
 {
   int osr_bci = compilation->osr_bci();
 
--- a/src/hotspot/share/c1/c1_IR.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_IR.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -132,8 +132,8 @@
 
 
 IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMethod* method, int osr_bci, bool create_graph)
-: _callees(2)
-, _compilation(compilation)
+: _compilation(compilation)
+, _callees(2)
 , _requires_phi_function(method->max_locals())
 {
   _caller             = caller;
@@ -184,11 +184,11 @@
 
 // Stack must be NON-null
 CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers, bool deoptimize_on_exception)
-  : _scope(stack->scope())
-  , _scope_debug_info(NULL)
+  : _scope_debug_info(NULL)
+  , _scope(stack->scope())
+  , _exception_handlers(exception_handlers)
   , _oop_map(NULL)
   , _stack(stack)
-  , _exception_handlers(exception_handlers)
   , _is_method_handle_invoke(false)
   , _deoptimize_on_exception(deoptimize_on_exception) {
   assert(_stack != NULL, "must be non null");
@@ -196,9 +196,9 @@
 
 
 CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
-  : _scope(info->_scope)
+  : _scope_debug_info(NULL)
+  , _scope(info->_scope)
   , _exception_handlers(NULL)
-  , _scope_debug_info(NULL)
   , _oop_map(NULL)
   , _stack(stack == NULL ? info->_stack : stack)
   , _is_method_handle_invoke(info->_is_method_handle_invoke)
@@ -497,6 +497,7 @@
   // computation of final block order
   BlockBegin* common_dominator(BlockBegin* a, BlockBegin* b);
   void compute_dominator(BlockBegin* cur, BlockBegin* parent);
+  void compute_dominator_impl(BlockBegin* cur, BlockBegin* parent);
   int  compute_weight(BlockBegin* cur);
   bool ready_for_processing(BlockBegin* cur);
   void sort_into_work_list(BlockBegin* b);
@@ -526,14 +527,14 @@
   _num_blocks(0),
   _num_loops(0),
   _iterative_dominators(false),
+  _linear_scan_order(NULL), // initialized later with correct size
   _visited_blocks(_max_block_id),
   _active_blocks(_max_block_id),
   _dominator_blocks(_max_block_id),
   _forward_branches(_max_block_id, _max_block_id, 0),
   _loop_end_blocks(8),
+  _loop_map(0),             // initialized later with correct size
   _work_list(8),
-  _linear_scan_order(NULL), // initialized later with correct size
-  _loop_map(0),             // initialized later with correct size
   _compilation(c)
 {
   TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));
@@ -770,6 +771,14 @@
 }
 
 void ComputeLinearScanOrder::compute_dominator(BlockBegin* cur, BlockBegin* parent) {
+  init_visited();
+  compute_dominator_impl(cur, parent);
+}
+
+void ComputeLinearScanOrder::compute_dominator_impl(BlockBegin* cur, BlockBegin* parent) {
+  // Mark as visited to avoid recursive calls with same parent
+  set_visited(cur);
+
   if (cur->dominator() == NULL) {
     TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: initializing dominator of B%d to B%d", cur->block_id(), parent->block_id()));
     cur->set_dominator(parent);
@@ -788,7 +797,9 @@
   int num_cur_xhandler = cur->number_of_exception_handlers();
   for (int j = 0; j < num_cur_xhandler; j++) {
     BlockBegin* xhandler = cur->exception_handler_at(j);
-    compute_dominator(xhandler, parent);
+    if (!is_visited(xhandler)) {
+      compute_dominator_impl(xhandler, parent);
+    }
   }
 }
 
--- a/src/hotspot/share/c1/c1_IR.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_IR.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -215,8 +215,8 @@
                    GrowableArray<MonitorValue*>* monitors,
                    IRScopeDebugInfo*             caller):
       _scope(scope)
+    , _bci(bci)
     , _locals(locals)
-    , _bci(bci)
     , _expressions(expressions)
     , _monitors(monitors)
     , _caller(caller) {}
--- a/src/hotspot/share/c1/c1_Instruction.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_Instruction.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -410,19 +410,20 @@
 
   // creation
   Instruction(ValueType* type, ValueStack* state_before = NULL, bool type_is_constant = false)
-  : _use_count(0)
+  :
 #ifndef PRODUCT
-  , _printable_bci(-99)
+  _printable_bci(-99),
 #endif
+    _use_count(0)
   , _pin_state(0)
   , _type(type)
   , _next(NULL)
-  , _block(NULL)
   , _subst(NULL)
+  , _operand(LIR_OprFact::illegalOpr)
   , _flags(0)
-  , _operand(LIR_OprFact::illegalOpr)
   , _state_before(state_before)
   , _exception_handlers(NULL)
+  , _block(NULL)
   {
     check_state(state_before);
     assert(type != NULL && (!type->is_constant() || type_is_constant), "type must exist");
@@ -705,8 +706,8 @@
   Local(ciType* declared, ValueType* type, int index, bool receiver)
     : Instruction(type)
     , _java_index(index)
+    , _is_receiver(receiver)
     , _declared_type(declared)
-    , _is_receiver(receiver)
   {
     NOT_PRODUCT(set_printable_bci(-1));
   }
@@ -1664,19 +1665,21 @@
   , _bci(bci)
   , _depth_first_number(-1)
   , _linear_scan_number(-1)
+  , _dominator_depth(-1)
   , _loop_depth(0)
+  , _loop_index(-1)
   , _flags(0)
-  , _dominator_depth(-1)
+  , _total_preds(0)
+  , _stores_to_locals()
+  , _successors(2)
+  , _predecessors(2)
+  , _dominates(2)
   , _dominator(NULL)
   , _end(NULL)
-  , _predecessors(2)
-  , _successors(2)
-  , _dominates(2)
   , _exception_handlers(1)
   , _exception_states(NULL)
   , _exception_handler_pco(-1)
   , _lir(NULL)
-  , _loop_index(-1)
   , _live_in()
   , _live_out()
   , _live_gen()
@@ -1685,8 +1688,6 @@
   , _fpu_stack_state(NULL)
   , _first_lir_instruction_id(-1)
   , _last_lir_instruction_id(-1)
-  , _total_preds(0)
-  , _stores_to_locals()
   {
     _block = this;
 #ifndef PRODUCT
@@ -1872,18 +1873,18 @@
   // creation
   Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false)
     : BlockEnd(illegalType, state_before, is_safepoint)
-    , _direction(none)
     , _profiled_method(NULL)
-    , _profiled_bci(0) {
+    , _profiled_bci(0)
+    , _direction(none) {
     BlockList* s = new BlockList(1);
     s->append(sux);
     set_sux(s);
   }
 
   Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint)
-                                           , _direction(none)
                                            , _profiled_method(NULL)
-                                           , _profiled_bci(0) {
+                                           , _profiled_bci(0)
+                                           , _direction(none) {
     BlockList* s = new BlockList(1);
     s->append(sux);
     set_sux(s);
@@ -2550,9 +2551,9 @@
  public:
   RuntimeCall(ValueType* type, const char* entry_name, address entry, Values* args, bool pass_thread = true)
     : Instruction(type)
+    , _entry_name(entry_name)
     , _entry(entry)
     , _args(args)
-    , _entry_name(entry_name)
     , _pass_thread(pass_thread) {
     ASSERT_VALUES
     pin();
--- a/src/hotspot/share/c1/c1_InstructionPrinter.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_InstructionPrinter.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -47,8 +47,8 @@
 
  public:
   InstructionPrinter(bool print_phis = true, outputStream* output = tty)
-    : _print_phis(print_phis)
-    , _output(output)
+    : _output(output)
+    , _print_phis(print_phis)
   {}
 
   outputStream* output() { return _output; }
--- a/src/hotspot/share/c1/c1_LIR.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIR.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -321,9 +321,9 @@
   , _tmp2(tmp2)
   , _tmp3(tmp3)
   , _fast_check(fast_check)
-  , _stub(stub)
   , _info_for_patch(info_for_patch)
   , _info_for_exception(info_for_exception)
+  , _stub(stub)
   , _profiled_method(NULL)
   , _profiled_bci(-1)
   , _should_profile(false)
@@ -348,9 +348,9 @@
   , _tmp2(tmp2)
   , _tmp3(tmp3)
   , _fast_check(false)
-  , _stub(NULL)
   , _info_for_patch(NULL)
   , _info_for_exception(info_for_exception)
+  , _stub(NULL)
   , _profiled_method(NULL)
   , _profiled_bci(-1)
   , _should_profile(false)
@@ -367,14 +367,14 @@
 LIR_OpArrayCopy::LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length,
                                  LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info)
   : LIR_Op(lir_arraycopy, LIR_OprFact::illegalOpr, info)
-  , _tmp(tmp)
   , _src(src)
   , _src_pos(src_pos)
   , _dst(dst)
   , _dst_pos(dst_pos)
-  , _flags(flags)
+  , _length(length)
+  , _tmp(tmp)
   , _expected_type(expected_type)
-  , _length(length) {
+  , _flags(flags) {
   _stub = new ArrayCopyStub(this);
 }
 
--- a/src/hotspot/share/c1/c1_LIR.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIR.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -516,36 +516,36 @@
        _base(base)
      , _index(index)
      , _scale(times_1)
-     , _type(type)
-     , _disp(0) { verify(); }
+     , _disp(0)
+     , _type(type) { verify(); }
 
   LIR_Address(LIR_Opr base, intx disp, BasicType type):
        _base(base)
      , _index(LIR_OprDesc::illegalOpr())
      , _scale(times_1)
-     , _type(type)
-     , _disp(disp) { verify(); }
+     , _disp(disp)
+     , _type(type) { verify(); }
 
   LIR_Address(LIR_Opr base, BasicType type):
        _base(base)
      , _index(LIR_OprDesc::illegalOpr())
      , _scale(times_1)
-     , _type(type)
-     , _disp(0) { verify(); }
+     , _disp(0)
+     , _type(type) { verify(); }
 
   LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
        _base(base)
      , _index(index)
      , _scale(times_1)
-     , _type(type)
-     , _disp(disp) { verify(); }
+     , _disp(disp)
+     , _type(type) { verify(); }
 
   LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
        _base(base)
      , _index(index)
      , _scale(scale)
-     , _type(type)
-     , _disp(disp) { verify(); }
+     , _disp(disp)
+     , _type(type) { verify(); }
 
   LIR_Opr base()  const                          { return _base;  }
   LIR_Opr index() const                          { return _index; }
@@ -1058,30 +1058,32 @@
 
  public:
   LIR_Op()
-    : _result(LIR_OprFact::illegalOpr)
+    :
+#ifdef ASSERT
+      _file(NULL)
+    , _line(0),
+#endif
+      _result(LIR_OprFact::illegalOpr)
     , _code(lir_none)
     , _flags(0)
     , _info(NULL)
-#ifdef ASSERT
-    , _file(NULL)
-    , _line(0)
-#endif
+    , _id(-1)
     , _fpu_pop_count(0)
-    , _source(NULL)
-    , _id(-1)                             {}
+    , _source(NULL) {}
 
   LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
-    : _result(result)
+    :
+#ifdef ASSERT
+      _file(NULL)
+    , _line(0),
+#endif
+      _result(result)
     , _code(code)
     , _flags(0)
     , _info(info)
-#ifdef ASSERT
-    , _file(NULL)
-    , _line(0)
-#endif
+    , _id(-1)
     , _fpu_pop_count(0)
-    , _source(NULL)
-    , _id(-1)                             {}
+    , _source(NULL) {}
 
   CodeEmitInfo* info() const                  { return _info;   }
   LIR_Code code()      const                  { return (LIR_Code)_code;   }
@@ -1153,8 +1155,8 @@
   LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
              LIR_OprList* arguments, CodeEmitInfo* info = NULL)
     : LIR_Op(code, result, info)
-    , _arguments(arguments)
-    , _addr(addr) {}
+    , _addr(addr)
+    , _arguments(arguments) {}
 
  public:
   address addr() const                           { return _addr; }
@@ -1180,8 +1182,8 @@
                  address addr, LIR_OprList* arguments,
                  CodeEmitInfo* info)
   : LIR_OpCall(code, addr, result, arguments, info)
+  , _method(method)
   , _receiver(receiver)
-  , _method(method)
   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
 
@@ -1189,8 +1191,8 @@
                  LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
                  LIR_OprList* arguments, CodeEmitInfo* info)
   : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
+  , _method(method)
   , _receiver(receiver)
-  , _method(method)
   , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
   { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }
 
@@ -1345,14 +1347,14 @@
   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
     : LIR_Op(code, result, info)
     , _opr(opr)
-    , _patch(patch)
-    , _type(type)                      { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
+    , _type(type)
+    , _patch(patch)                    { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
 
   LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
     : LIR_Op(code, result, info)
     , _opr(opr)
-    , _patch(patch)
-    , _type(type)                      {
+    , _type(type)
+    , _patch(patch)                    {
     assert(code == lir_move, "must be");
     set_kind(kind);
   }
@@ -1360,8 +1362,8 @@
   LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
     , _opr(opr)
-    , _patch(lir_patch_none)
-    , _type(T_ILLEGAL)                 { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
+    , _type(T_ILLEGAL)
+    , _patch(lir_patch_none)           { assert(is_in_range(code, begin_op1, end_op1), "code check"); }
 
   LIR_Opr in_opr()           const               { return _opr;   }
   LIR_PatchCode patch_code() const               { return _patch; }
@@ -1462,8 +1464,8 @@
  public:
    LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
      : LIR_Op1(lir_convert, opr, result)
-     , _stub(stub)
-     , _bytecode(code)                           {}
+     , _bytecode(code)
+     , _stub(stub)                               {}
 
   Bytecodes::Code bytecode() const               { return _bytecode; }
   ConversionStub* stub() const                   { return _stub; }
@@ -1501,8 +1503,8 @@
     , _tmp4(t4)
     , _hdr_size(hdr_size)
     , _obj_size(obj_size)
-    , _init_check(init_check)
-    , _stub(stub)                                { }
+    , _stub(stub)
+    , _init_check(init_check)                    { }
 
   LIR_Opr klass()        const                   { return in_opr();     }
   LIR_Opr obj()          const                   { return result_opr(); }
@@ -1611,31 +1613,31 @@
  public:
   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
     : LIR_Op(code, LIR_OprFact::illegalOpr, info)
+    , _fpu_stack_size(0)
     , _opr1(opr1)
     , _opr2(opr2)
     , _type(T_ILLEGAL)
-    , _condition(condition)
-    , _fpu_stack_size(0)
     , _tmp1(LIR_OprFact::illegalOpr)
     , _tmp2(LIR_OprFact::illegalOpr)
     , _tmp3(LIR_OprFact::illegalOpr)
     , _tmp4(LIR_OprFact::illegalOpr)
-    , _tmp5(LIR_OprFact::illegalOpr) {
+    , _tmp5(LIR_OprFact::illegalOpr)
+    , _condition(condition) {
     assert(code == lir_cmp || code == lir_assert, "code check");
   }
 
   LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
     : LIR_Op(code, result, NULL)
+    , _fpu_stack_size(0)
     , _opr1(opr1)
     , _opr2(opr2)
     , _type(type)
-    , _condition(condition)
-    , _fpu_stack_size(0)
     , _tmp1(LIR_OprFact::illegalOpr)
     , _tmp2(LIR_OprFact::illegalOpr)
     , _tmp3(LIR_OprFact::illegalOpr)
     , _tmp4(LIR_OprFact::illegalOpr)
-    , _tmp5(LIR_OprFact::illegalOpr) {
+    , _tmp5(LIR_OprFact::illegalOpr)
+    , _condition(condition) {
     assert(code == lir_cmove, "code check");
     assert(type != T_ILLEGAL, "cmove should have type");
   }
@@ -1643,32 +1645,32 @@
   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
           CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
     : LIR_Op(code, result, info)
+    , _fpu_stack_size(0)
     , _opr1(opr1)
     , _opr2(opr2)
     , _type(type)
-    , _condition(lir_cond_unknown)
-    , _fpu_stack_size(0)
     , _tmp1(LIR_OprFact::illegalOpr)
     , _tmp2(LIR_OprFact::illegalOpr)
     , _tmp3(LIR_OprFact::illegalOpr)
     , _tmp4(LIR_OprFact::illegalOpr)
-    , _tmp5(LIR_OprFact::illegalOpr) {
+    , _tmp5(LIR_OprFact::illegalOpr)
+    , _condition(lir_cond_unknown) {
     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
   }
 
   LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
           LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
     : LIR_Op(code, result, NULL)
+    , _fpu_stack_size(0)
     , _opr1(opr1)
     , _opr2(opr2)
     , _type(T_ILLEGAL)
-    , _condition(lir_cond_unknown)
-    , _fpu_stack_size(0)
     , _tmp1(tmp1)
     , _tmp2(tmp2)
     , _tmp3(tmp3)
     , _tmp4(tmp4)
-    , _tmp5(tmp5) {
+    , _tmp5(tmp5)
+    , _condition(lir_cond_unknown) {
     assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
   }
 
@@ -1833,8 +1835,8 @@
  public:
   LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
     : LIR_Op2(lir_assert, condition, opr1, opr2)
-    , _halt(halt)
-    , _msg(msg) {
+    , _msg(msg)
+    , _halt(halt) {
   }
 
   const char* msg() const                        { return _msg; }
@@ -1942,9 +1944,9 @@
     : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
     , _mdp(mdp)
     , _obj(obj)
+    , _tmp(tmp)
     , _exact_klass(exact_klass)
     , _current_klass(current_klass)
-    , _tmp(tmp)
     , _not_null(not_null)
     , _no_conflict(no_conflict) { }
 
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -99,9 +99,9 @@
 
 
 LIR_Assembler::LIR_Assembler(Compilation* c):
-   _compilation(c)
- , _masm(c->masm())
+   _masm(c->masm())
  , _bs(BarrierSet::barrier_set())
+ , _compilation(c)
  , _frame_map(c->frame_map())
  , _current_block(NULL)
  , _pending_non_safepoint(NULL)
--- a/src/hotspot/share/c1/c1_LinearScan.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_LinearScan.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -74,6 +74,7 @@
  , _ir(ir)
  , _gen(gen)
  , _frame_map(frame_map)
+ , _cached_blocks(*ir->linear_scan_order())
  , _num_virtual_regs(gen->max_virtual_register_number())
  , _has_fpu_registers(false)
  , _num_calls(-1)
@@ -87,9 +88,8 @@
  , _block_of_op(0) // initialized later with correct length
  , _has_info(0)
  , _has_call(0)
+ , _interval_in_loop(0)  // initialized later with correct length
  , _scope_value_cache(0) // initialized later with correct length
- , _interval_in_loop(0)  // initialized later with correct length
- , _cached_blocks(*ir->linear_scan_order())
 #ifdef X86
  , _fpu_stack_allocator(NULL)
 #endif
@@ -3717,13 +3717,13 @@
 
 MoveResolver::MoveResolver(LinearScan* allocator) :
   _allocator(allocator),
-  _multiple_reads_allowed(false),
+  _insert_list(NULL),
+  _insert_idx(-1),
+  _insertion_buffer(),
   _mapping_from(8),
   _mapping_from_opr(8),
   _mapping_to(8),
-  _insert_list(NULL),
-  _insert_idx(-1),
-  _insertion_buffer()
+  _multiple_reads_allowed(false)
 {
   for (int i = 0; i < LinearScan::nof_regs; i++) {
     _register_blocked[i] = 0;
@@ -4127,9 +4127,9 @@
   _split_children(0),
   _canonical_spill_slot(-1),
   _insert_move_when_activated(false),
-  _register_hint(NULL),
   _spill_state(noDefinitionFound),
-  _spill_definition_pos(-1)
+  _spill_definition_pos(-1),
+  _register_hint(NULL)
 {
   _split_parent = this;
   _current_split_child = this;
--- a/src/hotspot/share/c1/c1_Optimizer.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_Optimizer.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -47,7 +47,7 @@
   int _has_substitution;
 
  public:
-  CE_Eliminator(IR* hir) : _cee_count(0), _ifop_count(0), _hir(hir) {
+  CE_Eliminator(IR* hir) : _hir(hir), _cee_count(0), _ifop_count(0) {
     _has_substitution = false;
     _hir->iterate_preorder(this);
     if (_has_substitution) {
@@ -592,10 +592,10 @@
   // constructor
   NullCheckEliminator(Optimizer* opt)
     : _opt(opt)
+    , _work_list(new BlockList())
     , _set(new ValueSet())
-    , _last_explicit_null_check(NULL)
     , _block_states(BlockBegin::number_of_blocks(), BlockBegin::number_of_blocks(), NULL)
-    , _work_list(new BlockList()) {
+    , _last_explicit_null_check(NULL) {
     _visitable_instructions = new ValueSet();
     _visitor.set_eliminator(this);
     CompileLog* log = _opt->ir()->compilation()->log();
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -568,6 +568,7 @@
     if (log_is_enabled(Info, exceptions)) {
       ResourceMark rm;
       stringStream tempst;
+      assert(nm->method() != NULL, "Unexpected NULL method()");
       tempst.print("compiled method <%s>\n"
                    " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                    nm->method()->print_value_string(), p2i(pc), p2i(thread));
--- a/src/hotspot/share/c1/c1_ValueMap.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_ValueMap.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -485,9 +485,9 @@
 
 
 GlobalValueNumbering::GlobalValueNumbering(IR* ir)
-  : _current_map(NULL)
+  : _compilation(ir->compilation())
+  , _current_map(NULL)
   , _value_maps(ir->linear_scan_order()->length(), ir->linear_scan_order()->length(), NULL)
-  , _compilation(ir->compilation())
 {
   TRACE_VALUE_NUMBERING(tty->print_cr("****** start of global value numbering"));
 
--- a/src/hotspot/share/c1/c1_ValueType.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/c1/c1_ValueType.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -104,7 +104,7 @@
   const ValueTag _tag;
   ValueType();
  protected:
-  ValueType(ValueTag tag, int size): _tag(tag), _size(size) {}
+  ValueType(ValueTag tag, int size): _size(size), _tag(tag) {}
 
  public:
   // initialization
--- a/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/ci/bcEscapeAnalyzer.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1447,8 +1447,8 @@
 #endif
 
 BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
-    : _conservative(method == NULL || !EstimateArgEscape)
-    , _arena(CURRENT_ENV->arena())
+    : _arena(CURRENT_ENV->arena())
+    , _conservative(method == NULL || !EstimateArgEscape)
     , _method(method)
     , _methodData(method ? method->method_data() : NULL)
     , _arg_size(method ? method->arg_size() : 0)
--- a/src/hotspot/share/ci/ciMethod.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/ci/ciMethod.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -166,16 +166,16 @@
   ciMetadata((Metadata*)NULL),
   _name(                   name),
   _holder(                 holder),
+  _method_data(            NULL),
+  _method_blocks(          NULL),
   _intrinsic_id(           vmIntrinsics::_none),
-  _liveness(               NULL),
+  _instructions_size(-1),
   _can_be_statically_bound(false),
-  _method_blocks(          NULL),
-  _method_data(            NULL)
+  _liveness(               NULL)
 #if defined(COMPILER2)
   ,
   _flow(                   NULL),
-  _bcea(                   NULL),
-  _instructions_size(-1)
+  _bcea(                   NULL)
 #endif // COMPILER2
 {
   // Usually holder and accessor are the same type but in some cases
--- a/src/hotspot/share/ci/ciMethodBlocks.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/ci/ciMethodBlocks.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -345,13 +345,13 @@
 }
 #endif
 
-
 ciBlock::ciBlock(ciMethod *method, int index, int start_bci) :
+                         _idx(index), _start_bci(start_bci), _limit_bci(-1), _control_bci(fall_through_bci),
+                         _flags(0), _ex_start_bci(-1), _ex_limit_bci(-1)
 #ifndef PRODUCT
-                         _method(method),
+                         , _method(method)
 #endif
-                         _idx(index), _flags(0), _start_bci(start_bci), _limit_bci(-1), _control_bci(fall_through_bci),
-                         _ex_start_bci(-1), _ex_limit_bci(-1) {
+{
 }
 
 void ciBlock::set_exception_range(int start_bci, int limit_bci)  {
--- a/src/hotspot/share/ci/ciTypeFlow.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/ci/ciTypeFlow.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -724,8 +724,8 @@
 
   public:
     Loop(Block* head, Block* tail) :
+      _parent(NULL), _sibling(NULL), _child(NULL),
       _head(head),   _tail(tail),
-      _parent(NULL), _sibling(NULL), _child(NULL),
       _irreducible(false), _def_locals() {}
 
     Loop* parent()  const { return _parent; }
--- a/src/hotspot/share/classfile/classFileParser.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -922,10 +922,10 @@
   assert(has_nonstatic_concrete_methods != NULL, "invariant");
 
   if (itfs_len == 0) {
-    _local_interfaces = Universe::the_empty_klass_array();
+    _local_interfaces = Universe::the_empty_instance_klass_array();
   } else {
     assert(itfs_len > 0, "only called for len>0");
-    _local_interfaces = MetadataFactory::new_array<Klass*>(_loader_data, itfs_len, NULL, CHECK);
+    _local_interfaces = MetadataFactory::new_array<InstanceKlass*>(_loader_data, itfs_len, NULL, CHECK);
 
     int index;
     for (index = 0; index < itfs_len; index++) {
@@ -966,7 +966,7 @@
       if (InstanceKlass::cast(interf)->has_nonstatic_concrete_methods()) {
         *has_nonstatic_concrete_methods = true;
       }
-      _local_interfaces->at_put(index, interf);
+      _local_interfaces->at_put(index, InstanceKlass::cast(interf));
     }
 
     if (!_need_verify || itfs_len <= 1) {
@@ -984,8 +984,8 @@
     {
       debug_only(NoSafepointVerifier nsv;)
       for (index = 0; index < itfs_len; index++) {
-        const Klass* const k = _local_interfaces->at(index);
-        name = InstanceKlass::cast(k)->name();
+        const InstanceKlass* const k = _local_interfaces->at(index);
+        name = k->name();
         // If no duplicates, add (name, NULL) in hashtable interface_names.
         if (!put_after_lookup(name, NULL, interface_names)) {
           dup = true;
@@ -3572,6 +3572,9 @@
                          "Nest-host class_info_index %u has bad constant type in class file %s",
                          class_info_index, CHECK);
           _nest_host = class_info_index;
+        } else {
+          // Unknown attribute
+          cfs->skip_u1(attribute_length, CHECK);
         }
       } else {
         // Unknown attribute
@@ -4493,7 +4496,7 @@
     }
 
     // add super interface dependencies
-    const Array<Klass*>* const local_interfaces = defined_klass->local_interfaces();
+    const Array<InstanceKlass*>* const local_interfaces = defined_klass->local_interfaces();
     if (local_interfaces != NULL) {
       const int length = local_interfaces->length();
       for (int i = 0; i < length; i++) {
@@ -4505,21 +4508,21 @@
 
 // utility methods for appending an array with check for duplicates
 
-static void append_interfaces(GrowableArray<Klass*>* result,
-                              const Array<Klass*>* const ifs) {
+static void append_interfaces(GrowableArray<InstanceKlass*>* result,
+                              const Array<InstanceKlass*>* const ifs) {
   // iterate over new interfaces
   for (int i = 0; i < ifs->length(); i++) {
-    Klass* const e = ifs->at(i);
-    assert(e->is_klass() && InstanceKlass::cast(e)->is_interface(), "just checking");
+    InstanceKlass* const e = ifs->at(i);
+    assert(e->is_klass() && e->is_interface(), "just checking");
     // add new interface
     result->append_if_missing(e);
   }
 }
 
-static Array<Klass*>* compute_transitive_interfaces(const InstanceKlass* super,
-                                                    Array<Klass*>* local_ifs,
-                                                    ClassLoaderData* loader_data,
-                                                    TRAPS) {
+static Array<InstanceKlass*>* compute_transitive_interfaces(const InstanceKlass* super,
+                                                            Array<InstanceKlass*>* local_ifs,
+                                                            ClassLoaderData* loader_data,
+                                                            TRAPS) {
   assert(local_ifs != NULL, "invariant");
   assert(loader_data != NULL, "invariant");
 
@@ -4534,15 +4537,15 @@
   // Add local interfaces' super interfaces
   const int local_size = local_ifs->length();
   for (int i = 0; i < local_size; i++) {
-    Klass* const l = local_ifs->at(i);
-    max_transitive_size += InstanceKlass::cast(l)->transitive_interfaces()->length();
+    InstanceKlass* const l = local_ifs->at(i);
+    max_transitive_size += l->transitive_interfaces()->length();
   }
   // Finally add local interfaces
   max_transitive_size += local_size;
   // Construct array
   if (max_transitive_size == 0) {
     // no interfaces, use canonicalized array
-    return Universe::the_empty_klass_array();
+    return Universe::the_empty_instance_klass_array();
   } else if (max_transitive_size == super_size) {
     // no new local interfaces added, share superklass' transitive interface array
     return super->transitive_interfaces();
@@ -4551,7 +4554,7 @@
     return local_ifs;
   } else {
     ResourceMark rm;
-    GrowableArray<Klass*>* const result = new GrowableArray<Klass*>(max_transitive_size);
+    GrowableArray<InstanceKlass*>* const result = new GrowableArray<InstanceKlass*>(max_transitive_size);
 
     // Copy down from superclass
     if (super != NULL) {
@@ -4560,8 +4563,8 @@
 
     // Copy down from local interfaces' superinterfaces
     for (int i = 0; i < local_size; i++) {
-      Klass* const l = local_ifs->at(i);
-      append_interfaces(result, InstanceKlass::cast(l)->transitive_interfaces());
+      InstanceKlass* const l = local_ifs->at(i);
+      append_interfaces(result, l->transitive_interfaces());
     }
     // Finally add local interfaces
     append_interfaces(result, local_ifs);
@@ -4569,10 +4572,10 @@
     // length will be less than the max_transitive_size if duplicates were removed
     const int length = result->length();
     assert(length <= max_transitive_size, "just checking");
-    Array<Klass*>* const new_result =
-      MetadataFactory::new_array<Klass*>(loader_data, length, CHECK_NULL);
+    Array<InstanceKlass*>* const new_result =
+      MetadataFactory::new_array<InstanceKlass*>(loader_data, length, CHECK_NULL);
     for (int i = 0; i < length; i++) {
-      Klass* const e = result->at(i);
+      InstanceKlass* const e = result->at(i);
       assert(e != NULL, "just checking");
       new_result->at_put(i, e);
     }
@@ -4640,17 +4643,17 @@
 
 static void check_super_interface_access(const InstanceKlass* this_klass, TRAPS) {
   assert(this_klass != NULL, "invariant");
-  const Array<Klass*>* const local_interfaces = this_klass->local_interfaces();
+  const Array<InstanceKlass*>* const local_interfaces = this_klass->local_interfaces();
   const int lng = local_interfaces->length();
   for (int i = lng - 1; i >= 0; i--) {
-    Klass* const k = local_interfaces->at(i);
+    InstanceKlass* const k = local_interfaces->at(i);
     assert (k != NULL && k->is_interface(), "invalid interface");
     Reflection::VerifyClassAccessResults vca_result =
-      Reflection::verify_class_access(this_klass, InstanceKlass::cast(k), false);
+      Reflection::verify_class_access(this_klass, k, false);
     if (vca_result != Reflection::ACCESS_OK) {
       ResourceMark rm(THREAD);
       char* msg = Reflection::verify_class_access_msg(this_klass,
-                                                      InstanceKlass::cast(k),
+                                                      k,
                                                       vca_result);
       if (msg == NULL) {
         bool same_module = (this_klass->module() == k->module());
@@ -5732,11 +5735,11 @@
                    ik->java_super()->external_name());
       }
       // print out each of the interface classes referred to by this class.
-      const Array<Klass*>* const local_interfaces = ik->local_interfaces();
+      const Array<InstanceKlass*>* const local_interfaces = ik->local_interfaces();
       if (local_interfaces != NULL) {
         const int length = local_interfaces->length();
         for (int i = 0; i < length; i++) {
-          const Klass* const k = local_interfaces->at(i);
+          const InstanceKlass* const k = local_interfaces->at(i);
           const char * to = k->external_name();
           log_debug(class, resolve)("%s %s (interface)", from, to);
         }
@@ -6269,7 +6272,7 @@
   assert(_loader_data != NULL, "invariant");
 
   if (_class_name == vmSymbols::java_lang_Object()) {
-    check_property(_local_interfaces == Universe::the_empty_klass_array(),
+    check_property(_local_interfaces == Universe::the_empty_instance_klass_array(),
                    "java.lang.Object cannot implement an interface in class file %s",
                    CHECK);
   }
--- a/src/hotspot/share/classfile/classFileParser.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/classFileParser.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -99,8 +99,8 @@
   Array<u2>* _inner_classes;
   Array<u2>* _nest_members;
   u2 _nest_host;
-  Array<Klass*>* _local_interfaces;
-  Array<Klass*>* _transitive_interfaces;
+  Array<InstanceKlass*>* _local_interfaces;
+  Array<InstanceKlass*>* _transitive_interfaces;
   Annotations* _combined_annotations;
   AnnotationArray* _annotations;
   AnnotationArray* _type_annotations;
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -95,7 +95,7 @@
   ClassLoaderDataGraph::_head = _the_null_class_loader_data;
   assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
 
-  LogTarget(Debug, class, loader, data) lt;
+  LogTarget(Trace, class, loader, data) lt;
   if (lt.is_enabled()) {
     ResourceMark rm;
     LogStream ls(lt);
@@ -142,19 +142,22 @@
 }
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
-  _is_anonymous(is_anonymous),
+  _metaspace(NULL),
+  _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
+                            Monitor::_safepoint_check_never)),
+  _unloading(false), _is_anonymous(is_anonymous),
+  _modified_oops(true), _accumulated_modified_oops(false),
   // An anonymous class loader data doesn't have anything to keep
   // it from being unloaded during parsing of the anonymous class.
   // The null-class-loader should always be kept alive.
   _keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0),
-  _metaspace(NULL), _unloading(false), _klasses(NULL),
-  _modules(NULL), _packages(NULL), _unnamed_module(NULL), _dictionary(NULL),
-  _claimed(0), _modified_oops(true), _accumulated_modified_oops(false),
-  _jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
+  _claimed(0),
+  _handles(),
+  _klasses(NULL), _packages(NULL), _modules(NULL), _unnamed_module(NULL), _dictionary(NULL),
+  _jmethod_ids(NULL),
+  _deallocate_list(NULL),
   _next(NULL),
-  _class_loader_klass(NULL), _name(NULL), _name_and_id(NULL),
-  _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
-                            Monitor::_safepoint_check_never)) {
+  _class_loader_klass(NULL), _name(NULL), _name_and_id(NULL) {
 
   if (!h_class_loader.is_null()) {
     _class_loader = _handles.add(h_class_loader());
@@ -592,7 +595,7 @@
 void ClassLoaderData::unload() {
   _unloading = true;
 
-  LogTarget(Debug, class, loader, data) lt;
+  LogTarget(Trace, class, loader, data) lt;
   if (lt.is_enabled()) {
     ResourceMark rm;
     LogStream ls(lt);
@@ -603,7 +606,7 @@
 
   // Some items on the _deallocate_list need to free their C heap structures
   // if they are not already on the _klasses list.
-  unload_deallocate_list();
+  free_deallocate_list_C_heap_structures();
 
   // Tell serviceability tools these classes are unloading
   // after erroneous classes are released.
@@ -846,7 +849,7 @@
 }
 
 // Add this metadata pointer to be freed when it's safe.  This is only during
-// class unloading because Handles might point to this metadata field.
+// a safepoint which checks if handles point to this metadata field.
 void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
   // Metadata in shared region isn't deleted.
   if (!m->is_shared()) {
@@ -855,6 +858,8 @@
       _deallocate_list = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(100, true);
     }
     _deallocate_list->append_if_missing(m);
+    log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
+    ClassLoaderDataGraph::set_should_clean_deallocate_lists();
   }
 }
 
@@ -888,16 +893,52 @@
       assert(!m->is_klass() || !((InstanceKlass*)m)->is_scratch_class(),
              "scratch classes on this list should be dead");
       // Also should assert that other metadata on the list was found in handles.
+      // Some cleaning remains.
+      ClassLoaderDataGraph::set_should_clean_deallocate_lists();
     }
   }
 }
 
+void ClassLoaderDataGraph::clean_deallocate_lists(bool walk_previous_versions) {
+  uint loaders_processed = 0;
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    // is_alive check will be necessary for concurrent class unloading.
+    if (cld->is_alive()) {
+      // clean metaspace
+      if (walk_previous_versions) {
+        cld->classes_do(InstanceKlass::purge_previous_versions);
+      }
+      cld->free_deallocate_list();
+      loaders_processed++;
+    }
+  }
+  log_debug(class, loader, data)("clean_deallocate_lists: loaders processed %u %s",
+                                 loaders_processed, walk_previous_versions ? "walk_previous_versions" : "");
+}
+
+void ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");
+
+  _should_clean_deallocate_lists = false; // assume everything gets cleaned
+
+  // Mark metadata seen on the stack so we can delete unreferenced entries.
+  // Walk all metadata, including the expensive code cache walk, only for class redefinition.
+  // The MetadataOnStackMark walk during redefinition saves previous versions if it finds old methods
+  // on the stack or in the code cache, so we only have to repeat the full walk if
+  // they were found at that time.
+  // TODO: have redefinition clean old methods out of the code cache.  They still exist in some places.
+  bool walk_all_metadata = InstanceKlass::has_previous_versions_and_reset();
+
+  MetadataOnStackMark md_on_stack(walk_all_metadata);
+  clean_deallocate_lists(walk_all_metadata);
+}
+
 // This is distinct from free_deallocate_list.  For class loader data that are
 // unloading, this frees the C heap memory for items on the list, and unlinks
 // scratch or error classes so that unloading events aren't triggered for these
 // classes. The metadata is removed with the unloading metaspace.
 // There isn't C heap memory allocated for methods, so nothing is done for them.
-void ClassLoaderData::unload_deallocate_list() {
+void ClassLoaderData::free_deallocate_list_C_heap_structures() {
   // Don't need lock, at safepoint
   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   assert(is_unloading(), "only called for ClassLoaderData that are unloading");
@@ -907,7 +948,6 @@
   // Go backwards because this removes entries that are freed.
   for (int i = _deallocate_list->length() - 1; i >= 0; i--) {
     Metadata* m = _deallocate_list->at(i);
-    assert (!m->on_stack(), "wouldn't be unloading if this were so");
     _deallocate_list->remove_at(i);
     if (m->is_constantPool()) {
       ((ConstantPool*)m)->release_C_heap_structures();
@@ -1023,6 +1063,8 @@
 ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
 
 bool ClassLoaderDataGraph::_should_purge = false;
+bool ClassLoaderDataGraph::_should_clean_deallocate_lists = false;
+bool ClassLoaderDataGraph::_safepoint_cleanup_needed = false;
 bool ClassLoaderDataGraph::_metaspace_oom = false;
 
 // Add a new class loader data node to the list.  Assign the newly created
@@ -1053,7 +1095,7 @@
     cld->set_next(next);
     ClassLoaderData* exchanged = Atomic::cmpxchg(cld, list_head, next);
     if (exchanged == next) {
-      LogTarget(Debug, class, loader, data) lt;
+      LogTarget(Trace, class, loader, data) lt;
       if (lt.is_enabled()) {
         ResourceMark rm;
         LogStream ls(lt);
@@ -1334,7 +1376,10 @@
 
 // Move class loader data from main list to the unloaded list for unloading
 // and deallocation later.
-bool ClassLoaderDataGraph::do_unloading(bool clean_previous_versions) {
+bool ClassLoaderDataGraph::do_unloading(bool do_cleaning) {
+
+  // Indicate whether safepoint cleanup is needed.
+  _safepoint_cleanup_needed |= do_cleaning;
 
   ClassLoaderData* data = _head;
   ClassLoaderData* prev = NULL;
@@ -1342,15 +1387,6 @@
   uint loaders_processed = 0;
   uint loaders_removed = 0;
 
-  // Mark metadata seen on the stack only so we can delete unneeded entries.
-  // Only walk all metadata, including the expensive code cache walk, for Full GC
-  // and only if class redefinition and if there's previous versions of
-  // Klasses to delete.
-  bool walk_all_metadata = clean_previous_versions &&
-                           JvmtiExport::has_redefined_a_class() &&
-                           InstanceKlass::has_previous_versions_and_reset();
-  MetadataOnStackMark md_on_stack(walk_all_metadata);
-
   // Save previous _unloading pointer for CMS which may add to unloading list before
   // purging and we don't want to rewalk the previously unloaded class loader data.
   _saved_unloading = _unloading;
@@ -1358,11 +1394,6 @@
   data = _head;
   while (data != NULL) {
     if (data->is_alive()) {
-      // clean metaspace
-      if (walk_all_metadata) {
-        data->classes_do(InstanceKlass::purge_previous_versions);
-      }
-      data->free_deallocate_list();
       prev = data;
       data = data->next();
       loaders_processed++;
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -79,6 +79,12 @@
   static ClassLoaderData* _saved_head;
   static ClassLoaderData* _saved_unloading;
   static bool _should_purge;
+
+  // Set if there's anything to purge in the deallocate lists or previous versions
+  // during a safepoint after class unloading in a full GC.
+  static bool _should_clean_deallocate_lists;
+  static bool _safepoint_cleanup_needed;
+
   // OOM has been seen in metaspace allocation. Used to prevent some
   // allocations until class unloading
   static bool _metaspace_oom;
@@ -88,6 +94,7 @@
 
   static ClassLoaderData* add_to_graph(Handle class_loader, bool anonymous);
   static ClassLoaderData* add(Handle class_loader, bool anonymous);
+
  public:
   static ClassLoaderData* find_or_create(Handle class_loader);
   static void purge();
@@ -116,7 +123,13 @@
   static void packages_unloading_do(void f(PackageEntry*));
   static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
-  static bool do_unloading(bool clean_previous_versions);
+  static bool do_unloading(bool do_cleaning);
+
+  // Expose state to avoid logging overhead in safepoint cleanup tasks.
+  static inline bool should_clean_metaspaces_and_reset();
+  static void set_should_clean_deallocate_lists() { _should_clean_deallocate_lists = true; }
+  static void clean_deallocate_lists(bool purge_previous_versions);
+  static void walk_metadata_and_clean_metaspaces();
 
   // dictionary do
   // Iterate over all klasses in dictionary, but
@@ -185,7 +198,7 @@
       volatile juint _size;
       Chunk* _next;
 
-      Chunk(Chunk* c) : _next(c), _size(0) { }
+      Chunk(Chunk* c) : _size(0), _next(c) { }
     };
 
     Chunk* volatile _head;
@@ -297,8 +310,8 @@
   void packages_do(void f(PackageEntry*));
 
   // Deallocate free list during class unloading.
-  void free_deallocate_list();      // for the classes that are not unloaded
-  void unload_deallocate_list();    // for the classes that are unloaded
+  void free_deallocate_list();                      // for the classes that are not unloaded
+  void free_deallocate_list_C_heap_structures();    // for the classes that are unloaded
 
   // Allocate out of this class loader data
   MetaWord* allocate(size_t size);
--- a/src/hotspot/share/classfile/classLoaderData.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -27,6 +27,7 @@
 
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
+#include "oops/instanceKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oopHandle.inline.hpp"
 #include "oops/weakHandle.inline.hpp"
@@ -92,4 +93,10 @@
   Atomic::sub(count, &_num_array_classes);
 }
 
+bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() {
+  bool do_cleaning = _safepoint_cleanup_needed;
+  _safepoint_cleanup_needed = false;  // reset
+  return (do_cleaning && _should_clean_deallocate_lists) || InstanceKlass::has_previous_versions();
+}
+
 #endif // SHARE_VM_CLASSFILE_CLASSLOADERDATA_INLINE_HPP
--- a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -299,7 +299,7 @@
 
   LoaderTreeNode(const oop loader_oop)
     : _loader_oop(loader_oop), _cld(NULL), _child(NULL), _next(NULL),
-      _classes(NULL), _anon_classes(NULL), _num_classes(0), _num_anon_classes(0),
+      _classes(NULL), _num_classes(0), _anon_classes(NULL), _num_anon_classes(0),
       _num_folded(0)
     {}
 
--- a/src/hotspot/share/classfile/classLoaderStats.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderStats.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -87,8 +87,8 @@
     _chunk_sz(0),
     _block_sz(0),
     _classes_count(0),
+    _anon_chunk_sz(0),
     _anon_block_sz(0),
-    _anon_chunk_sz(0),
     _anon_classes_count(0) {
   }
 };
@@ -118,11 +118,11 @@
 public:
   ClassLoaderStatsClosure(outputStream* out) :
     _out(out),
+    _stats(new StatsTable()),
     _total_loaders(0),
-    _total_block_sz(0),
+    _total_classes(0),
     _total_chunk_sz(0),
-    _total_classes(0),
-    _stats(new StatsTable()) {
+    _total_block_sz(0) {
   }
 
   virtual void do_cld(ClassLoaderData* cld);
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -688,8 +688,8 @@
 
  public:
   FindMethodsByErasedSig(Symbol* name, Symbol* signature, bool is_interf) :
-      _method_name(name), _method_signature(signature), _cur_class_is_interface(is_interf),
-      _family(NULL) {}
+      _method_name(name), _method_signature(signature), _family(NULL),
+      _cur_class_is_interface(is_interf) {}
 
   void get_discovered_family(MethodFamily** family) {
       if (_family != NULL) {
--- a/src/hotspot/share/classfile/dictionary.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/dictionary.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -53,16 +53,16 @@
 }
 
 Dictionary::Dictionary(ClassLoaderData* loader_data, int table_size, bool resizable)
-  : _loader_data(loader_data), _resizable(resizable), _needs_resizing(false),
-  Hashtable<InstanceKlass*, mtClass>(table_size, (int)entry_size()) {
+  : Hashtable<InstanceKlass*, mtClass>(table_size, (int)entry_size()),
+    _resizable(resizable), _needs_resizing(false), _loader_data(loader_data) {
 };
 
 
 Dictionary::Dictionary(ClassLoaderData* loader_data,
                        int table_size, HashtableBucket<mtClass>* t,
                        int number_of_entries, bool resizable)
-  : _loader_data(loader_data), _resizable(resizable), _needs_resizing(false),
-  Hashtable<InstanceKlass*, mtClass>(table_size, (int)entry_size(), t, number_of_entries) {
+  : Hashtable<InstanceKlass*, mtClass>(table_size, (int)entry_size(), t, number_of_entries),
+    _resizable(resizable), _needs_resizing(false), _loader_data(loader_data) {
 };
 
 Dictionary::~Dictionary() {
--- a/src/hotspot/share/classfile/javaClasses.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -2005,7 +2005,7 @@
  public:
 
   // constructor for new backtrace
-  BacktraceBuilder(TRAPS): _methods(NULL), _bcis(NULL), _head(NULL), _mirrors(NULL), _names(NULL) {
+  BacktraceBuilder(TRAPS): _head(NULL), _methods(NULL), _bcis(NULL), _mirrors(NULL), _names(NULL) {
     expand(CHECK);
     _backtrace = Handle(THREAD, _head);
     _index = 0;
@@ -2102,7 +2102,7 @@
   Symbol* _name;
   Handle _mirror;
   BacktraceElement(Handle mirror, int mid, int version, int bci, Symbol* name) :
-                   _mirror(mirror), _method_id(mid), _version(version), _bci(bci), _name(name) {}
+                   _method_id(mid), _bci(bci), _version(version), _name(name), _mirror(mirror) {}
 };
 
 class BacktraceIterator : public StackObj {
--- a/src/hotspot/share/classfile/stackMapFrame.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/stackMapFrame.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -33,8 +33,8 @@
 
 StackMapFrame::StackMapFrame(u2 max_locals, u2 max_stack, ClassVerifier* v) :
                       _offset(0), _locals_size(0), _stack_size(0),
-                      _stack_mark(0), _flags(0), _max_locals(max_locals),
-                      _max_stack(max_stack), _verifier(v) {
+                      _stack_mark(0), _max_locals(max_locals),
+                      _max_stack(max_stack), _flags(0), _verifier(v) {
   Thread* thr = v->thread();
   _locals = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_locals);
   _stack = NEW_RESOURCE_ARRAY_IN_THREAD(thr, VerificationType, max_stack);
--- a/src/hotspot/share/classfile/stackMapFrame.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/stackMapFrame.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -105,12 +105,12 @@
                 u2 max_stack,
                 VerificationType* locals,
                 VerificationType* stack,
-                ClassVerifier* v) : _offset(offset), _flags(flags),
+                ClassVerifier* v) : _offset(offset),
                                     _locals_size(locals_size),
                                     _stack_size(stack_size),
                                     _stack_mark(-1),
                                     _max_locals(max_locals),
-                                    _max_stack(max_stack),
+                                    _max_stack(max_stack),  _flags(flags),
                                     _locals(locals), _stack(stack),
                                     _verifier(v) { }
 
--- a/src/hotspot/share/classfile/stringTable.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -122,7 +122,7 @@
 
  public:
   StringTableLookupJchar(Thread* thread, uintx hash, const jchar* key, int len)
-    : _thread(thread), _hash(hash), _str(key), _len(len) {
+    : _thread(thread), _hash(hash), _len(len), _str(key) {
   }
   uintx get_hash() const {
     return _hash;
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -242,12 +242,23 @@
 }
 
 
-// Forwards to resolve_instance_class_or_null
+// Forwards to resolve_array_class_or_null or resolve_instance_class_or_null
 
 Klass* SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) {
   if (FieldType::is_array(class_name)) {
     return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
-  } else if (FieldType::is_obj(class_name)) {
+  } else {
+    return resolve_instance_class_or_null_helper(class_name, class_loader, protection_domain, THREAD);
+  }
+}
+
+// name may be in the form of "java/lang/Object" or "Ljava/lang/Object;"
+InstanceKlass* SystemDictionary::resolve_instance_class_or_null_helper(Symbol* class_name,
+                                                                       Handle class_loader,
+                                                                       Handle protection_domain,
+                                                                       TRAPS) {
+  assert(class_name != NULL && !FieldType::is_array(class_name), "must be");
+  if (FieldType::is_obj(class_name)) {
     ResourceMark rm(THREAD);
     // Ignore wrapping L and ;.
     TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1,
@@ -330,17 +341,18 @@
 // placeholders()->find_and_add(PlaceholderTable::LOAD_SUPER),
 // you need to find_and_remove it before returning.
 // So be careful to not exit with a CHECK_ macro betweeen these calls.
-Klass* SystemDictionary::resolve_super_or_fail(Symbol* child_name,
-                                                 Symbol* class_name,
-                                                 Handle class_loader,
-                                                 Handle protection_domain,
-                                                 bool is_superclass,
-                                                 TRAPS) {
+InstanceKlass* SystemDictionary::resolve_super_or_fail(Symbol* child_name,
+                                                       Symbol* super_name,
+                                                       Handle class_loader,
+                                                       Handle protection_domain,
+                                                       bool is_superclass,
+                                                       TRAPS) {
+  assert(!FieldType::is_array(super_name), "invalid super class name");
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
     // Special processing for CDS dump time.
-    Klass* k = SystemDictionaryShared::dump_time_resolve_super_or_fail(child_name,
-        class_name, class_loader, protection_domain, is_superclass, CHECK_NULL);
+    InstanceKlass* k = SystemDictionaryShared::dump_time_resolve_super_or_fail(child_name,
+        super_name, class_loader, protection_domain, is_superclass, CHECK_NULL);
     if (k) {
       return k;
     }
@@ -372,18 +384,17 @@
   bool throw_circularity_error = false;
   {
     MutexLocker mu(SystemDictionary_lock, THREAD);
-    Klass* childk = find_class(d_hash, child_name, dictionary);
-    Klass* quicksuperk;
+    InstanceKlass* childk = find_class(d_hash, child_name, dictionary);
+    InstanceKlass* quicksuperk;
     // to support // loading: if child done loading, just return superclass
-    // if class_name, & class_loader don't match:
+    // if super_name, & class_loader don't match:
     // if initial define, SD update will give LinkageError
     // if redefine: compare_class_versions will give HIERARCHY_CHANGED
     // so we don't throw an exception here.
     // see: nsk redefclass014 & java.lang.instrument Instrument032
     if ((childk != NULL ) && (is_superclass) &&
-       ((quicksuperk = childk->super()) != NULL) &&
-
-         ((quicksuperk->name() == class_name) &&
+        ((quicksuperk = childk->java_super()) != NULL) &&
+         ((quicksuperk->name() == super_name) &&
             (oopDesc::equals(quicksuperk->class_loader(), class_loader())))) {
            return quicksuperk;
     } else {
@@ -394,7 +405,7 @@
     }
     if (!throw_circularity_error) {
       // Be careful not to exit resolve_super
-      PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, loader_data, PlaceholderTable::LOAD_SUPER, class_name, THREAD);
+      PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, loader_data, PlaceholderTable::LOAD_SUPER, super_name, THREAD);
     }
   }
   if (throw_circularity_error) {
@@ -403,12 +414,13 @@
   }
 
 // java.lang.Object should have been found above
-  assert(class_name != NULL, "null super class for resolving");
+  assert(super_name != NULL, "null super class for resolving");
   // Resolve the super class or interface, check results on return
-  Klass* superk = SystemDictionary::resolve_or_null(class_name,
-                                                    class_loader,
-                                                    protection_domain,
-                                                    THREAD);
+  InstanceKlass* superk =
+    SystemDictionary::resolve_instance_class_or_null_helper(super_name,
+                                                            class_loader,
+                                                            protection_domain,
+                                                            THREAD);
 
   // Clean up of placeholders moved so that each classloadAction registrar self-cleans up
   // It is no longer necessary to keep the placeholder table alive until update_dictionary
@@ -423,7 +435,11 @@
   }
   if (HAS_PENDING_EXCEPTION || superk == NULL) {
     // can null superk
-    superk = handle_resolution_exception(class_name, true, superk, THREAD);
+    Klass* k = handle_resolution_exception(super_name, true, superk, THREAD);
+    assert(k == NULL || k == superk, "must be");
+    if (k == NULL) {
+      superk = NULL;
+    }
   }
 
   return superk;
@@ -639,10 +655,12 @@
 // placeholders()->find_and_add(PlaceholderTable::LOAD_INSTANCE),
 // you need to find_and_remove it before returning.
 // So be careful to not exit with a CHECK_ macro betweeen these calls.
-Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
-                                                        Handle class_loader,
-                                                        Handle protection_domain,
-                                                        TRAPS) {
+//
+// name must be in the form of "java/lang/Object" -- cannot be "Ljava/lang/Object;"
+InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
+                                                                Handle class_loader,
+                                                                Handle protection_domain,
+                                                                TRAPS) {
   assert(name != NULL && !FieldType::is_array(name) &&
          !FieldType::is_obj(name), "invalid class name");
 
@@ -663,7 +681,7 @@
   // before we return a result we call out to java to check for valid protection domain
   // to allow returning the Klass* and add it to the pd_set if it is valid
   {
-    Klass* probe = dictionary->find(d_hash, name, protection_domain);
+    InstanceKlass* probe = dictionary->find(d_hash, name, protection_domain);
     if (probe != NULL) return probe;
   }
 
@@ -706,7 +724,7 @@
     MutexLocker mu(SystemDictionary_lock, THREAD);
     InstanceKlass* check = find_class(d_hash, name, dictionary);
     if (check != NULL) {
-      // Klass is already loaded, so just return it
+      // InstanceKlass is already loaded, so just return it
       class_has_been_loaded = true;
       k = check;
     } else {
@@ -877,7 +895,7 @@
   {
     ClassLoaderData* loader_data = k->class_loader_data();
     MutexLocker mu(SystemDictionary_lock, THREAD);
-    Klass* kk = find_class(name, loader_data);
+    InstanceKlass* kk = find_class(name, loader_data);
     assert(kk == k, "should be present in dictionary");
   }
 #endif
@@ -1308,11 +1326,11 @@
       }
     }
 
-    Array<Klass*>* interfaces = ik->local_interfaces();
+    Array<InstanceKlass*>* interfaces = ik->local_interfaces();
     int num_interfaces = interfaces->length();
     for (int index = 0; index < num_interfaces; index++) {
-      Klass* k = interfaces->at(index);
-      Symbol*  name  = k->name();
+      InstanceKlass* k = interfaces->at(index);
+      Symbol* name  = k->name();
       Klass* i = resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_NULL);
       if (k != i) {
         // The dynamically resolved interface class is not the same as the one we used during dump time,
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -280,12 +280,12 @@
   // Resolve a superclass or superinterface. Called from ClassFileParser,
   // parse_interfaces, resolve_instance_class_or_null, load_shared_class
   // "child_name" is the class whose super class or interface is being resolved.
-  static Klass* resolve_super_or_fail(Symbol* child_name,
-                                      Symbol* class_name,
-                                      Handle class_loader,
-                                      Handle protection_domain,
-                                      bool is_superclass,
-                                      TRAPS);
+  static InstanceKlass* resolve_super_or_fail(Symbol* child_name,
+                                              Symbol* class_name,
+                                              Handle class_loader,
+                                              Handle protection_domain,
+                                              bool is_superclass,
+                                              TRAPS);
 
   // Parse new stream. This won't update the dictionary or
   // class hierarchy, simply parse the stream. Used by JVMTI RedefineClasses.
@@ -638,7 +638,11 @@
   static SymbolPropertyTable* invoke_method_table() { return _invoke_method_table; }
 
   // Basic loading operations
-  static Klass* resolve_instance_class_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
+  static InstanceKlass* resolve_instance_class_or_null_helper(Symbol* name,
+                                                              Handle class_loader,
+                                                              Handle protection_domain,
+                                                              TRAPS);
+  static InstanceKlass* resolve_instance_class_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
   static Klass* resolve_array_class_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS);
   static InstanceKlass* handle_parallel_super_load(Symbol* class_name, Symbol* supername, Handle class_loader, Handle protection_domain, Handle lockObject, TRAPS);
   // Wait on SystemDictionary_lock; unlocks lockObject before
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -505,9 +505,9 @@
 
       {
         MutexLocker mu(SystemDictionary_lock, THREAD);
-        Klass* check = find_class(d_hash, name, dictionary);
+        InstanceKlass* check = find_class(d_hash, name, dictionary);
         if (check != NULL) {
-          return InstanceKlass::cast(check);
+          return check;
         }
       }
 
@@ -524,10 +524,9 @@
                  Symbol* class_name, Handle class_loader, TRAPS) {
   assert(UseSharedSpaces, "must be");
   assert(shared_dictionary() != NULL, "already checked");
-  Klass* k = shared_dictionary()->find_class_for_builtin_loader(class_name);
+  InstanceKlass* ik = shared_dictionary()->find_class_for_builtin_loader(class_name);
 
-  if (k != NULL) {
-    InstanceKlass* ik = InstanceKlass::cast(k);
+  if (ik != NULL) {
     if ((ik->is_shared_app_class() &&
          SystemDictionary::is_system_class_loader(class_loader()))  ||
         (ik->is_shared_platform_class() &&
@@ -594,7 +593,7 @@
   }
 
   ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
-  Klass* k;
+  InstanceKlass* k;
 
   { // UNREGISTERED loader
     if (!shared_dictionary()->class_exists_for_unregistered_loader(class_name)) {
@@ -613,7 +612,7 @@
     return NULL;
   }
 
-  return acquire_class_for_current_thread(InstanceKlass::cast(k), class_loader,
+  return acquire_class_for_current_thread(k, class_loader,
                                           protection_domain, THREAD);
 }
 
@@ -672,7 +671,7 @@
 // java/lang/Object id: 0
 // Interface   id: 2 super: 0 source: cust.jar
 // ChildClass  id: 4 super: 0 interfaces: 2 source: cust.jar
-Klass* SystemDictionaryShared::dump_time_resolve_super_or_fail(
+InstanceKlass* SystemDictionaryShared::dump_time_resolve_super_or_fail(
     Symbol* child_name, Symbol* class_name, Handle class_loader,
     Handle protection_domain, bool is_superclass, TRAPS) {
 
@@ -700,14 +699,14 @@
 }
 
 struct SharedMiscInfo {
-  Klass* _klass;
+  InstanceKlass* _klass;
   int _clsfile_size;
   int _clsfile_crc32;
 };
 
 static GrowableArray<SharedMiscInfo>* misc_info_array = NULL;
 
-void SystemDictionaryShared::set_shared_class_misc_info(Klass* k, ClassFileStream* cfs) {
+void SystemDictionaryShared::set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs) {
   assert(DumpSharedSpaces, "only when dumping");
   int clsfile_size  = cfs->length();
   int clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
@@ -731,7 +730,7 @@
   misc_info_array->append(misc_info);
 }
 
-void SystemDictionaryShared::init_shared_dictionary_entry(Klass* k, DictionaryEntry* ent) {
+void SystemDictionaryShared::init_shared_dictionary_entry(InstanceKlass* k, DictionaryEntry* ent) {
   SharedDictionaryEntry* entry = (SharedDictionaryEntry*)ent;
   entry->_id = -1;
   entry->_clsfile_size = -1;
@@ -752,7 +751,7 @@
   }
 }
 
-bool SystemDictionaryShared::add_verification_constraint(Klass* k, Symbol* name,
+bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbol* name,
          Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
   assert(DumpSharedSpaces, "called at dump time only");
 
@@ -796,7 +795,7 @@
   entry->check_verification_constraints(klass, THREAD);
 }
 
-SharedDictionaryEntry* SharedDictionary::find_entry_for(Klass* klass) {
+SharedDictionaryEntry* SharedDictionary::find_entry_for(InstanceKlass* klass) {
   Symbol* class_name = klass->name();
   unsigned int hash = compute_hash(class_name);
   int index = hash_to_index(hash);
@@ -970,7 +969,7 @@
                               entry != NULL;
                               entry = entry->next()) {
     if (entry->hash() == hash) {
-      Klass* klass = (Klass*)entry->literal();
+      InstanceKlass* klass = entry->instance_klass();
       if (klass->name() == class_name && klass->class_loader_data() == loader_data) {
         // There is already a class defined with the same name
         return false;
@@ -993,22 +992,22 @@
 //-----------------
 
 
-Klass* SharedDictionary::find_class_for_builtin_loader(const Symbol* name) const {
+InstanceKlass* SharedDictionary::find_class_for_builtin_loader(const Symbol* name) const {
   SharedDictionaryEntry* entry = get_entry_for_builtin_loader(name);
-  return entry != NULL ? entry->instance_klass() : (Klass*)NULL;
+  return entry != NULL ? entry->instance_klass() : (InstanceKlass*)NULL;
 }
 
-Klass* SharedDictionary::find_class_for_unregistered_loader(const Symbol* name,
+InstanceKlass* SharedDictionary::find_class_for_unregistered_loader(const Symbol* name,
                                                             int clsfile_size,
                                                             int clsfile_crc32) const {
 
   const SharedDictionaryEntry* entry = get_entry_for_unregistered_loader(name,
                                                                          clsfile_size,
                                                                          clsfile_crc32);
-  return entry != NULL ? entry->instance_klass() : (Klass*)NULL;
+  return entry != NULL ? entry->instance_klass() : NULL;
 }
 
-void SharedDictionary::update_entry(Klass* klass, int id) {
+void SharedDictionary::update_entry(InstanceKlass* klass, int id) {
   assert(DumpSharedSpaces, "supported only when dumping");
   Symbol* class_name = klass->name();
   unsigned int hash = compute_hash(class_name);
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -132,7 +132,7 @@
 
   // See "Identifying the loader_type of archived classes" comments above.
   LoaderType loader_type() const {
-    Klass* k = (Klass*)literal();
+    InstanceKlass* k = instance_klass();
 
     if ((k->shared_classpath_index() != UNREGISTERED_INDEX)) {
       return LT_BUILTIN;
@@ -171,17 +171,17 @@
   }
 
 public:
-  SharedDictionaryEntry* find_entry_for(Klass* klass);
+  SharedDictionaryEntry* find_entry_for(InstanceKlass* klass);
   void finalize_verification_constraints();
 
   bool add_non_builtin_klass(const Symbol* class_name,
                              ClassLoaderData* loader_data,
                              InstanceKlass* obj);
 
-  void update_entry(Klass* klass, int id);
+  void update_entry(InstanceKlass* klass, int id);
 
-  Klass* find_class_for_builtin_loader(const Symbol* name) const;
-  Klass* find_class_for_unregistered_loader(const Symbol* name,
+  InstanceKlass* find_class_for_builtin_loader(const Symbol* name) const;
+  InstanceKlass* find_class_for_unregistered_loader(const Symbol* name,
                                             int clsfile_size,
                                             int clsfile_crc32) const;
   bool class_exists_for_unregistered_loader(const Symbol* name) {
@@ -317,7 +317,7 @@
 
   static bool add_non_builtin_klass(Symbol* class_name, ClassLoaderData* loader_data,
                                     InstanceKlass* k, TRAPS);
-  static Klass* dump_time_resolve_super_or_fail(Symbol* child_name,
+  static InstanceKlass* dump_time_resolve_super_or_fail(Symbol* child_name,
                                                 Symbol* class_name,
                                                 Handle class_loader,
                                                 Handle protection_domain,
@@ -327,7 +327,7 @@
   static size_t dictionary_entry_size() {
     return (DumpSharedSpaces) ? sizeof(SharedDictionaryEntry) : sizeof(DictionaryEntry);
   }
-  static void init_shared_dictionary_entry(Klass* k, DictionaryEntry* entry) NOT_CDS_RETURN;
+  static void init_shared_dictionary_entry(InstanceKlass* k, DictionaryEntry* entry) NOT_CDS_RETURN;
   static bool is_builtin(DictionaryEntry* ent) {
     // Can't use virtual function is_builtin because DictionaryEntry doesn't initialize
     // vtable because it's not constructed properly.
@@ -345,13 +345,13 @@
     return (SharedDictionary*)ClassLoaderData::the_null_class_loader_data()->dictionary();
   }
 
-  static void update_shared_entry(Klass* klass, int id) {
+  static void update_shared_entry(InstanceKlass* klass, int id) {
     assert(DumpSharedSpaces, "sanity");
     assert((SharedDictionary*)(klass->class_loader_data()->dictionary()) != NULL, "sanity");
     ((SharedDictionary*)(klass->class_loader_data()->dictionary()))->update_entry(klass, id);
   }
 
-  static void set_shared_class_misc_info(Klass* k, ClassFileStream* cfs);
+  static void set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs);
 
   static InstanceKlass* lookup_from_stream(const Symbol* class_name,
                                            Handle class_loader,
@@ -367,7 +367,7 @@
   // ensures that you cannot load a shared class if its super type(s) are changed. However,
   // we need an additional check to ensure that the verification_constraints did not change
   // between dump time and runtime.
-  static bool add_verification_constraint(Klass* k, Symbol* name,
+  static bool add_verification_constraint(InstanceKlass* k, Symbol* name,
                   Symbol* from_name, bool from_field_is_protected,
                   bool from_is_array, bool from_is_object) NOT_CDS_RETURN_(false);
   static void finalize_verification_constraints() NOT_CDS_RETURN;
--- a/src/hotspot/share/classfile/verifier.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/classfile/verifier.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -2678,10 +2678,10 @@
     VerificationType klass_type,
     VerificationType ref_class_type) {
   if (ref_class_type.equals(klass_type)) return true;
-  Array<Klass*>* local_interfaces = klass->local_interfaces();
+  Array<InstanceKlass*>* local_interfaces = klass->local_interfaces();
   if (local_interfaces != NULL) {
     for (int x = 0; x < local_interfaces->length(); x++) {
-      Klass* k = local_interfaces->at(x);
+      InstanceKlass* k = local_interfaces->at(x);
       assert (k != NULL && k->is_interface(), "invalid interface");
       if (ref_class_type.equals(VerificationType::reference_type(k->name()))) {
         return true;
--- a/src/hotspot/share/code/codeBlob.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/codeBlob.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -71,22 +71,22 @@
 }
 
 CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
-  _name(name),
+  _type(type),
   _size(layout.size()),
   _header_size(layout.header_size()),
   _frame_complete_offset(frame_complete_offset),
   _data_offset(layout.data_offset()),
   _frame_size(frame_size),
-  _strings(CodeStrings()),
-  _oop_maps(oop_maps),
-  _caller_must_gc_arguments(caller_must_gc_arguments),
   _code_begin(layout.code_begin()),
   _code_end(layout.code_end()),
+  _content_begin(layout.content_begin()),
   _data_end(layout.data_end()),
   _relocation_begin(layout.relocation_begin()),
   _relocation_end(layout.relocation_end()),
-  _content_begin(layout.content_begin()),
-  _type(type)
+  _oop_maps(oop_maps),
+  _caller_must_gc_arguments(caller_must_gc_arguments),
+  _strings(CodeStrings()),
+  _name(name)
 {
   assert(is_aligned(layout.size(),            oopSize), "unaligned size");
   assert(is_aligned(layout.header_size(),     oopSize), "unaligned size");
@@ -99,21 +99,21 @@
 }
 
 CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
-  _name(name),
+  _type(type),
   _size(layout.size()),
   _header_size(layout.header_size()),
   _frame_complete_offset(frame_complete_offset),
   _data_offset(layout.data_offset()),
   _frame_size(frame_size),
-  _strings(CodeStrings()),
-  _caller_must_gc_arguments(caller_must_gc_arguments),
   _code_begin(layout.code_begin()),
   _code_end(layout.code_end()),
+  _content_begin(layout.content_begin()),
   _data_end(layout.data_end()),
   _relocation_begin(layout.relocation_begin()),
   _relocation_end(layout.relocation_end()),
-  _content_begin(layout.content_begin()),
-  _type(type)
+  _caller_must_gc_arguments(caller_must_gc_arguments),
+  _strings(CodeStrings()),
+  _name(name)
 {
   assert(is_aligned(_size,        oopSize), "unaligned size");
   assert(is_aligned(_header_size, oopSize), "unaligned size");
--- a/src/hotspot/share/code/codeBlob.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/codeBlob.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -268,10 +268,10 @@
     _content_offset(0),
     _code_offset(0),
     _data_offset(0),
+    _code_begin(code_begin),
+    _code_end(code_end),
     _content_begin(content_begin),
     _content_end(content_end),
-    _code_begin(code_begin),
-    _code_end(code_end),
     _data_end(data_end),
     _relocation_begin(relocation_begin),
     _relocation_end(relocation_end)
--- a/src/hotspot/share/code/compiledIC.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/compiledIC.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -252,6 +252,7 @@
 
   if (TraceICs) {
     ResourceMark rm;
+    assert(!call_info->selected_method().is_null(), "Unexpected null selected method");
     tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                    p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
   }
--- a/src/hotspot/share/code/compiledIC.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/compiledIC.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -126,7 +126,7 @@
   }
 
   CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
-                    _to_interpreter(false), _to_aot(false), _is_optimized(false), _release_icholder(false) {
+                    _is_optimized(false), _to_interpreter(false), _to_aot(false), _release_icholder(false) {
   }
   ~CompiledICInfo() {
     // In rare cases the info is computed but not used, so release any
--- a/src/hotspot/share/code/compiledMethod.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/compiledMethod.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -39,13 +39,13 @@
 
 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
   : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
-  _method(method), _mark_for_deoptimization_status(not_marked) {
+  _mark_for_deoptimization_status(not_marked), _method(method) {
   init_defaults();
 }
 
 CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
   : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
-  _method(method), _mark_for_deoptimization_status(not_marked) {
+  _mark_for_deoptimization_status(not_marked), _method(method) {
   init_defaults();
 }
 
--- a/src/hotspot/share/code/dependencies.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/dependencies.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -509,9 +509,9 @@
     bool  _valid;
     void* _value;
    public:
-    DepArgument() : _is_oop(false), _value(NULL), _valid(false) {}
-    DepArgument(oop v): _is_oop(true), _value(v), _valid(true) {}
-    DepArgument(Metadata* v): _is_oop(false), _value(v), _valid(true) {}
+    DepArgument() : _is_oop(false), _valid(false), _value(NULL) {}
+    DepArgument(oop v): _is_oop(true), _valid(true), _value(v) {}
+    DepArgument(Metadata* v): _is_oop(false), _valid(true), _value(v) {}
 
     bool is_null() const               { return _value == NULL; }
     bool is_oop() const                { return _is_oop; }
@@ -582,15 +582,15 @@
 
   public:
     DepStream(Dependencies* deps)
-      : _deps(deps),
-        _code(NULL),
+      : _code(NULL),
+        _deps(deps),
         _bytes(deps->content_bytes())
     {
       initial_asserts(deps->size_in_bytes());
     }
     DepStream(nmethod* code)
-      : _deps(NULL),
-        _code(code),
+      : _code(code),
+        _deps(NULL),
         _bytes(code->dependencies_begin())
     {
       initial_asserts(code->dependencies_size());
@@ -716,7 +716,7 @@
     // iteration variables:
     ChangeType  _change_type;
     Klass*      _klass;
-    Array<Klass*>* _ti_base;    // i.e., transitive_interfaces
+    Array<InstanceKlass*>* _ti_base;    // i.e., transitive_interfaces
     int         _ti_index;
     int         _ti_limit;
 
--- a/src/hotspot/share/code/dependencyContext.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/dependencyContext.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -53,7 +53,7 @@
 
  public:
   nmethodBucket(nmethod* nmethod, nmethodBucket* next) :
-   _nmethod(nmethod), _next(next), _count(1) {}
+   _nmethod(nmethod), _count(1), _next(next) {}
 
   int count()                             { return _count; }
   int increment()                         { _count += 1; return _count; }
--- a/src/hotspot/share/code/exceptionHandlerTable.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/exceptionHandlerTable.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -147,7 +147,7 @@
   implicit_null_entry *adr( uint idx ) const { return &_data[2*idx]; }
   ReallocMark          _nesting;  // assertion check for reallocations
 public:
-  ImplicitExceptionTable( ) :  _data(0), _size(0), _len(0) { }
+  ImplicitExceptionTable( ) :  _size(0), _len(0), _data(0) { }
   // (run-time) construction from nmethod
   ImplicitExceptionTable( const nmethod *nm );
 
--- a/src/hotspot/share/code/nmethod.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1540,6 +1540,7 @@
         }
       } else if (iter.type() == relocInfo::virtual_call_type) {
         // Check compiledIC holders associated with this nmethod
+        ResourceMark rm;
         CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           CompiledICHolder* cichk = ic->cached_icholder();
--- a/src/hotspot/share/code/oopRecorder.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/oopRecorder.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -162,7 +162,7 @@
 
 oop ObjectLookup::ObjectEntry::oop_value() const { return JNIHandles::resolve(_value); }
 
-ObjectLookup::ObjectLookup(): _gc_count(Universe::heap()->total_collections()), _values(4) {}
+ObjectLookup::ObjectLookup(): _values(4), _gc_count(Universe::heap()->total_collections()) {}
 
 void ObjectLookup::maybe_resort() {
   // The values are kept sorted by address which may be invalidated
--- a/src/hotspot/share/code/vtableStubs.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/code/vtableStubs.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -49,8 +49,8 @@
   void* operator new(size_t size, int code_size) throw();
 
   VtableStub(bool is_vtable_stub, int index)
-        : _next(NULL), _is_vtable_stub(is_vtable_stub),
-          _index(index), _ame_offset(-1), _npe_offset(-1) {}
+        : _next(NULL), _index(index), _ame_offset(-1), _npe_offset(-1),
+          _is_vtable_stub(is_vtable_stub) {}
   VtableStub* next() const                       { return _next; }
   int index() const                              { return _index; }
   static VMReg receiver_location()               { return _receiver_location; }
--- a/src/hotspot/share/compiler/abstractCompiler.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/compiler/abstractCompiler.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -91,7 +91,7 @@
 #endif
 
  public:
-  AbstractCompiler(CompilerType type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {}
+  AbstractCompiler(CompilerType type) : _num_compiler_threads(0), _compiler_state(uninitialized), _type(type) {}
 
   // This function determines the compiler thread that will perform the
   // shutdown of the corresponding compiler runtime.
--- a/src/hotspot/share/compiler/compilerDirectives.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/compiler/compilerDirectives.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -31,7 +31,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 
-CompilerDirectives::CompilerDirectives() :_match(NULL), _next(NULL), _ref_count(0) {
+CompilerDirectives::CompilerDirectives() : _next(NULL), _match(NULL), _ref_count(0) {
   _c1_store = new DirectiveSet(this);
   _c2_store = new DirectiveSet(this);
 };
@@ -442,7 +442,7 @@
   char str[] = "*.*";
   const char* error_msg = NULL;
   _default_directives->add_match(str, error_msg);
-#ifdef COMPILER1
+#if defined(COMPILER1) || INCLUDE_JVMCI
   _default_directives->_c1_store->EnableOption = true;
 #endif
 #ifdef COMPILER2
--- a/src/hotspot/share/compiler/methodLiveness.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/compiler/methodLiveness.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -569,11 +569,11 @@
 
 
 MethodLiveness::BasicBlock::BasicBlock(MethodLiveness *analyzer, int start, int limit) :
-         _gen(analyzer->arena(),            analyzer->bit_map_size_bits()),
-         _kill(analyzer->arena(),           analyzer->bit_map_size_bits()),
          _entry(analyzer->arena(),          analyzer->bit_map_size_bits()),
          _normal_exit(analyzer->arena(),    analyzer->bit_map_size_bits()),
          _exception_exit(analyzer->arena(), analyzer->bit_map_size_bits()),
+         _gen(analyzer->arena(),            analyzer->bit_map_size_bits()),
+         _kill(analyzer->arena(),           analyzer->bit_map_size_bits()),
          _last_bci(-1) {
   _analyzer = analyzer;
   _start_bci = start;
--- a/src/hotspot/share/compiler/methodMatcher.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/compiler/methodMatcher.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -62,11 +62,11 @@
 #define RANGESLASH "[*" RANGEBASE "/]"
 
 MethodMatcher::MethodMatcher():
-    _class_mode(Exact)
-  , _method_mode(Exact)
-  , _class_name(NULL)
+    _class_name(NULL)
   , _method_name(NULL)
-  , _signature(NULL) {
+  , _signature(NULL)
+  , _class_mode(Exact)
+  , _method_mode(Exact) {
 }
 
 MethodMatcher::~MethodMatcher() {
--- a/src/hotspot/share/compiler/oopMap.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/compiler/oopMap.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -604,7 +604,7 @@
 }
 #endif
 
-ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _new_set(NULL), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1) {
+ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
   _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
 }
 
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -320,7 +320,12 @@
 
 // Constructor
 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
+  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
+                    CMSRescanMultiple),
+  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
+                    CMSConcMarkMultiple),
   _bt(bs, mr),
+  _collector(NULL),
   // free list locks are in the range of values taken by _lockRank
   // This range currently is [_leaf+2, _leaf+3]
   // Note: this requires that CFLspace c'tors
@@ -328,15 +333,10 @@
   // are acquired in the program text. This is true today.
   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
                 Monitor::_safepoint_check_sometimes),
+  _preconsumptionDirtyCardClosure(NULL),
   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                           "CompactibleFreeListSpace._dict_par_lock", true,
-                          Monitor::_safepoint_check_never),
-  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
-                    CMSRescanMultiple),
-  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
-                    CMSConcMarkMultiple),
-  _collector(NULL),
-  _preconsumptionDirtyCardClosure(NULL)
+                          Monitor::_safepoint_check_never)
 {
   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
          "FreeChunk is larger than expected");
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -449,57 +449,57 @@
 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                            CardTableRS*                   ct,
                            ConcurrentMarkSweepPolicy*     cp):
+  _overflow_list(NULL),
+  _conc_workers(NULL),     // may be set later
+  _completed_initialization(false),
+  _collection_count_start(0),
+  _should_unload_classes(CMSClassUnloadingEnabled),
+  _concurrent_cycles_since_last_unload(0),
+  _roots_scanning_options(GenCollectedHeap::SO_None),
+  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
+  _verifying(false),
+  _collector_policy(cp),
+  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
+  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
+  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
+  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
+  _cms_start_registered(false),
   _cmsGen(cmsGen),
   // Adjust span to cover old (cms) gen
   _span(cmsGen->reserved()),
   _ct(ct),
-  _span_based_discoverer(_span),
-  _ref_processor(NULL),    // will be set later
-  _conc_workers(NULL),     // may be set later
-  _abort_preclean(false),
-  _start_sampling(false),
-  _between_prologue_and_epilogue(false),
   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
   _modUnionTable((CardTable::card_shift - LogHeapWordSize),
                  -1 /* lock-free */, "No_lock" /* dummy */),
-  _modUnionClosurePar(&_modUnionTable),
+  _restart_addr(NULL),
+  _ser_pmc_preclean_ovflw(0),
+  _ser_pmc_remark_ovflw(0),
+  _par_pmc_remark_ovflw(0),
+  _ser_kac_preclean_ovflw(0),
+  _ser_kac_ovflw(0),
+  _par_kac_ovflw(0),
+#ifndef PRODUCT
+  _num_par_pushes(0),
+#endif
+  _span_based_discoverer(_span),
+  _ref_processor(NULL),    // will be set later
   // Construct the is_alive_closure with _span & markBitMap
   _is_alive_closure(_span, &_markBitMap),
-  _restart_addr(NULL),
-  _overflow_list(NULL),
+  _modUnionClosurePar(&_modUnionTable),
+  _between_prologue_and_epilogue(false),
+  _abort_preclean(false),
+  _start_sampling(false),
   _stats(cmsGen),
   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                              //verify that this lock should be acquired with safepoint check.
                              Monitor::_safepoint_check_sometimes)),
   _eden_chunk_array(NULL),     // may be set in ctor body
+  _eden_chunk_index(0),        // -- ditto --
   _eden_chunk_capacity(0),     // -- ditto --
-  _eden_chunk_index(0),        // -- ditto --
-  _survivor_plab_array(NULL),  // -- ditto --
   _survivor_chunk_array(NULL), // -- ditto --
+  _survivor_chunk_index(0),    // -- ditto --
   _survivor_chunk_capacity(0), // -- ditto --
-  _survivor_chunk_index(0),    // -- ditto --
-  _ser_pmc_preclean_ovflw(0),
-  _ser_kac_preclean_ovflw(0),
-  _ser_pmc_remark_ovflw(0),
-  _par_pmc_remark_ovflw(0),
-  _ser_kac_ovflw(0),
-  _par_kac_ovflw(0),
-#ifndef PRODUCT
-  _num_par_pushes(0),
-#endif
-  _collection_count_start(0),
-  _verifying(false),
-  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
-  _completed_initialization(false),
-  _collector_policy(cp),
-  _should_unload_classes(CMSClassUnloadingEnabled),
-  _concurrent_cycles_since_last_unload(0),
-  _roots_scanning_options(GenCollectedHeap::SO_None),
-  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
-  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
-  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
-  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
-  _cms_start_registered(false)
+  _survivor_plab_array(NULL)   // -- ditto --
 {
   // Now expand the span and allocate the collection support structures
   // (MUT, marking bit map etc.) to cover both generations subject to
@@ -573,7 +573,6 @@
         log_warning(gc)("task_queues allocation failure.");
         return;
       }
-      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
       for (i = 0; i < num_queues; i++) {
         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
@@ -585,7 +584,6 @@
       }
       for (i = 0; i < num_queues; i++) {
         _task_queues->queue(i)->initialize();
-        _hash_seed[i] = 17;  // copied from ParNew
       }
     }
   }
@@ -3039,11 +3037,12 @@
                  OopTaskQueueSet* task_queues):
     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
     _collector(collector),
+    _n_workers(0),
+    _result(true),
     _cms_space(cms_space),
-    _n_workers(0), _result(true),
+    _bit_map_lock(collector->bitMapLock()),
     _task_queues(task_queues),
-    _term(_n_workers, task_queues, _collector),
-    _bit_map_lock(collector->bitMapLock())
+    _term(_n_workers, task_queues, _collector)
   {
     _requested_size = _n_workers;
     _term.set_task(this);
@@ -3322,9 +3321,9 @@
     _collector(collector),
     _task(task),
     _span(collector->_span),
-    _work_queue(work_queue),
     _bit_map(bit_map),
-    _overflow_stack(overflow_stack)
+    _overflow_stack(overflow_stack),
+    _work_queue(work_queue)
   { }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
@@ -3416,7 +3415,6 @@
   oop obj_to_scan;
   CMSBitMap* bm = &(_collector->_markBitMap);
   CMSMarkStack* ovflw = &(_collector->_markStack);
-  int* seed = _collector->hash_seed(i);
   ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
   while (true) {
     cl.trim_queue(0);
@@ -3426,7 +3424,7 @@
       // overflow stack may already have been stolen from us.
       // assert(work_q->size() > 0, "Work from overflow stack");
       continue;
-    } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+    } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
       assert(oopDesc::is_oop(obj_to_scan), "Should be an oop");
       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
       obj_to_scan->oop_iterate(&cl);
@@ -4325,7 +4323,7 @@
                                   ParMarkRefsIntoAndScanClosure* cl);
 
   // ... work stealing for the above
-  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
+  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl);
 };
 
 class RemarkCLDClosure : public CLDClosure {
@@ -4470,7 +4468,7 @@
   // ---------- ... and drain overflow list.
   _timer.reset();
   _timer.start();
-  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
+  do_work_steal(worker_id, &par_mrias_cl);
   _timer.stop();
   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
 }
@@ -4619,8 +4617,7 @@
 
 // . see if we can share work_queues with ParNew? XXX
 void
-CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
-                                int* seed) {
+CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) {
   OopTaskQueue* work_q = work_queue(i);
   NOT_PRODUCT(int num_steals = 0;)
   oop obj_to_scan;
@@ -4651,7 +4648,7 @@
     // Verify that we have no work before we resort to stealing
     assert(work_q->size() == 0, "Have work, shouldn't steal");
     // Try to steal from other queues that have work
-    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+    if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
       NOT_PRODUCT(num_steals++;)
       assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
@@ -5028,8 +5025,10 @@
     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
       task_queues,
       workers->active_workers()),
-    _task(task),
-    _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
+    _collector(collector),
+    _mark_bit_map(mark_bit_map),
+    _span(span),
+    _task(task)
   {
     assert(_collector->_span.equals(_span) && !_span.is_empty(),
            "Inconsistency in _span");
@@ -5041,8 +5040,7 @@
 
   void do_work_steal(int i,
                      CMSParDrainMarkingStackClosure* drain,
-                     CMSParKeepAliveClosure* keep_alive,
-                     int* seed);
+                     CMSParKeepAliveClosure* keep_alive);
 
   virtual void work(uint worker_id);
 };
@@ -5060,8 +5058,7 @@
   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
   if (_task.marks_oops_alive()) {
-    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
-                  _collector->hash_seed(worker_id));
+    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive);
   }
   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
@@ -5070,8 +5067,8 @@
 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
    _span(span),
+   _work_queue(work_queue),
    _bit_map(bit_map),
-   _work_queue(work_queue),
    _mark_and_push(collector, span, bit_map, work_queue),
    _low_water_mark(MIN2((work_queue->max_elems()/4),
                         ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
@@ -5080,8 +5077,7 @@
 // . see if we can share work_queues with ParNew? XXX
 void CMSRefProcTaskProxy::do_work_steal(int i,
   CMSParDrainMarkingStackClosure* drain,
-  CMSParKeepAliveClosure* keep_alive,
-  int* seed) {
+  CMSParKeepAliveClosure* keep_alive) {
   OopTaskQueue* work_q = work_queue(i);
   NOT_PRODUCT(int num_steals = 0;)
   oop obj_to_scan;
@@ -5110,7 +5106,7 @@
     // Verify that we have no work before we resort to stealing
     assert(work_q->size() == 0, "Have work, shouldn't steal");
     // Try to steal from other queues that have work
-    if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
+    if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
       NOT_PRODUCT(num_steals++;)
       assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
@@ -5609,8 +5605,8 @@
 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
 // further below.
 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
+  _shifter(shifter),
   _bm(),
-  _shifter(shifter),
   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
                                     Monitor::_safepoint_check_sometimes) : NULL)
 {
@@ -5859,15 +5855,15 @@
                                                        CMSCollector* collector,
                                                        bool should_yield,
                                                        bool concurrent_precleaning):
-  _collector(collector),
   _span(span),
   _bit_map(bit_map),
   _mark_stack(mark_stack),
   _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
                       mark_stack, concurrent_precleaning),
+  _collector(collector),
+  _freelistLock(NULL),
   _yield(should_yield),
-  _concurrent_precleaning(concurrent_precleaning),
-  _freelistLock(NULL)
+  _concurrent_precleaning(concurrent_precleaning)
 {
   // FIXME: Should initialize in base class constructor.
   assert(rd != NULL, "ref_discoverer shouldn't be NULL");
@@ -6964,10 +6960,10 @@
   _limit(_sp->sweep_limit()),
   _freelistLock(_sp->freelistLock()),
   _bitMap(bitMap),
-  _yield(should_yield),
   _inFreeRange(false),           // No free range at beginning of sweep
   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
   _lastFreeRangeCoalesced(false),
+  _yield(should_yield),
   _freeFinger(g->used_region().start())
 {
   NOT_PRODUCT(
@@ -7521,15 +7517,14 @@
          (!_span.contains(addr) || _bit_map->isMarked(addr));
 }
 
-
 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
                       MemRegion span,
                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
                       bool cpc):
   _collector(collector),
   _span(span),
+  _mark_stack(mark_stack),
   _bit_map(bit_map),
-  _mark_stack(mark_stack),
   _concurrent_precleaning(cpc) {
   assert(!_span.is_empty(), "Empty span could spell trouble");
 }
@@ -7617,8 +7612,8 @@
                                 OopTaskQueue* work_queue):
   _collector(collector),
   _span(span),
-  _bit_map(bit_map),
-  _work_queue(work_queue) { }
+  _work_queue(work_queue),
+  _bit_map(bit_map) { }
 
 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -544,8 +544,6 @@
   Stack<oop, mtGC>     _preserved_oop_stack;
   Stack<markOop, mtGC> _preserved_mark_stack;
 
-  int*             _hash_seed;
-
   // In support of multi-threaded concurrent phases
   YieldingFlexibleWorkGang* _conc_workers;
 
@@ -713,7 +711,6 @@
   bool stop_world_and_do(CMS_op_type op);
 
   OopTaskQueueSet* task_queues() { return _task_queues; }
-  int*             hash_seed(int i) { return &_hash_seed[i]; }
   YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
 
   // Support for parallelizing Eden rescan in CMS remark phase
@@ -1455,9 +1452,9 @@
                             CMSMarkStack* mark_stack,
                             MarkRefsIntoAndScanClosure* cl):
     _space(space),
-    _num_dirty_cards(0),
     _scan_cl(collector, span, collector->ref_processor(), bit_map,
-                 mark_stack, cl) { }
+                 mark_stack, cl),
+    _num_dirty_cards(0) { }
 
   MarkFromDirtyCardsClosure(CMSCollector* collector,
                             MemRegion span,
@@ -1466,9 +1463,9 @@
                             OopTaskQueue* work_queue,
                             ParMarkRefsIntoAndScanClosure* cl):
     _space(space),
-    _num_dirty_cards(0),
     _scan_cl(collector, span, collector->ref_processor(), bit_map,
-             work_queue, cl) { }
+             work_queue, cl),
+    _num_dirty_cards(0) { }
 
   void do_MemRegion(MemRegion mr);
   void set_space(CompactibleFreeListSpace* space) { _space = space; }
@@ -1710,8 +1707,8 @@
                       bool cpc):
     _collector(collector),
     _span(span),
+    _mark_stack(mark_stack),
     _bit_map(bit_map),
-    _mark_stack(mark_stack),
     _keep_alive(keep_alive),
     _concurrent_precleaning(cpc) {
     assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
@@ -1735,8 +1732,8 @@
                                  OopTaskQueue* work_queue):
     _collector(collector),
     _span(span),
+    _work_queue(work_queue),
     _bit_map(bit_map),
-    _work_queue(work_queue),
     _mark_and_push(collector, span, bit_map, work_queue) { }
 
  public:
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -75,27 +75,28 @@
                                        PreservedMarks* preserved_marks_,
                                        size_t desired_plab_sz_,
                                        ParallelTaskTerminator& term_) :
-  _to_space(to_space_),
-  _old_gen(old_gen_),
-  _young_gen(young_gen_),
-  _thread_num(thread_num_),
   _work_queue(work_queue_set_->queue(thread_num_)),
-  _to_space_full(false),
   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
   _preserved_marks(preserved_marks_),
-  _ageTable(false), // false ==> not the global age table, no perf data.
   _to_space_alloc_buffer(desired_plab_sz_),
   _to_space_closure(young_gen_, this),
   _old_gen_closure(young_gen_, this),
   _to_space_root_closure(young_gen_, this),
+  _older_gen_closure(young_gen_, this),
   _old_gen_root_closure(young_gen_, this),
-  _older_gen_closure(young_gen_, this),
   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                       &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                       work_queue_set_, &term_),
   _is_alive_closure(young_gen_),
   _scan_weak_ref_closure(young_gen_, this),
   _keep_alive_closure(&_scan_weak_ref_closure),
+  _to_space(to_space_),
+  _young_gen(young_gen_),
+  _old_gen(old_gen_),
+  _young_old_boundary(NULL),
+  _thread_num(thread_num_),
+  _ageTable(false), // false ==> not the global age table, no perf data.
+  _to_space_full(false),
   _strong_roots_time(0.0),
   _term_time(0.0)
 {
@@ -106,7 +107,6 @@
   #endif // TASKQUEUE_STATS
 
   _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
-  _hash_seed = 17;  // Might want to take time-based random value.
   _start = os::elapsedTime();
   _old_gen_closure.set_generation(old_gen_);
   _old_gen_root_closure.set_generation(old_gen_);
@@ -345,9 +345,9 @@
                                              PreservedMarksSet& preserved_marks_set,
                                              size_t desired_plab_sz,
                                              ParallelTaskTerminator& term)
-  : _young_gen(young_gen),
+  : _term(term),
+    _young_gen(young_gen),
     _old_gen(old_gen),
-    _term(term),
     _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)),
     _num_threads(num_threads)
 {
@@ -530,8 +530,8 @@
 
     _par_scan_state(par_scan_state_),
     _to_space_closure(to_space_closure_),
+    _to_space_root_closure(to_space_root_closure_),
     _old_gen_closure(old_gen_closure_),
-    _to_space_root_closure(to_space_root_closure_),
     _old_gen_root_closure(old_gen_root_closure_),
     _par_gen(par_gen_),
     _task_queues(task_queues_),
@@ -550,7 +550,6 @@
 
     // Attempt to steal work from promoted.
     if (task_queues()->steal(par_scan_state()->thread_num(),
-                             par_scan_state()->hash_seed(),
                              obj_to_scan)) {
       bool res = work_q->push(obj_to_scan);
       assert(res, "Empty queue should have room for a push.");
@@ -627,9 +626,9 @@
 
 ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
   : DefNewGeneration(rs, initial_byte_size, "PCopy"),
+  _plab_stats("Young", YoungPLABSize, PLABWeight),
   _overflow_list(NULL),
-  _is_alive_closure(this),
-  _plab_stats("Young", YoungPLABSize, PLABWeight)
+  _is_alive_closure(this)
 {
   NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
   NOT_PRODUCT(_num_par_pushes = 0;)
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -96,7 +96,6 @@
 
   HeapWord *_young_old_boundary;
 
-  int _hash_seed;
   int _thread_num;
   AgeTable _ageTable;
 
@@ -165,7 +164,6 @@
   // Is new_obj a candidate for scan_partial_array_and_push_remainder method.
   inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
 
-  int* hash_seed()  { return &_hash_seed; }
   int  thread_num() { return _thread_num; }
 
   // Allocate a to-space block of size "sz", or else return NULL.
--- a/src/hotspot/share/gc/epsilon/epsilonMemoryPool.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -27,11 +27,11 @@
 #include "gc/epsilon/epsilonMemoryPool.hpp"
 
 EpsilonMemoryPool::EpsilonMemoryPool(EpsilonHeap* heap) :
-        _heap(heap),
         CollectedMemoryPool("Epsilon Heap",
                             heap->capacity(),
                             heap->max_capacity(),
-                            false) {
+                            false),
+        _heap(heap) {
   assert(UseEpsilonGC, "sanity");
 }
 
--- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -42,7 +42,7 @@
   // addr (the address of the field to be read) must be a LIR_Address
   // pre_val (a temporary register) must be a register;
   G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
-    _addr(addr), _pre_val(pre_val), _do_load(true),
+    _do_load(true), _addr(addr), _pre_val(pre_val),
     _patch_code(patch_code), _info(info)
   {
     assert(_pre_val->is_register(), "should be temporary register");
@@ -52,7 +52,7 @@
   // Version that _does not_ generate load of the previous value; the
   // previous value is assumed to have already been loaded into pre_val.
   G1PreBarrierStub(LIR_Opr pre_val) :
-    _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
+    _do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val),
     _patch_code(lir_patch_none), _info(NULL)
   {
     assert(_pre_val->is_register(), "should be a register");
--- a/src/hotspot/share/gc/g1/g1AllocRegion.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -251,10 +251,12 @@
 
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
-  : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _count(0),
-    _used_bytes_before(0) { }
-
+  : _alloc_region(NULL),
+    _count(0),
+    _used_bytes_before(0),
+    _bot_updates(bot_updates),
+    _name(name)
+ { }
 
 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                     bool force) {
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -37,9 +37,10 @@
   _g1h(heap),
   _survivor_is_full(false),
   _old_is_full(false),
-  _retained_old_gc_alloc_region(NULL),
+  _mutator_alloc_region(),
   _survivor_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Young)),
-  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)) {
+  _old_gc_alloc_region(heap->alloc_buffer_stats(InCSetState::Old)),
+  _retained_old_gc_alloc_region(NULL) {
 }
 
 void G1Allocator::init_mutator_alloc_region() {
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -221,6 +221,7 @@
 
 public:
   G1ArchiveAllocator(G1CollectedHeap* g1h, bool open) :
+    _open(open),
     _g1h(g1h),
     _allocation_region(NULL),
     _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
@@ -229,8 +230,7 @@
     _summary_bytes_used(0),
     _bottom(NULL),
     _top(NULL),
-    _max(NULL),
-    _open(open) { }
+    _max(NULL) { }
 
   virtual ~G1ArchiveAllocator() {
     assert(_allocation_region == NULL, "_allocation_region not NULL");
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -84,12 +84,12 @@
     _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
-    _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
     _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _non_young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
     _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
+    _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
     _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
     _recent_avg_pause_time_ratio(0.0),
     _last_pause_time_ratio(0.0) {
--- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -75,12 +75,12 @@
 //////////////////////////////////////////////////////////////////////
 
 G1BlockOffsetTablePart::G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp) :
+  _next_offset_threshold(NULL),
+  _next_offset_index(0),
+  DEBUG_ONLY(_object_can_span(false) COMMA)
   _bot(array),
-  _space(gsp),
-  _next_offset_threshold(NULL),
-  _next_offset_index(0)
+  _space(gsp)
 {
-  debug_only(_object_can_span = false;)
 }
 
 // The arguments follow the normal convention of denoting
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1407,52 +1407,70 @@
   _verifier->verify_region_sets_optional();
 }
 
-// Public methods.
-
 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
   CollectedHeap(),
   _young_gen_sampling_thread(NULL),
+  _workers(NULL),
   _collector_policy(collector_policy),
+  _card_table(NULL),
   _soft_ref_policy(),
-  _card_table(NULL),
   _memory_manager("G1 Young Generation", "end of minor GC"),
   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
   _eden_pool(NULL),
   _survivor_pool(NULL),
   _old_pool(NULL),
+  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
+  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
+  _bot(NULL),
+  _listener(),
+  _hrm(),
+  _allocator(NULL),
+  _verifier(NULL),
+  _summary_bytes_used(0),
+  _archive_allocator(NULL),
+  _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
+  _old_evac_stats("Old", OldPLABSize, PLABWeight),
+  _expand_heap_after_alloc_failure(true),
+  _g1mm(NULL),
+  _humongous_reclaim_candidates(),
+  _has_humongous_reclaim_candidates(false),
+  _hr_printer(),
+  _collector_state(),
+  _old_marking_cycles_started(0),
+  _old_marking_cycles_completed(0),
+  _eden(),
+  _survivor(),
   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
   _g1_policy(new G1Policy(_gc_timer_stw)),
+  _heap_sizing_policy(NULL),
   _collection_set(this, _g1_policy),
+  _hot_card_cache(NULL),
+  _g1_rem_set(NULL),
   _dirty_card_queue_set(false),
+  _cm(NULL),
+  _cm_thread(NULL),
+  _cr(NULL),
+  _task_queues(NULL),
+  _evacuation_failed(false),
+  _evacuation_failed_info_array(NULL),
+  _preserved_marks_set(true /* in_c_heap */),
+#ifndef PRODUCT
+  _evacuation_failure_alot_for_current_gc(false),
+  _evacuation_failure_alot_gc_number(0),
+  _evacuation_failure_alot_count(0),
+#endif
   _ref_processor_stw(NULL),
   _is_alive_closure_stw(this),
   _is_subject_to_discovery_stw(this),
   _ref_processor_cm(NULL),
   _is_alive_closure_cm(this),
   _is_subject_to_discovery_cm(this),
-  _bot(NULL),
-  _hot_card_cache(NULL),
-  _g1_rem_set(NULL),
-  _cr(NULL),
-  _g1mm(NULL),
-  _preserved_marks_set(true /* in_c_heap */),
-  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
-  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
-  _humongous_reclaim_candidates(),
-  _has_humongous_reclaim_candidates(false),
-  _archive_allocator(NULL),
-  _summary_bytes_used(0),
-  _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
-  _old_evac_stats("Old", OldPLABSize, PLABWeight),
-  _expand_heap_after_alloc_failure(true),
-  _old_marking_cycles_started(0),
-  _old_marking_cycles_completed(0),
   _in_cset_fast_test() {
 
   _workers = new WorkGang("GC Thread", ParallelGCThreads,
-                          /* are_GC_task_threads */true,
-                          /* are_ConcurrentGC_threads */false);
+                          true /* are_GC_task_threads */,
+                          false /* are_ConcurrentGC_threads */);
   _workers->initialize_workers();
   _verifier = new G1HeapVerifier(this);
 
@@ -3576,10 +3594,10 @@
   // The constructor is run in the VMThread.
   G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) :
       AbstractGangTask("Parallel Cleaning"),
+      _unloading_occurred(unloading_occurred),
       _string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
       _code_cache_task(num_workers, is_alive, unloading_occurred),
       _klass_cleaning_task(),
-      _unloading_occurred(unloading_occurred),
       _resolved_method_cleaning_task() {
   }
 
@@ -4325,11 +4343,11 @@
 public:
   G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
     AbstractGangTask("G1 Free Collection Set"),
+    _collection_set(collection_set),
     _cl(evacuation_info, surviving_young_words),
-    _collection_set(collection_set),
     _surviving_young_words(surviving_young_words),
+    _rs_lengths(0),
     _serial_work_claim(0),
-    _rs_lengths(0),
     _parallel_work_claim(0),
     _num_work_items(collection_set->region_length()),
     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -181,9 +181,6 @@
   // Start a new incremental collection set for the next pause.
   void start_new_collection_set();
 
-  // The number of regions we could create by expansion.
-  uint _expansion_regions;
-
   // The block offset table for the G1 heap.
   G1BlockOffsetTable* _bot;
 
@@ -1434,9 +1431,9 @@
                                 G1ParScanThreadState* par_scan_state,
                                 RefToScanQueueSet* queues,
                                 ParallelTaskTerminator* terminator)
-    : _g1h(g1h), _par_scan_state(par_scan_state),
-      _queues(queues), _terminator(terminator),
-      _start_term(0.0), _term_time(0.0), _term_attempts(0) {}
+    : _start_term(0.0), _term_time(0.0), _term_attempts(0),
+      _g1h(g1h), _par_scan_state(par_scan_state),
+      _queues(queues), _terminator(terminator) {}
 
   void do_void();
 
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -57,12 +57,11 @@
   _eden_region_length(0),
   _survivor_region_length(0),
   _old_region_length(0),
-  _bytes_used_before(0),
-  _recorded_rs_lengths(0),
   _collection_set_regions(NULL),
   _collection_set_cur_length(0),
   _collection_set_max_length(0),
-  // Incremental CSet attributes
+  _bytes_used_before(0),
+  _recorded_rs_lengths(0),
   _inc_build_state(Inactive),
   _inc_bytes_used_before(0),
   _inc_recorded_rs_lengths(0),
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -625,7 +625,7 @@
     G1CMBitMap* _bitmap;
     G1ConcurrentMark* _cm;
   public:
-    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
+    G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _bitmap(bitmap), _cm(cm) {
     }
 
     virtual bool do_heap_region(HeapRegion* r) {
@@ -1095,7 +1095,7 @@
 
   public:
     G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
-      _g1h(g1h), _cm(cm), _num_regions_selected_for_rebuild(0), _cl(cl) { }
+      _g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
 
     virtual bool do_heap_region(HeapRegion* r) {
       update_remset_before_rebuild(r);
@@ -1415,10 +1415,9 @@
   bool              _is_serial;
 public:
   G1CMKeepAliveAndDrainClosure(G1ConcurrentMark* cm, G1CMTask* task, bool is_serial) :
-    _cm(cm), _task(task), _is_serial(is_serial),
-    _ref_counter_limit(G1RefProcDrainInterval) {
+    _cm(cm), _task(task), _ref_counter_limit(G1RefProcDrainInterval),
+    _ref_counter(_ref_counter_limit), _is_serial(is_serial) {
     assert(!_is_serial || _task->worker_id() == 0, "only task 0 for serial code");
-    _ref_counter = _ref_counter_limit;
   }
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -2466,8 +2465,8 @@
                        hits, misses, percent_of(hits, hits + misses));
 }
 
-bool G1ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry) {
-  return _task_queues->steal(worker_id, hash_seed, task_entry);
+bool G1ConcurrentMark::try_stealing(uint worker_id, G1TaskQueueEntry& task_entry) {
+  return _task_queues->steal(worker_id, task_entry);
 }
 
 /*****************************************************************************
@@ -2773,7 +2772,7 @@
            "only way to reach here");
     while (!has_aborted()) {
       G1TaskQueueEntry entry;
-      if (_cm->try_stealing(_worker_id, &_hash_seed, entry)) {
+      if (_cm->try_stealing(_worker_id, entry)) {
         scan_task_entry(entry);
 
         // And since we're towards the end, let's totally drain the
@@ -2915,7 +2914,6 @@
   _refs_reached(0),
   _refs_reached_limit(0),
   _real_refs_reached_limit(0),
-  _hash_seed(17),
   _has_aborted(false),
   _has_timed_out(false),
   _draining_satb_buffers(false),
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -519,7 +519,7 @@
   }
 
   // Attempts to steal an object from the task queues of other tasks
-  bool try_stealing(uint worker_id, int* hash_seed, G1TaskQueueEntry& task_entry);
+  bool try_stealing(uint worker_id, G1TaskQueueEntry& task_entry);
 
   G1ConcurrentMark(G1CollectedHeap* g1h,
                    G1RegionToSpaceMapper* prev_bitmap_storage,
@@ -685,8 +685,6 @@
   // it was decreased).
   size_t                      _real_refs_reached_limit;
 
-  // Used by the work stealing
-  int                         _hash_seed;
   // If true, then the task has aborted for some reason
   bool                        _has_aborted;
   // Set when the task aborts because it has met its time quota
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkBitMap.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -42,7 +42,7 @@
   G1ConcurrentMark* const _cm;
   G1CMTask* const _task;
 public:
-  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm) : _task(task), _cm(cm) { }
+  G1CMBitMapClosure(G1CMTask *task, G1ConcurrentMark* cm) : _cm(cm), _task(task) { }
 
   bool do_addr(HeapWord* const addr);
 };
@@ -88,7 +88,7 @@
     return mark_distance();
   }
 
-  G1CMBitMap() : _covered(), _bm(), _shifter(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); }
+  G1CMBitMap() : _covered(), _shifter(LogMinObjAlignment), _bm(), _listener() { _listener.set_bitmap(this); }
 
   // Initializes the underlying BitMap to cover the given area.
   void initialize(MemRegion heap, G1RegionToSpaceMapper* storage);
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -74,15 +74,14 @@
   };
 };
 
-// The CM thread is created when the G1 garbage collector is used
-
 G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
   ConcurrentGCThread(),
+  _vtime_start(0.0),
+  _vtime_accum(0.0),
+  _vtime_mark_accum(0.0),
   _cm(cm),
   _state(Idle),
-  _phase_manager_stack(),
-  _vtime_accum(0.0),
-  _vtime_mark_accum(0.0) {
+  _phase_manager_stack() {
 
   set_name("G1 Main Marker");
   create_and_start();
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -34,11 +34,12 @@
 
 G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr, uint worker_id) :
   ConcurrentGCThread(),
+  _vtime_start(0.0),
+  _vtime_accum(0.0),
   _worker_id(worker_id),
   _active(false),
   _monitor(NULL),
-  _cr(cr),
-  _vtime_accum(0.0)
+  _cr(cr)
 {
   // Each thread has its own monitor. The i-th thread is responsible for signaling
   // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -41,7 +41,6 @@
   double _vtime_start;  // Initial virtual time.
   double _vtime_accum;  // Accumulated virtual time.
   uint _worker_id;
-  uint _worker_id_offset;
 
   bool _active;
   Monitor* _monitor;
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -46,7 +46,7 @@
 
 public:
   UpdateRSetDeferred(DirtyCardQueue* dcq) :
-    _g1h(G1CollectedHeap::heap()), _ct(_g1h->card_table()), _dcq(dcq) {}
+    _g1h(G1CollectedHeap::heap()), _dcq(dcq), _ct(_g1h->card_table()) {}
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
@@ -203,10 +203,10 @@
   RemoveSelfForwardPtrHRClosure(uint worker_id,
                                 HeapRegionClaimer* hrclaimer) :
     _g1h(G1CollectedHeap::heap()),
+    _worker_id(worker_id),
+    _hrclaimer(hrclaimer),
     _dcq(&_g1h->dirty_card_queue_set()),
-    _update_rset_cl(&_dcq),
-    _worker_id(worker_id),
-    _hrclaimer(hrclaimer) {
+    _update_rset_cl(&_dcq){
   }
 
   size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -29,12 +29,14 @@
 
 G1FullGCMarker::G1FullGCMarker(uint worker_id, PreservedMarks* preserved_stack, G1CMBitMap* bitmap) :
     _worker_id(worker_id),
+    _bitmap(bitmap),
+    _oop_stack(),
+    _objarray_stack(),
+    _preserved_stack(preserved_stack),
     _mark_closure(worker_id, this, G1CollectedHeap::heap()->ref_processor_stw()),
     _verify_closure(VerifyOption_G1UseFullMarking),
-    _cld_closure(mark_closure()),
     _stack_closure(this),
-    _preserved_stack(preserved_stack),
-    _bitmap(bitmap) {
+    _cld_closure(mark_closure()) {
   _oop_stack.initialize();
   _objarray_stack.initialize();
 }
@@ -46,15 +48,14 @@
 void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
                                       ObjArrayTaskQueueSet* array_stacks,
                                       ParallelTaskTerminator* terminator) {
-  int hash_seed = 17;
   do {
     drain_stack();
     ObjArrayTask steal_array;
-    if (array_stacks->steal(_worker_id, &hash_seed, steal_array)) {
+    if (array_stacks->steal(_worker_id, steal_array)) {
       follow_array_chunk(objArrayOop(steal_array.obj()), steal_array.index());
     } else {
       oop steal_oop;
-      if (oop_stacks->steal(_worker_id, &hash_seed, steal_oop)) {
+      if (oop_stacks->steal(_worker_id, steal_oop)) {
         follow_object(steal_oop);
       }
     }
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -45,7 +45,6 @@
 class G1CMBitMap;
 
 class G1FullGCMarker : public CHeapObj<mtGC> {
-private:
   uint               _worker_id;
   // Backing mark bitmap
   G1CMBitMap*        _bitmap;
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -39,10 +39,10 @@
 
 G1VerifyOopClosure::G1VerifyOopClosure(VerifyOption option) :
    _g1h(G1CollectedHeap::heap()),
+   _failures(false),
    _containing_obj(NULL),
    _verify_option(option),
-   _cc(0),
-   _failures(false) {
+   _cc(0) {
 }
 
 void G1VerifyOopClosure::print_object(outputStream* out, oop obj) {
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -61,9 +61,9 @@
 
 public:
   G1MarkAndPushClosure(uint worker, G1FullGCMarker* marker, ReferenceDiscoverer* ref) :
+    OopIterateClosure(ref),
     _marker(marker),
-    _worker_id(worker),
-    OopIterateClosure(ref) { }
+    _worker_id(worker) { }
 
   template <class T> inline void do_oop_work(T* p);
   virtual void do_oop(oop* p);
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -61,8 +61,8 @@
 
 G1FullGCPrepareTask::G1FullGCPrepareTask(G1FullCollector* collector) :
     G1FullGCTask("G1 Prepare Compact Task", collector),
-    _hrclaimer(collector->workers()),
-    _freed_regions(false) {
+    _freed_regions(false),
+    _hrclaimer(collector->workers()) {
 }
 
 void G1FullGCPrepareTask::set_freed_regions() {
--- a/src/hotspot/share/gc/g1/g1FullGCScope.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -36,8 +36,8 @@
     _active(),
     _cpu_time(),
     _soft_refs(clear_soft, _g1h->soft_ref_policy()),
+    _collector_stats(_g1h->g1mm()->full_collection_counters()),
     _memory_stats(memory_manager, _g1h->gc_cause()),
-    _collector_stats(_g1h->g1mm()->full_collection_counters()),
     _heap_transition(_g1h) {
   _timer.register_gc_start();
   _tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -489,7 +489,7 @@
 }
 
 G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
-    _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
+  _start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id) {
   if (_phase_times != NULL) {
     _start_time = Ticks::now();
   }
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -127,7 +127,7 @@
 
 public:
   G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
-    _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
+    _g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {}
 
   void do_oop(oop* p) { do_oop_work(p); }
   void do_oop(narrowOop* p) { do_oop_work(p); }
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -29,7 +29,10 @@
 #include "runtime/atomic.hpp"
 
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
-  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
+  _g1h(g1h), _use_cache(false), _card_counts(g1h),
+  _hot_cache(NULL), _hot_cache_size(0), _hot_cache_par_chunk_size(0),
+  _hot_cache_idx(0), _hot_cache_par_claimed_idx(0)
+{}
 
 void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
   if (default_use_cache()) {
--- a/src/hotspot/share/gc/g1/g1IHOPControl.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1IHOPControl.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -32,8 +32,8 @@
 G1IHOPControl::G1IHOPControl(double initial_ihop_percent) :
   _initial_ihop_percent(initial_ihop_percent),
   _target_occupancy(0),
-  _last_allocated_bytes(0),
-  _last_allocation_time_s(0.0)
+  _last_allocation_time_s(0.0),
+  _last_allocated_bytes(0)
 {
   assert(_initial_ihop_percent >= 0.0 && _initial_ihop_percent <= 100.0, "Initial IHOP value must be between 0 and 100 but is %.3f", initial_ihop_percent);
 }
@@ -86,12 +86,12 @@
                                              size_t heap_reserve_percent,
                                              size_t heap_waste_percent) :
   G1IHOPControl(ihop_percent),
+  _heap_reserve_percent(heap_reserve_percent),
+  _heap_waste_percent(heap_waste_percent),
   _predictor(predictor),
   _marking_times_s(10, 0.95),
   _allocation_rate_s(10, 0.95),
-  _last_unrestrained_young_size(0),
-  _heap_reserve_percent(heap_reserve_percent),
-  _heap_waste_percent(heap_waste_percent)
+  _last_unrestrained_young_size(0)
 {
 }
 
--- a/src/hotspot/share/gc/g1/g1MemoryPool.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MemoryPool.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -32,10 +32,11 @@
                                      size_t init_size,
                                      size_t max_size,
                                      bool support_usage_threshold) :
-  _g1mm(g1h->g1mm()), CollectedMemoryPool(name,
-                                          init_size,
-                                          max_size,
-                                          support_usage_threshold) {
+  CollectedMemoryPool(name,
+                      init_size,
+                      max_size,
+                      support_usage_threshold),
+  _g1mm(g1h->g1mm()) {
   assert(UseG1GC, "sanity");
 }
 
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -80,20 +80,24 @@
   _incremental_collection_counters(NULL),
   _full_collection_counters(NULL),
   _conc_collection_counters(NULL),
+  _young_collection_counters(NULL),
   _old_collection_counters(NULL),
   _old_space_counters(NULL),
-  _young_collection_counters(NULL),
   _eden_counters(NULL),
   _from_counters(NULL),
   _to_counters(NULL),
 
   _overall_reserved(0),
-  _overall_committed(0),    _overall_used(0),
+  _overall_committed(0),
+  _overall_used(0),
   _young_region_num(0),
   _young_gen_committed(0),
-  _eden_committed(0),       _eden_used(0),
-  _survivor_committed(0),   _survivor_used(0),
-  _old_committed(0),        _old_used(0) {
+  _eden_committed(0),
+  _eden_used(0),
+  _survivor_committed(0),
+  _survivor_used(0),
+  _old_committed(0),
+  _old_used(0) {
 
   _overall_reserved = g1h->max_capacity();
   recalculate_sizes();
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -162,7 +162,7 @@
 public:
   G1CLDScanClosure(G1ParCopyHelper* closure,
                    bool process_only_dirty, bool must_claim)
-      : _process_only_dirty(process_only_dirty), _must_claim(must_claim), _closure(closure), _count(0) {}
+  : _closure(closure), _process_only_dirty(process_only_dirty), _must_claim(must_claim), _count(0) {}
   void do_cld(ClassLoaderData* cld);
 };
 
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -34,8 +34,8 @@
 #include "utilities/bitMap.inline.hpp"
 
 G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
-  _low_boundary(NULL), _high_boundary(NULL), _committed(mtGC), _page_size(0), _special(false),
-  _dirty(mtGC), _executable(false) {
+  _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
+  _committed(mtGC), _dirty(mtGC), _special(false), _executable(false) {
   initialize_with_page_size(rs, used_size, page_size);
 }
 
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -47,7 +47,6 @@
     _age_table(false),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _scanner(g1h, this),
-    _hash_seed(17),
     _worker_id(worker_id),
     _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
     _stack_trim_lower_threshold(GCDrainStackTargetSize),
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -57,7 +57,6 @@
   uint              _tenuring_threshold;
   G1ScanEvacuatedObjClosure  _scanner;
 
-  int  _hash_seed;
   uint _worker_id;
 
   // Upper and lower threshold to start and end work queue draining.
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -140,7 +140,7 @@
 
 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
   StarTask stolen_task;
-  while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
+  while (task_queues->steal(_worker_id, stolen_task)) {
     assert(verify_task(stolen_task), "sanity");
     dispatch_reference(stolen_task);
 
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -53,21 +53,32 @@
   _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
   _ihop_control(create_ihop_control(&_predictor)),
   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
+  _full_collection_start_sec(0.0),
+  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
+  _young_list_target_length(0),
   _young_list_fixed_length(0),
+  _young_list_max_length(0),
   _short_lived_surv_rate_group(new SurvRateGroup()),
   _survivor_surv_rate_group(new SurvRateGroup()),
   _reserve_factor((double) G1ReservePercent / 100.0),
   _reserve_regions(0),
+  _young_gen_sizer(),
+  _free_regions_at_end_of_collection(0),
+  _max_rs_lengths(0),
   _rs_lengths_prediction(0),
+  _pending_cards(0),
   _bytes_allocated_in_old_since_last_gc(0),
   _initial_mark_to_mixed(),
   _collection_set(NULL),
+  _bytes_copied_during_gc(0),
   _g1h(NULL),
   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
+  _mark_remark_start_sec(0),
+  _mark_cleanup_start_sec(0),
   _tenuring_threshold(MaxTenuringThreshold),
   _max_survivor_regions(0),
-  _survivors_age_table(true),
-  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) {
+  _survivors_age_table(true)
+{
 }
 
 G1Policy::~G1Policy() {
--- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -27,16 +27,17 @@
 #include "memory/allocation.inline.hpp"
 
 G1RegionMarkStatsCache::G1RegionMarkStatsCache(G1RegionMarkStats* target, uint max_regions, uint num_cache_entries) :
+  _target(target),
   _num_stats(max_regions),
-  _target(target),
+  _cache(NULL),
   _num_cache_entries(num_cache_entries),
   _cache_hits(0),
-  _cache_misses(0) {
+  _cache_misses(0),
+  _num_cache_entries_mask(_num_cache_entries - 1) {
 
   guarantee(is_power_of_2(num_cache_entries),
             "Number of cache entries must be power of two, but is %u", num_cache_entries);
   _cache = NEW_C_HEAP_ARRAY(G1RegionMarkStatsCacheEntry, _num_cache_entries, mtGC);
-  _num_cache_entries_mask = _num_cache_entries - 1;
 }
 
 G1RegionMarkStatsCache::~G1RegionMarkStatsCache() {
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -37,9 +37,9 @@
                                              size_t region_granularity,
                                              size_t commit_factor,
                                              MemoryType type) :
+  _listener(NULL),
   _storage(rs, used_size, page_size),
   _region_granularity(region_granularity),
-  _listener(NULL),
   _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
   guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -282,13 +282,13 @@
 G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                    G1CardTable* ct,
                    G1HotCardCache* hot_card_cache) :
+  _scan_state(new G1RemSetScanState()),
+  _prev_period_summary(),
   _g1h(g1h),
-  _scan_state(new G1RemSetScanState()),
   _num_conc_refined_cards(0),
   _ct(ct),
   _g1p(_g1h->g1_policy()),
-  _hot_card_cache(hot_card_cache),
-  _prev_period_summary() {
+  _hot_card_cache(hot_card_cache) {
 }
 
 G1RemSet::~G1RemSet() {
@@ -316,8 +316,8 @@
   _scan_objs_on_card_cl(scan_obj_on_card),
   _scan_state(scan_state),
   _worker_i(worker_i),
+  _cards_scanned(0),
   _cards_claimed(0),
-  _cards_scanned(0),
   _cards_skipped(0),
   _rem_set_root_scan_time(),
   _rem_set_trim_partially_time(),
@@ -976,8 +976,8 @@
                       uint n_workers,
                       uint worker_id_offset) :
       AbstractGangTask("G1 Rebuild Remembered Set"),
+      _hr_claimer(n_workers),
       _cm(cm),
-      _hr_claimer(n_workers),
       _worker_id_offset(worker_id_offset) {
   }
 
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -247,9 +247,10 @@
   HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
 
 public:
-  HRRSStatsIter() : _all("All"), _young("Young"), _humongous("Humongous"),
-    _free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
-    _max_rs_mem_sz(0), _max_code_root_mem_sz(0)
+  HRRSStatsIter() : _young("Young"), _humongous("Humongous"),
+    _free("Free"), _old("Old"), _all("All"),
+    _max_rs_mem_sz(0), _max_rs_mem_sz_region(NULL),
+    _max_code_root_mem_sz(0), _max_code_root_mem_sz_region(NULL)
   {}
 
   bool do_heap_region(HeapRegion* r) {
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -72,8 +72,8 @@
     _g1h(g1h),
     _process_strong_tasks(G1RP_PS_NumElements),
     _srs(n_workers),
+    _par_state_string(StringTable::weak_storage()),
     _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
-    _par_state_string(StringTable::weak_storage()),
     _n_workers_discovered_strong_classes(0) {}
 
 void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) {
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -27,8 +27,9 @@
 #include "gc/g1/heapRegion.hpp"
 #include "logging/log.hpp"
 
-G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
-        _min_desired_young_length(0), _max_desired_young_length(0) {
+G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
+  _min_desired_young_length(0), _max_desired_young_length(0), _adaptive_size(true) {
+
   if (FLAG_IS_CMDLINE(NewRatio)) {
     if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
       log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -230,16 +230,19 @@
                        G1BlockOffsetTable* bot,
                        MemRegion mr) :
     G1ContiguousSpace(bot),
+    _rem_set(NULL),
     _hrm_index(hrm_index),
+    _type(),
     _humongous_start_region(NULL),
     _evacuation_failed(false),
-    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
     _next(NULL), _prev(NULL),
 #ifdef ASSERT
     _containing_set(NULL),
-#endif // ASSERT
-     _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
-    _rem_set(NULL), _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
+#endif
+    _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
+    _young_index_in_cset(-1), _surv_rate_group(NULL), _age_index(-1),
+    _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
+    _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
 {
   _rem_set = new HeapRegionRemSet(bot, this);
 
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -128,10 +128,13 @@
 
  public:
   // Empty constructor, we'll initialize it with the initialize() method.
-  HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0),
-                    _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
-                    _allocated_heapregions_length(0), _available_map(mtGC),
-                    _free_list("Free list", new MasterFreeRegionListMtSafeChecker())
+  HeapRegionManager() :
+   _regions(), _heap_mapper(NULL),
+   _prev_bitmap_mapper(NULL), _next_bitmap_mapper(NULL), _bot_mapper(NULL),
+   _cardtable_mapper(NULL), _card_counts_mapper(NULL),
+   _free_list("Free list", new MasterFreeRegionListMtSafeChecker()),
+   _available_map(mtGC), _num_committed(0),
+   _allocated_heapregions_length(0)
   { }
 
   void initialize(G1RegionToSpaceMapper* heap_storage,
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -68,9 +68,10 @@
 
   PerRegionTable(HeapRegion* hr) :
     _hr(hr),
+    _bm(HeapRegion::CardsPerRegion, mtGC),
     _occupied(0),
-    _bm(HeapRegion::CardsPerRegion, mtGC),
-    _collision_list_next(NULL), _next(NULL), _prev(NULL)
+    _next(NULL), _prev(NULL),
+    _collision_list_next(NULL)
   {}
 
   void add_card_work(CardIdx_t from_card, bool par) {
@@ -240,11 +241,14 @@
 
 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
   _g1h(G1CollectedHeap::heap()),
-  _hr(hr), _m(m),
+  _m(m),
+  _hr(hr),
   _coarse_map(G1CollectedHeap::heap()->max_regions(), mtGC),
+  _n_coarse_entries(0),
   _fine_grain_regions(NULL),
-  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
-  _n_fine_entries(0), _n_coarse_entries(0),
+  _n_fine_entries(0),
+  _first_all_fine_prts(NULL),
+  _last_all_fine_prts(NULL),
   _fine_eviction_start(0),
   _sparse_table(hr)
 {
@@ -621,10 +625,11 @@
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                    HeapRegion* hr)
   : _bot(bot),
+    _code_roots(),
     _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
-    _code_roots(),
-    _state(Untracked),
-    _other_regions(hr, &_m) {
+    _other_regions(hr, &_m),
+    _state(Untracked)
+{
 }
 
 void HeapRegionRemSet::setup_remset_size() {
@@ -716,18 +721,19 @@
 
 HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
   _hrrs(hrrs),
-  _g1h(G1CollectedHeap::heap()),
   _coarse_map(&hrrs->_other_regions._coarse_map),
   _bot(hrrs->_bot),
+  _g1h(G1CollectedHeap::heap()),
+  _n_yielded_fine(0),
+  _n_yielded_coarse(0),
+  _n_yielded_sparse(0),
   _is(Sparse),
+  _cur_region_card_offset(0),
   // Set these values so that we increment to the first region.
   _coarse_cur_region_index(-1),
   _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
+  _fine_cur_prt(NULL),
   _cur_card_in_prt(HeapRegion::CardsPerRegion),
-  _fine_cur_prt(NULL),
-  _n_yielded_coarse(0),
-  _n_yielded_fine(0),
-  _n_yielded_sparse(0),
   _sparse_iter(&hrrs->_other_regions._sparse_table) {}
 
 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -83,9 +83,12 @@
 }
 
 HeapRegionSetBase::HeapRegionSetBase(const char* name, bool humongous, bool free, HRSMtSafeChecker* mt_safety_checker)
-  : _name(name), _verify_in_progress(false),
-    _is_humongous(humongous), _is_free(free), _mt_safety_checker(mt_safety_checker),
-    _length(0)
+  : _is_humongous(humongous),
+    _is_free(free),
+    _mt_safety_checker(mt_safety_checker),
+    _length(0),
+    _name(name),
+    _verify_in_progress(false)
 { }
 
 void FreeRegionList::set_unrealistically_long_length(uint len) {
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -237,8 +237,9 @@
     return hr;
   }
 
-  FreeRegionListIterator(FreeRegionList* list) : _curr(NULL), _list(list) {
-    _curr = list->_head;
+  FreeRegionListIterator(FreeRegionList* list)
+  : _list(list),
+    _curr(list->_head) {
   }
 };
 
--- a/src/hotspot/share/gc/g1/ptrQueue.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/ptrQueue.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -92,14 +92,20 @@
 
 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
   _buffer_size(0),
-  _max_completed_queue(0),
-  _cbl_mon(NULL), _fl_lock(NULL),
-  _notify_when_complete(notify_when_complete),
+  _cbl_mon(NULL),
   _completed_buffers_head(NULL),
   _completed_buffers_tail(NULL),
   _n_completed_buffers(0),
-  _process_completed_threshold(0), _process_completed(false),
-  _buf_free_list(NULL), _buf_free_list_sz(0)
+  _process_completed_threshold(0),
+  _process_completed(false),
+  _fl_lock(NULL),
+  _buf_free_list(NULL),
+  _buf_free_list_sz(0),
+  _fl_owner(NULL),
+  _all_active(false),
+  _notify_when_complete(notify_when_complete),
+  _max_completed_queue(0),
+  _completed_queue_padding(0)
 {
   _fl_owner = this;
 }
--- a/src/hotspot/share/gc/g1/ptrQueue.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/ptrQueue.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -257,7 +257,6 @@
 // set, and return completed buffers to the set.
 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
 class PtrQueueSet {
-private:
   // The size of all buffers in the set.
   size_t _buffer_size;
 
--- a/src/hotspot/share/gc/g1/sparsePRT.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/sparsePRT.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -88,11 +88,15 @@
 float RSHashTable::TableOccupancyFactor = 0.5f;
 
 RSHashTable::RSHashTable(size_t capacity) :
-  _capacity(capacity), _capacity_mask(capacity-1),
-  _occupied_entries(0), _occupied_cards(0),
+  _num_entries(0),
+  _capacity(capacity),
+  _capacity_mask(capacity-1),
+  _occupied_entries(0),
+  _occupied_cards(0),
   _entries(NULL),
   _buckets(NEW_C_HEAP_ARRAY(int, capacity, mtGC)),
-  _free_list(NullEntry), _free_region(0)
+  _free_region(0),
+  _free_list(NullEntry)
 {
   _num_entries = (capacity * TableOccupancyFactor) + 1;
   _entries = (SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, _num_entries * SparsePRTEntry::size(), mtGC);
--- a/src/hotspot/share/gc/g1/survRateGroup.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/survRateGroup.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -31,9 +31,14 @@
 #include "memory/allocation.hpp"
 
 SurvRateGroup::SurvRateGroup() :
-    _accum_surv_rate_pred(NULL),
-    _surv_rate_pred(NULL),
-    _stats_arrays_length(0) {
+  _stats_arrays_length(0),
+  _accum_surv_rate_pred(NULL),
+  _last_pred(0.0),
+  _surv_rate_pred(NULL),
+  _all_regions_allocated(0),
+  _region_num(0),
+  _setup_seq_num(0)
+{
   reset();
   start_adding_regions();
 }
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -47,8 +47,8 @@
   : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
     _pause_succeeded(false),
     _should_initiate_conc_mark(should_initiate_conc_mark),
+    _should_retry_gc(false),
     _target_pause_time_ms(target_pause_time_ms),
-    _should_retry_gc(false),
     _old_marking_cycles_completed_before(0) {
   guarantee(target_pause_time_ms > 0.0,
             "target_pause_time_ms = %1.6lf should be positive",
--- a/src/hotspot/share/gc/g1/vm_operations_g1.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -49,7 +49,7 @@
 
 class VM_G1CollectForAllocation: public VM_CollectForAllocation {
 private:
-  bool      _pause_succeeded;
+  bool         _pause_succeeded;
 
   bool         _should_initiate_conc_mark;
   bool         _should_retry_gc;
--- a/src/hotspot/share/gc/parallel/adjoiningVirtualSpaces.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/adjoiningVirtualSpaces.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -31,8 +31,9 @@
                                                size_t min_low_byte_size,
                                                size_t min_high_byte_size,
                                                size_t alignment) :
+  _high(NULL), _low(NULL),
   _reserved_space(rs), _min_low_byte_size(min_low_byte_size),
-  _min_high_byte_size(min_high_byte_size), _low(0), _high(0),
+  _min_high_byte_size(min_high_byte_size),
   _alignment(alignment) {}
 
 // The maximum byte sizes are for the initial layout of the
--- a/src/hotspot/share/gc/parallel/gcTaskManager.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/gcTaskManager.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -373,9 +373,9 @@
 //
 GCTaskManager::GCTaskManager(uint workers) :
   _workers(workers),
+  _created_workers(0),
   _active_workers(0),
-  _idle_workers(0),
-  _created_workers(0) {
+  _idle_workers(0) {
   initialize();
 }
 
@@ -962,7 +962,7 @@
   _wait_helper.notify();
 }
 
-WaitHelper::WaitHelper() : _should_wait(true), _monitor(MonitorSupply::reserve()) {
+WaitHelper::WaitHelper() : _monitor(MonitorSupply::reserve()), _should_wait(true) {
   if (TraceGCTaskManager) {
     tty->print_cr("[" INTPTR_FORMAT "]"
                   " WaitHelper::WaitHelper()"
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -89,7 +89,7 @@
     char* last_page_scanned()            { return _last_page_scanned; }
     void set_last_page_scanned(char* p)  { _last_page_scanned = p;    }
    public:
-    LGRPSpace(int l, size_t alignment) : _lgrp_id(l), _last_page_scanned(NULL), _allocation_failed(false) {
+    LGRPSpace(int l, size_t alignment) : _lgrp_id(l), _allocation_failed(false), _last_page_scanned(NULL) {
       _space = new MutableSpace(alignment);
       _alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
     }
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -33,7 +33,7 @@
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
-MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _top(NULL), _alignment(alignment) {
+MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _alignment(alignment), _top(NULL) {
   assert(MutableSpace::alignment() % os::vm_page_size() == 0,
          "Space should be aligned");
   _mangler = new MutableSpaceMangler(this);
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -29,7 +29,7 @@
 #include "utilities/bitMap.inline.hpp"
 
 inline ParMarkBitMap::ParMarkBitMap():
-  _beg_bits(), _end_bits(), _region_start(NULL), _region_size(0), _virtual_space(NULL), _reserved_byte_size(0)
+  _region_start(NULL), _region_size(0), _beg_bits(), _end_bits(), _virtual_space(NULL), _reserved_byte_size(0)
 { }
 
 inline void ParMarkBitMap::clear_range(idx_t beg, idx_t end) {
--- a/src/hotspot/share/gc/parallel/pcTasks.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/pcTasks.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -183,13 +183,12 @@
 
   oop obj = NULL;
   ObjArrayTask task;
-  int random_seed = 17;
   do {
-    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
+    while (ParCompactionManager::steal_objarray(which, task)) {
       cm->follow_contents((objArrayOop)task.obj(), task.index());
       cm->follow_marking_stacks();
     }
-    while (ParCompactionManager::steal(which, &random_seed, obj)) {
+    while (ParCompactionManager::steal(which, obj)) {
       cm->follow_contents(obj);
       cm->follow_marking_stacks();
     }
@@ -217,10 +216,9 @@
   guarantee(cm->region_stack()->is_empty(), "Not empty");
 
   size_t region_index = 0;
-  int random_seed = 17;
 
   while(true) {
-    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
+    if (ParCompactionManager::steal(which, region_index)) {
       PSParallelCompact::fill_and_update_region(cm, region_index);
       cm->drain_region_stacks();
     } else {
--- a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -50,10 +50,10 @@
                         gc_pause_goal_sec,
                         gc_cost_ratio),
      _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin / 100.0),
+     _latest_major_mutator_interval_seconds(0),
      _space_alignment(space_alignment),
+     _gc_minor_pause_goal_sec(gc_minor_pause_goal_sec),
      _live_at_last_full_gc(init_promo_size),
-     _gc_minor_pause_goal_sec(gc_minor_pause_goal_sec),
-     _latest_major_mutator_interval_seconds(0),
      _young_gen_change_for_major_pause_count(0)
 {
   // Sizing policy statistics
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -159,9 +159,9 @@
   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(uint index);
 
-  static bool steal(int queue_num, int* seed, oop& t);
-  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t);
-  static bool steal(int queue_num, int* seed, size_t& region);
+  static bool steal(int queue_num, oop& t);
+  static bool steal_objarray(int queue_num, ObjArrayTask& t);
+  static bool steal(int queue_num, size_t& region);
 
   // Process tasks remaining on any marking stack
   void follow_marking_stacks();
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -37,16 +37,16 @@
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-inline bool ParCompactionManager::steal(int queue_num, int* seed, oop& t) {
-  return stack_array()->steal(queue_num, seed, t);
+inline bool ParCompactionManager::steal(int queue_num, oop& t) {
+  return stack_array()->steal(queue_num, t);
 }
 
-inline bool ParCompactionManager::steal_objarray(int queue_num, int* seed, ObjArrayTask& t) {
-  return _objarray_queues->steal(queue_num, seed, t);
+inline bool ParCompactionManager::steal_objarray(int queue_num, ObjArrayTask& t) {
+  return _objarray_queues->steal(queue_num, t);
 }
 
-inline bool ParCompactionManager::steal(int queue_num, int* seed, size_t& region) {
-  return region_array()->steal(queue_num, seed, region);
+inline bool ParCompactionManager::steal(int queue_num, size_t& region) {
+  return region_array()->steal(queue_num, region);
 }
 
 inline void ParCompactionManager::push(oop obj) {
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -2216,7 +2216,7 @@
   bool _enabled;
   size_t _total_regions;
 public:
-  FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)) { }
+  FillableRegionLogger() : _next_index(0), _enabled(log_develop_is_enabled(Trace, gc, compaction)), _total_regions(0) { }
   ~FillableRegionLogger() {
     log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
   }
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -159,7 +159,7 @@
   static PSPromotionManager* gc_thread_promotion_manager(uint index);
   static PSPromotionManager* vm_thread_promotion_manager();
 
-  static bool steal_depth(int queue_num, int* seed, StarTask& t);
+  static bool steal_depth(int queue_num, StarTask& t);
 
   PSPromotionManager();
 
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -322,8 +322,8 @@
   }
 }
 
-inline bool PSPromotionManager::steal_depth(int queue_num, int* seed, StarTask& t) {
-  return stack_array_depth()->steal(queue_num, seed, t);
+inline bool PSPromotionManager::steal_depth(int queue_num, StarTask& t) {
+  return stack_array_depth()->steal(queue_num, t);
 }
 
 #if TASKQUEUE_STATS
--- a/src/hotspot/share/gc/parallel/psTasks.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psTasks.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -141,10 +141,9 @@
   guarantee(pm->stacks_empty(),
             "stacks should be empty at this point");
 
-  int random_seed = 17;
   while(true) {
     StarTask p;
-    if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
+    if (PSPromotionManager::steal_depth(which, p)) {
       TASKQUEUE_STATS_ONLY(pm->record_steal(p));
       pm->process_popped_location_depth(p);
       pm->drain_stacks_depth(true);
--- a/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/adaptiveSizePolicy.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -49,16 +49,16 @@
                                        size_t init_survivor_size,
                                        double gc_pause_goal_sec,
                                        uint gc_cost_ratio) :
+    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
     _eden_size(init_eden_size),
     _promo_size(init_promo_size),
     _survivor_size(init_survivor_size),
-    _gc_pause_goal_sec(gc_pause_goal_sec),
-    _throughput_goal(1.0 - double(1.0 / (1.0 + (double) gc_cost_ratio))),
     _gc_overhead_limit_exceeded(false),
     _print_gc_overhead_limit_would_be_exceeded(false),
     _gc_overhead_limit_count(0),
     _latest_minor_mutator_interval_seconds(0),
     _threshold_tolerance_percent(1.0 + ThresholdTolerance/100.0),
+    _gc_pause_goal_sec(gc_pause_goal_sec),
     _young_gen_change_for_minor_throughput(0),
     _old_gen_change_for_major_throughput(0) {
   assert(AdaptiveSizePolicyGCTimeLimitThreshold > 0,
--- a/src/hotspot/share/gc/shared/cardTable.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardTable.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -45,15 +45,15 @@
   _scanned_concurrently(conc_scan),
   _whole_heap(whole_heap),
   _guard_index(0),
-  _guard_region(),
   _last_valid_index(0),
   _page_size(os::vm_page_size()),
   _byte_map_size(0),
+  _byte_map(NULL),
+  _byte_map_base(NULL),
+  _cur_covered_regions(0),
   _covered(NULL),
   _committed(NULL),
-  _cur_covered_regions(0),
-  _byte_map(NULL),
-  _byte_map_base(NULL)
+  _guard_region()
 {
   assert((uintptr_t(_whole_heap.start())  & (card_size - 1))  == 0, "heap must start at card boundary");
   assert((uintptr_t(_whole_heap.end()) & (card_size - 1))  == 0, "heap must end at card boundary");
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -45,11 +45,11 @@
 // CollectorPolicy methods
 
 CollectorPolicy::CollectorPolicy() :
-    _space_alignment(0),
-    _heap_alignment(0),
     _initial_heap_byte_size(InitialHeapSize),
     _max_heap_byte_size(MaxHeapSize),
-    _min_heap_byte_size(Arguments::min_heap_size())
+    _min_heap_byte_size(Arguments::min_heap_size()),
+    _space_alignment(0),
+    _heap_alignment(0)
 {}
 
 #ifdef ASSERT
--- a/src/hotspot/share/gc/shared/gcTraceTime.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcTraceTime.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -79,14 +79,14 @@
 }
 
 inline GCTraceTimeImpl::GCTraceTimeImpl(LogTargetHandle out_start, LogTargetHandle out_stop, const char* title, GCTimer* timer, GCCause::Cause gc_cause, bool log_heap_usage) :
-  _enabled(out_stop.is_enabled()),
   _out_start(out_start),
   _out_stop(out_stop),
+  _enabled(out_stop.is_enabled()),
   _start_ticks(),
-  _heap_usage_before(SIZE_MAX),
   _title(title),
   _gc_cause(gc_cause),
-  _timer(timer) {
+  _timer(timer),
+  _heap_usage_before(SIZE_MAX) {
 
   time_stamp(_start_ticks);
   if (_enabled) {
--- a/src/hotspot/share/gc/shared/gcUtil.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/gcUtil.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -73,8 +73,8 @@
  public:
   // Input weight must be between 0 and 100
   AdaptiveWeightedAverage(unsigned weight, float avg = 0.0) :
-    _average(avg), _sample_count(0), _weight(weight), _last_sample(0.0),
-    _is_old(false) {
+    _average(avg), _sample_count(0), _weight(weight),
+    _is_old(false), _last_sample(0.0) {
   }
 
   void clear() {
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -73,7 +73,6 @@
                                    Generation::Name old,
                                    const char* policy_counters_name) :
   CollectedHeap(),
-  _rem_set(NULL),
   _young_gen_spec(new GenerationSpec(young,
                                      policy->initial_young_size(),
                                      policy->max_young_size(),
@@ -82,11 +81,12 @@
                                    policy->initial_old_size(),
                                    policy->max_old_size(),
                                    policy->gen_alignment())),
+  _rem_set(NULL),
   _gen_policy(policy),
   _soft_ref_gen_policy(),
   _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
-  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
-  _full_collections_completed(0) {
+  _full_collections_completed(0),
+  _process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)) {
 }
 
 jint GenCollectedHeap::initialize() {
--- a/src/hotspot/share/gc/shared/generation.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/generation.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -43,8 +43,8 @@
 #include "utilities/events.hpp"
 
 Generation::Generation(ReservedSpace rs, size_t initial_size) :
-  _ref_processor(NULL),
-  _gc_manager(NULL) {
+  _gc_manager(NULL),
+  _ref_processor(NULL) {
   if (!_virtual_space.initialize(rs, initial_size)) {
     vm_exit_during_initialization("Could not reserve enough space for "
                     "object heap");
--- a/src/hotspot/share/gc/shared/generationCounters.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/generationCounters.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -56,7 +56,7 @@
   // constructor. The need for such an constructor should be eliminated
   // when VirtualSpace and PSVirtualSpace are unified.
   GenerationCounters()
-             : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
+             : _current_size(NULL), _virtual_space(NULL), _name_space(NULL) {}
 
   // This constructor is used for subclasses that do not have a space
   // associated with them (e.g, in G1).
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -104,10 +104,10 @@
   _is_subject_to_discovery(is_subject_to_discovery),
   _discovering_refs(false),
   _enqueuing_is_done(false),
-  _is_alive_non_header(is_alive_non_header),
   _processing_is_mt(mt_processing),
   _next_id(0),
-  _adjust_no_of_processing_threads(adjust_no_of_processing_threads)
+  _adjust_no_of_processing_threads(adjust_no_of_processing_threads),
+  _is_alive_non_header(is_alive_non_header)
 {
   assert(is_subject_to_discovery != NULL, "must be set");
 
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -39,7 +39,7 @@
 // List of discovered references.
 class DiscoveredList {
 public:
-  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+  DiscoveredList() : _oop_head(NULL), _compressed_head(0), _len(0) { }
   inline oop head() const;
   HeapWord* adr_head() {
     return UseCompressedOops ? (HeapWord*)&_compressed_head :
--- a/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -59,14 +59,17 @@
   _prev_discovered_addr(refs_list.adr_head()),
   _prev_discovered(NULL),
   _current_discovered(refs_list.head()),
+  _current_discovered_addr(NULL),
+  _next_discovered(NULL),
+  _referent_addr(NULL),
+  _referent(NULL),
+  _keep_alive(keep_alive),
+  _is_alive(is_alive),
 #ifdef ASSERT
   _first_seen(refs_list.head()),
 #endif
   _processed(0),
-  _removed(0),
-  _next_discovered(NULL),
-  _keep_alive(keep_alive),
-  _is_alive(is_alive) {
+  _removed(0) {
 }
 
 #endif // SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -164,7 +164,7 @@
 RefProcTotalPhaseTimesTracker::RefProcTotalPhaseTimesTracker(ReferenceProcessor::RefProcPhases phase_number,
                                                              ReferenceProcessorPhaseTimes* phase_times,
                                                              ReferenceProcessor* rp) :
-  _rp(rp), RefProcPhaseTimeBaseTracker(phase_enum_2_phase_string(phase_number), phase_number, phase_times) {
+  RefProcPhaseTimeBaseTracker(phase_enum_2_phase_string(phase_number), phase_number, phase_times), _rp(rp) {
 }
 
 RefProcTotalPhaseTimesTracker::~RefProcTotalPhaseTimesTracker() {
@@ -173,7 +173,7 @@
 }
 
 ReferenceProcessorPhaseTimes::ReferenceProcessorPhaseTimes(GCTimer* gc_timer, uint max_gc_threads) :
-  _gc_timer(gc_timer), _processing_is_mt(false) {
+  _processing_is_mt(false), _gc_timer(gc_timer) {
 
   for (uint i = 0; i < ReferenceProcessor::RefSubPhaseMax; i++) {
     _sub_phases_worker_time_sec[i] = new WorkerDataArray<double>(max_gc_threads, SubPhasesParWorkTitle[i]);
--- a/src/hotspot/share/gc/shared/space.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -289,7 +289,7 @@
   DirtyCardToOopClosure(Space* sp, OopIterateClosure* cl,
                         CardTable::PrecisionStyle precision,
                         HeapWord* boundary) :
-    _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
+    _cl(cl), _sp(sp), _precision(precision), _boundary(boundary),
     _min_done(NULL) {
     NOT_PRODUCT(_last_bottom = NULL);
     NOT_PRODUCT(_last_explicit_min_done = NULL);
--- a/src/hotspot/share/gc/shared/space.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/space.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -88,7 +88,7 @@
   CompactibleSpace* _space;
 
 public:
-  DeadSpacer(CompactibleSpace* space) : _space(space), _allowed_deadspace_words(0) {
+  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
     size_t ratio = _space->allowed_dead_ratio();
     _active = ratio > 0;
 
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -224,8 +224,8 @@
 StringDedupTable::StringDedupTable(size_t size, jint hash_seed) :
   _size(size),
   _entries(0),
+  _shrink_threshold((uintx)(size * _shrink_load_factor)),
   _grow_threshold((uintx)(size * _grow_load_factor)),
-  _shrink_threshold((uintx)(size * _shrink_load_factor)),
   _rehash_needed(false),
   _hash_seed(hash_seed) {
   assert(is_power_of_2(size), "Table size must be a power of 2");
--- a/src/hotspot/share/gc/shared/taskqueue.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/taskqueue.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -111,24 +111,6 @@
 #endif // ASSERT
 #endif // TASKQUEUE_STATS
 
-int TaskQueueSetSuper::randomParkAndMiller(int *seed0) {
-  const int a =      16807;
-  const int m = 2147483647;
-  const int q =     127773;  /* m div a */
-  const int r =       2836;  /* m mod a */
-  assert(sizeof(int) == 4, "I think this relies on that");
-  int seed = *seed0;
-  int hi   = seed / q;
-  int lo   = seed % q;
-  int test = a * lo - r * hi;
-  if (test > 0)
-    seed = test;
-  else
-    seed = test + m;
-  *seed0 = seed;
-  return seed;
-}
-
 ParallelTaskTerminator::
 ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
   _n_threads(n_threads),
--- a/src/hotspot/share/gc/shared/taskqueue.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_SHARED_TASKQUEUE_HPP
 
 #include "memory/allocation.hpp"
+#include "memory/padded.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/stack.hpp"
@@ -298,12 +299,30 @@
   template<typename Fn> void iterate(Fn fn);
 
 private:
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
   // Element array.
   volatile E* _elems;
+
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(E*));
+  // Queue owner local variables. Not to be accessed by other threads.
+
+  static const uint InvalidQueueId = uint(-1);
+  uint _last_stolen_queue_id; // The id of the queue we last stole from
+
+  int _seed; // Current random seed used for selecting a random queue during stealing.
+
+  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(uint) + sizeof(int));
+public:
+  int next_random_queue_id();
+
+  void set_last_stolen_queue_id(uint id)     { _last_stolen_queue_id = id; }
+  uint last_stolen_queue_id() const          { return _last_stolen_queue_id; }
+  bool is_last_stolen_queue_id_valid() const { return _last_stolen_queue_id != InvalidQueueId; }
+  void invalidate_last_stolen_queue_id()     { _last_stolen_queue_id = InvalidQueueId; }
 };
 
 template<class E, MEMFLAGS F, unsigned int N>
-GenericTaskQueue<E, F, N>::GenericTaskQueue() {
+GenericTaskQueue<E, F, N>::GenericTaskQueue() : _last_stolen_queue_id(InvalidQueueId), _seed(17 /* random number */) {
   assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
 }
 
@@ -348,8 +367,6 @@
 };
 
 class TaskQueueSetSuper {
-protected:
-  static int randomParkAndMiller(int* seed0);
 public:
   // Returns "true" if some TaskQueue in the set contains a task.
   virtual bool peek() = 0;
@@ -367,22 +384,19 @@
   uint _n;
   T** _queues;
 
-  bool steal_best_of_2(uint queue_num, int* seed, E& t);
+  bool steal_best_of_2(uint queue_num, E& t);
 
 public:
-  GenericTaskQueueSet(int n);
+  GenericTaskQueueSet(uint n);
   ~GenericTaskQueueSet();
 
   void register_queue(uint i, T* q);
 
   T* queue(uint n);
 
-  // The thread with queue number "queue_num" (and whose random number seed is
-  // at "seed") is trying to steal a task from some other queue.  (It may try
-  // several queues, according to some configuration parameter.)  If some steal
-  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
-  // false.
-  bool steal(uint queue_num, int* seed, E& t);
+  // Try to steal a task from a queue other than queue_num. It may perform several attempts at doing so.
+  // Returns true if stealing succeeded, and sets "t" to the stolen task; returns false otherwise.
+  bool steal(uint queue_num, E& t);
 
   bool peek();
 
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -34,10 +34,10 @@
 #include "utilities/stack.inline.hpp"
 
 template <class T, MEMFLAGS F>
-inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(int n) : _n(n) {
+inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(uint n) : _n(n) {
   typedef T* GenericTaskQueuePtr;
   _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
-  for (int i = 0; i < n; i++) {
+  for (uint i = 0; i < n; i++) {
     _queues[i] = NULL;
   }
 }
@@ -227,18 +227,71 @@
   return resAge == oldAge;
 }
 
+inline int randomParkAndMiller(int *seed0) {
+  const int a =      16807;
+  const int m = 2147483647;
+  const int q =     127773;  /* m div a */
+  const int r =       2836;  /* m mod a */
+  STATIC_ASSERT(sizeof(int) == 4);
+  int seed = *seed0;
+  int hi   = seed / q;
+  int lo   = seed % q;
+  int test = a * lo - r * hi;
+  if (test > 0) {
+    seed = test;
+  } else {
+    seed = test + m;
+  }
+  *seed0 = seed;
+  return seed;
+}
+
+template<class E, MEMFLAGS F, unsigned int N>
+int GenericTaskQueue<E, F, N>::next_random_queue_id() {
+  return randomParkAndMiller(&_seed);
+}
+
 template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
+GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, E& t) {
   if (_n > 2) {
+    T* const local_queue = _queues[queue_num];
     uint k1 = queue_num;
-    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
+
+    if (local_queue->is_last_stolen_queue_id_valid()) {
+      k1 = local_queue->last_stolen_queue_id();
+      assert(k1 != queue_num, "Should not be the same");
+    } else {
+      while (k1 == queue_num) {
+        k1 = local_queue->next_random_queue_id() % _n;
+      }
+    }
+
     uint k2 = queue_num;
-    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
+    while (k2 == queue_num || k2 == k1) {
+      k2 = local_queue->next_random_queue_id() % _n;
+    }
     // Sample both and try the larger.
     uint sz1 = _queues[k1]->size();
     uint sz2 = _queues[k2]->size();
-    if (sz2 > sz1) return _queues[k2]->pop_global(t);
-    else return _queues[k1]->pop_global(t);
+
+    uint sel_k = 0;
+    bool suc = false;
+
+    if (sz2 > sz1) {
+      sel_k = k2;
+      suc = _queues[k2]->pop_global(t);
+    } else if (sz1 > 0) {
+      sel_k = k1;
+      suc = _queues[k1]->pop_global(t);
+    }
+
+    if (suc) {
+      local_queue->set_last_stolen_queue_id(sel_k);
+    } else {
+      local_queue->invalidate_last_stolen_queue_id();
+    }
+
+    return suc;
   } else if (_n == 2) {
     // Just try the other one.
     uint k = (queue_num + 1) % 2;
@@ -250,10 +303,10 @@
 }
 
 template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
+GenericTaskQueueSet<T, F>::steal(uint queue_num, E& t) {
   for (uint i = 0; i < 2 * _n; i++) {
     TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal_attempt());
-    if (steal_best_of_2(queue_num, seed, t)) {
+    if (steal_best_of_2(queue_num, t)) {
       TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal());
       return true;
     }
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -115,7 +115,7 @@
   static GlobalTLABStats* global_stats() { return _global_stats; }
 
 public:
-  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) {
+  ThreadLocalAllocBuffer() : _allocated_before_last_gc(0), _allocation_fraction(TLABAllocationWeight) {
     // do nothing.  tlabs must be inited by initialize() calls
   }
 
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -186,7 +186,7 @@
                                                                  uint full_gc_count_before,
                                                                  GCCause::Cause gc_cause)
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
-      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
+      _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
   assert(_size != 0, "An allocation should always be requested with this operation.");
   AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
 }
@@ -282,7 +282,7 @@
 }
 
 VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
-    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
+    : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(NULL) {
   // Only report if operation was really caused by an allocation.
   if (_word_size != 0) {
     AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
--- a/src/hotspot/share/gc/shared/vmGCOperations.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/vmGCOperations.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -227,7 +227,7 @@
  private:
   JvmtiGCMarker _jgcm;
  public:
-  typedef enum { MINOR, FULL, CONCURRENT, OTHER } reason_type;
+  typedef enum { MINOR, FULL, CONCURRENT } reason_type;
 
   SvcGCMarker(reason_type reason ) {
     VM_GC_Operation::notify_gc_begin(reason == FULL);
--- a/src/hotspot/share/gc/shared/workerDataArray.inline.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/workerDataArray.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -31,10 +31,10 @@
 
 template <typename T>
 WorkerDataArray<T>::WorkerDataArray(uint length, const char* title) :
- _title(title),
- _length(0) {
+ _data(NULL),
+ _length(length),
+ _title(title) {
   assert(length > 0, "Must have some workers to store data for");
-  _length = length;
   _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
   for (uint i = 0; i < MaxThreadWorkItems; i++) {
     _thread_work_items[i] = NULL;
--- a/src/hotspot/share/gc/shared/workgroup.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/workgroup.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -187,12 +187,13 @@
   Monitor* _monitor;
 
  public:
-  MutexGangTaskDispatcher()
-      : _task(NULL),
-        _monitor(new Monitor(Monitor::leaf, "WorkGang dispatcher lock", false, Monitor::_safepoint_check_never)),
-        _started(0),
-        _finished(0),
-        _num_workers(0) {}
+  MutexGangTaskDispatcher() :
+    _task(NULL),
+    _started(0),
+    _finished(0),
+    _num_workers(0),
+    _monitor(new Monitor(Monitor::leaf, "WorkGang dispatcher lock", false, Monitor::_safepoint_check_never)) {
+  }
 
   ~MutexGangTaskDispatcher() {
     delete _monitor;
@@ -408,7 +409,7 @@
 // SubTasksDone functions.
 
 SubTasksDone::SubTasksDone(uint n) :
-  _n_tasks(n), _tasks(NULL) {
+  _tasks(NULL), _n_tasks(n), _threads_completed(0) {
   _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal);
   guarantee(_tasks != NULL, "alloc failure");
   clear();
--- a/src/hotspot/share/gc/shared/workgroup.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/shared/workgroup.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -130,10 +130,11 @@
 
  public:
   AbstractWorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads) :
-      _name(name),
+      _workers(NULL),
       _total_workers(workers),
       _active_workers(UseDynamicNumberOfGCThreads ? 1U : workers),
       _created_workers(0),
+      _name(name),
       _are_GC_task_threads(are_GC_task_threads),
       _are_ConcurrentGC_threads(are_ConcurrentGC_threads)
   { }
--- a/src/hotspot/share/gc/z/zBarrierSet.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -22,8 +22,12 @@
  */
 
 #include "precompiled.hpp"
+#ifdef COMPILER1
 #include "gc/z/c1/zBarrierSetC1.hpp"
+#endif
+#ifdef COMPILER2
 #include "gc/z/c2/zBarrierSetC2.hpp"
+#endif
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zGlobals.hpp"
@@ -33,8 +37,8 @@
 
 ZBarrierSet::ZBarrierSet() :
     BarrierSet(make_barrier_set_assembler<ZBarrierSetAssembler>(),
-               make_barrier_set_c1<ZBarrierSetC1>(),
-               make_barrier_set_c2<ZBarrierSetC2>(),
+               COMPILER1_PRESENT( make_barrier_set_c1<ZBarrierSetC1>() ) NOT_COMPILER1(NULL),
+               COMPILER2_PRESENT( make_barrier_set_c2<ZBarrierSetC2>() ) NOT_COMPILER2(NULL),
                BarrierSet::FakeRtti(BarrierSet::ZBarrierSet)) {}
 
 ZBarrierSetAssembler* ZBarrierSet::assembler() {
--- a/src/hotspot/share/gc/z/zDriver.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zDriver.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -98,7 +98,7 @@
     ZStatSample(ZSamplerJavaThreads, Threads::number_of_threads());
 
     // JVMTI support
-    SvcGCMarker sgcm(SvcGCMarker::OTHER);
+    SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
 
     // Setup GC id
     GCIdMark gcid(_gc_id);
--- a/src/hotspot/share/gc/z/zHeap.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -96,7 +96,7 @@
 }
 
 bool ZHeap::is_initialized() const {
-  return _page_allocator.is_initialized();
+  return _page_allocator.is_initialized() && _mark.is_initialized();
 }
 
 size_t ZHeap::min_capacity() const {
--- a/src/hotspot/share/gc/z/zMark.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zMark.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -70,6 +70,10 @@
     _ncontinue(0),
     _nworkers(0) {}
 
+bool ZMark::is_initialized() const {
+  return _allocator.is_initialized();
+}
+
 size_t ZMark::calculate_nstripes(uint nworkers) const {
   // Calculate the number of stripes from the number of workers we use,
   // where the number of stripes must be a power of two and we want to
--- a/src/hotspot/share/gc/z/zMark.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zMark.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,6 +102,8 @@
 public:
   ZMark(ZWorkers* workers, ZPageTable* pagetable);
 
+  bool is_initialized() const;
+
   template <bool finalizable, bool publish> void mark_object(uintptr_t addr);
 
   void start();
--- a/src/hotspot/share/gc/z/zMarkStack.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zMarkStack.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -56,22 +56,24 @@
   return _top != 0;
 }
 
-bool ZMarkStackSpace::expand() {
+void ZMarkStackSpace::expand() {
   const size_t max = ZMarkStackSpaceStart + ZMarkStacksMax;
   if (_end + ZMarkStackSpaceExpandSize > max) {
-    // Expansion limit reached
-    return false;
+    // Expansion limit reached. This is a fatal error since we
+    // currently can't recover from running out of mark stack space.
+    fatal("Mark stack overflow (current size " SIZE_FORMAT "M, max size " SIZE_FORMAT "M),"
+          " use -XX:ZMarkStacksMax=<size> to increase this limit",
+          (_end - ZMarkStackSpaceStart) / M, ZMarkStacksMax / M);
   }
 
   void* const res = mmap((void*)_end, ZMarkStackSpaceExpandSize,
                          PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
   if (res == MAP_FAILED) {
+    // Failed to map memory. This is a fatal error since we
+    // currently can't recover from running out of mark stack space.
     ZErrno err;
-    log_error(gc, marking)("Failed to map memory for marking stacks (%s)", err.to_string());
-    return false;
+    fatal("Failed to map memory for marking stacks (%s)", err.to_string());
   }
-
-  return true;
 }
 
 uintptr_t ZMarkStackSpace::alloc_space(size_t size) {
@@ -105,14 +107,7 @@
   }
 
   // Expand stack space
-  if (!expand()) {
-    // We currently can't handle the situation where we
-    // are running out of mark stack space.
-    fatal("Mark stack overflow (allocated " SIZE_FORMAT "M, size " SIZE_FORMAT "M, max " SIZE_FORMAT "M),"
-          " use -XX:ZMarkStacksMax=? to increase this limit",
-          (_end - ZMarkStackSpaceStart) / M, size / M, ZMarkStacksMax / M);
-    return 0;
-  }
+  expand();
 
   log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M",
                          (_end - ZMarkStackSpaceStart) / M,
--- a/src/hotspot/share/gc/z/zMarkStack.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zMarkStack.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -79,7 +79,7 @@
   volatile uintptr_t _top;
   volatile uintptr_t _end;
 
-  bool expand();
+  void expand();
 
   uintptr_t alloc_space(size_t size);
   uintptr_t expand_and_alloc_space(size_t size);
--- a/src/hotspot/share/gc/z/zServiceability.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zServiceability.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -30,56 +30,63 @@
 #include "memory/metaspaceCounters.hpp"
 #include "runtime/perfData.hpp"
 
-class ZOldGenerationCounters : public GenerationCounters {
+class ZGenerationCounters : public GenerationCounters {
 public:
-  ZOldGenerationCounters(const char* name, size_t min_capacity, size_t max_capacity) :
-    // The "1, 1" parameters are for the n-th generation (=1) with 1 space.
-    GenerationCounters(name,
-                       1 /* ordinal */,
-                       1 /* spaces */,
-                       min_capacity /* min_capacity */,
-                       max_capacity /* max_capacity */,
-                       min_capacity /* curr_capacity */) {}
+  ZGenerationCounters(const char* name, int ordinal, int spaces,
+                      size_t min_capacity, size_t max_capacity, size_t curr_capacity) :
+      GenerationCounters(name, ordinal, spaces,
+                         min_capacity, max_capacity, curr_capacity) {}
 
-  virtual void update_all() {
-    size_t committed = ZHeap::heap()->capacity();
-    _current_size->set_value(committed);
+  void update_capacity(size_t capacity) {
+    _current_size->set_value(capacity);
   }
 };
 
 // Class to expose perf counters used by jstat.
 class ZServiceabilityCounters : public CHeapObj<mtGC> {
 private:
-  ZOldGenerationCounters _old_collection_counters;
-  HSpaceCounters         _old_space_counters;
+  ZGenerationCounters _generation_counters;
+  HSpaceCounters      _space_counters;
+  CollectorCounters   _collector_counters;
 
 public:
   ZServiceabilityCounters(size_t min_capacity, size_t max_capacity);
 
+  CollectorCounters* collector_counters();
+
   void update_sizes();
 };
 
 ZServiceabilityCounters::ZServiceabilityCounters(size_t min_capacity, size_t max_capacity) :
     // generation.1
-    _old_collection_counters("old",
-                             min_capacity,
-                             max_capacity),
+    _generation_counters("old"        /* name */,
+                         1            /* ordinal */,
+                         1            /* spaces */,
+                         min_capacity /* min_capacity */,
+                         max_capacity /* max_capacity */,
+                         min_capacity /* curr_capacity */),
     // generation.1.space.0
-    _old_space_counters(_old_collection_counters.name_space(),
-                        "space",
-                        0 /* ordinal */,
-                        max_capacity /* max_capacity */,
-                        min_capacity /* init_capacity */) {}
+    _space_counters(_generation_counters.name_space(),
+                    "space"      /* name */,
+                    0            /* ordinal */,
+                    max_capacity /* max_capacity */,
+                    min_capacity /* init_capacity */),
+    // gc.collector.2
+    _collector_counters("stop-the-world" /* name */,
+                        2                /* ordinal */) {}
+
+CollectorCounters* ZServiceabilityCounters::collector_counters() {
+  return &_collector_counters;
+}
 
 void ZServiceabilityCounters::update_sizes() {
   if (UsePerfData) {
-    size_t capacity = ZHeap::heap()->capacity();
-    size_t used = MIN2(ZHeap::heap()->used(), capacity);
+    const size_t capacity = ZHeap::heap()->capacity();
+    const size_t used = MIN2(ZHeap::heap()->used(), capacity);
 
-    _old_space_counters.update_capacity(capacity);
-    _old_space_counters.update_used(used);
-
-    _old_collection_counters.update_all();
+    _generation_counters.update_capacity(capacity);
+    _space_counters.update_capacity(capacity);
+    _space_counters.update_used(used);
 
     MetaspaceCounters::update_performance_counters();
     CompressedClassSpaceCounters::update_performance_counters();
@@ -147,10 +154,8 @@
            is_gc_end   /* recordGCEndTime */,
            is_gc_end   /* countCollection */) {}
 
-ZServiceabilityCountersTracer::ZServiceabilityCountersTracer() {
-  // Nothing to trace with TraceCollectorStats, since ZGC has
-  // neither a young collector nor a full collector.
-}
+ZServiceabilityCountersTracer::ZServiceabilityCountersTracer() :
+    _stats(ZHeap::heap()->serviceability_counters()->collector_counters()) {}
 
 ZServiceabilityCountersTracer::~ZServiceabilityCountersTracer() {
   ZHeap::heap()->serviceability_counters()->update_sizes();
--- a/src/hotspot/share/gc/z/zServiceability.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zServiceability.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 #ifndef SHARE_GC_Z_ZSERVICEABILITY_HPP
 #define SHARE_GC_Z_ZSERVICEABILITY_HPP
 
+#include "gc/shared/collectorCounters.hpp"
 #include "memory/allocation.hpp"
 #include "services/memoryManager.hpp"
 #include "services/memoryPool.hpp"
@@ -76,6 +77,9 @@
 };
 
 class ZServiceabilityCountersTracer {
+private:
+  TraceCollectorStats _stats;
+
 public:
   ZServiceabilityCountersTracer();
   ~ZServiceabilityCountersTracer();
--- a/src/hotspot/share/gc/z/zStat.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/gc/z/zStat.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -83,8 +83,8 @@
   ZStatSamplerHistoryInterval() :
       _next(0),
       _samples(),
-      _total(),
-      _accumulated() {}
+      _accumulated(),
+      _total() {}
 
   bool add(const ZStatSamplerData& new_sample) {
     // Insert sample
--- a/src/hotspot/share/interpreter/linkResolver.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/interpreter/linkResolver.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -159,21 +159,21 @@
   LinkInfo(Klass* resolved_klass, Symbol* name, Symbol* signature, Klass* current_klass,
            AccessCheck check_access = needs_access_check,
            constantTag tag = JVM_CONSTANT_Invalid) :
-    _resolved_klass(resolved_klass),
-    _name(name), _signature(signature), _current_klass(current_klass), _current_method(methodHandle()),
+    _name(name),
+    _signature(signature), _resolved_klass(resolved_klass), _current_klass(current_klass), _current_method(methodHandle()),
     _check_access(check_access == needs_access_check), _tag(tag) {}
 
   LinkInfo(Klass* resolved_klass, Symbol* name, Symbol* signature, const methodHandle& current_method,
            AccessCheck check_access = needs_access_check,
            constantTag tag = JVM_CONSTANT_Invalid) :
-    _resolved_klass(resolved_klass),
-    _name(name), _signature(signature), _current_klass(current_method->method_holder()), _current_method(current_method),
+    _name(name),
+    _signature(signature), _resolved_klass(resolved_klass), _current_klass(current_method->method_holder()), _current_method(current_method),
     _check_access(check_access == needs_access_check), _tag(tag) {}
 
   // Case where we just find the method and don't check access against the current class
   LinkInfo(Klass* resolved_klass, Symbol*name, Symbol* signature) :
-    _resolved_klass(resolved_klass),
-    _name(name), _signature(signature), _current_klass(NULL), _current_method(methodHandle()),
+    _name(name),
+    _signature(signature), _resolved_klass(resolved_klass), _current_klass(NULL), _current_method(methodHandle()),
     _check_access(false), _tag(JVM_CONSTANT_Invalid) {}
 
   // accessors
--- a/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jfr/dcmd/jfrDcmds.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -345,8 +345,8 @@
   _settings("settings", "Settings file(s), e.g. profile or default. See JRE_HOME/lib/jfr", "STRING SET", false),
   _delay("delay", "Delay recording start with (s)econds, (m)inutes), (h)ours), or (d)ays, e.g. 5h.", "NANOTIME", false, "0"),
   _duration("duration", "Duration of recording in (s)econds, (m)inutes, (h)ours, or (d)ays, e.g. 300s.", "NANOTIME", false, "0"),
+  _disk("disk", "Recording should be persisted to disk", "BOOLEAN", false),
   _filename("filename", "Resulting recording filename, e.g. \\\"" JFR_FILENAME_EXAMPLE "\\\"", "STRING", false),
-  _disk("disk", "Recording should be persisted to disk", "BOOLEAN", false),
   _maxage("maxage", "Maximum time to keep recorded data (on disk) in (s)econds, (m)inutes, (h)ours, or (d)ays, e.g. 60m, or 0 for no limit", "NANOTIME", false, "0"),
   _maxsize("maxsize", "Maximum amount of bytes to keep (on disk) in (k)B, (M)B or (G)B, e.g. 500M, or 0 for no limit", "MEMORY SIZE", false, "0"),
   _dump_on_exit("dumponexit", "Dump running recording when JVM shuts down", "BOOLEAN", false),
--- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -265,8 +265,8 @@
   AnnotationElementIterator(const InstanceKlass* ik, address buffer, u2 limit) : _ik(ik),
                                                                                  _buffer(buffer),
                                                                                  _limit(limit),
-                                                                                 _next(element_name_offset),
-                                                                                 _current(element_name_offset) {
+                                                                                 _current(element_name_offset),
+                                                                                 _next(element_name_offset) {
     assert(_buffer != NULL, "invariant");
     assert(_next == element_name_offset, "invariant");
     assert(_current == element_name_offset, "invariant");
@@ -319,10 +319,10 @@
 
  public:
   AnnotationIterator(const InstanceKlass* ik, AnnotationArray* ar) : _ik(ik),
+                                                                     _limit(ar != NULL ? ar->length() : 0),
+                                                                     _buffer(_limit > 2 ? ar->adr_at(2) : NULL),
                                                                      _current(0),
-                                                                     _next(0),
-                                                                     _limit(ar != NULL ? ar->length() : 0),
-                                                                     _buffer(_limit > 2 ? ar->adr_at(2) : NULL) {
+                                                                     _next(0) {
     if (_buffer != NULL) {
       _limit -= 2; // subtract sizeof(u2) number of annotations field
     }
--- a/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -31,9 +31,9 @@
   _free_list(),
   _in_use_list(),
   _last_resolved(NULL),
+  _allocated(0),
   _limit(limit),
-  _cache_size(cache_size),
-  _allocated(0) {
+  _cache_size(cache_size) {
 }
 
 SampleList::~SampleList() {
--- a/src/hotspot/share/jfr/periodic/sampling/jfrCallTrace.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrCallTrace.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -37,7 +37,7 @@
   bool _in_java;
 
  public:
-  JfrGetCallTrace(bool in_java, JavaThread* thread) : _in_java(in_java), _thread(thread) {}
+  JfrGetCallTrace(bool in_java, JavaThread* thread) : _thread(thread), _in_java(in_java) {}
   bool find_top_frame(frame& topframe, Method** method, frame& first_frame);
   bool get_topframe(void* ucontext, frame& top);
 };
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -28,7 +28,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 
-JfrSymbolId::JfrSymbolId() : _symbol_id_counter(0), _sym_table(new SymbolTable(this)), _cstring_table(new CStringTable(this)) {
+JfrSymbolId::JfrSymbolId() : _sym_table(new SymbolTable(this)), _cstring_table(new CStringTable(this)), _symbol_id_counter(0) {
   assert(_sym_table != NULL, "invariant");
   assert(_cstring_table != NULL, "invariant");
   initialize();
--- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -239,8 +239,8 @@
   _checkpoint_manager(JfrCheckpointManager::instance()),
   _chunkwriter(JfrRepository::chunkwriter()),
   _repository(JfrRepository::instance()),
+  _stack_trace_repository(JfrStackTraceRepository::instance()),
   _storage(JfrStorage::instance()),
-  _stack_trace_repository(JfrStackTraceRepository::instance()),
   _string_pool(JfrStringPool::instance()) {}
 
 void JfrRecorderService::start() {
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -78,8 +78,8 @@
                                                         _id(0),
                                                         _nr_of_frames(0),
                                                         _hash(0),
+                                                        _max_frames(max_frames),
                                                         _reached_root(false),
-                                                        _max_frames(max_frames),
                                                         _lineno(false) {}
   bool record_thread(JavaThread& thread, frame& frame);
   bool record_safe(JavaThread* thread, int skip, bool leakp = false);
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -281,6 +281,7 @@
     if (log_is_enabled(Info, exceptions)) {
       ResourceMark rm;
       stringStream tempst;
+      assert(cm->method() != NULL, "Unexpected null method()");
       tempst.print("compiled method <%s>\n"
                    " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                    cm->method()->print_value_string(), p2i(pc), p2i(thread));
--- a/src/hotspot/share/libadt/dict.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/libadt/dict.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -54,8 +54,8 @@
 // doubled in size; the total amount of EXTRA times all hash functions are
 // computed for the doubling is no more than the current size - thus the
 // doubling in size costs no more than a constant factor in speed.
-Dict::Dict(CmpKey initcmp, Hash inithash) : _hash(inithash), _cmp(initcmp),
-  _arena(Thread::current()->resource_area()) {
+Dict::Dict(CmpKey initcmp, Hash inithash) : _arena(Thread::current()->resource_area()),
+  _hash(inithash), _cmp(initcmp) {
   int i;
 
   // Precompute table of null character hashes
@@ -74,7 +74,7 @@
 }
 
 Dict::Dict(CmpKey initcmp, Hash inithash, Arena *arena, int size)
-: _hash(inithash), _cmp(initcmp), _arena(arena) {
+: _arena(arena), _hash(inithash), _cmp(initcmp) {
   int i;
 
   // Precompute table of null character hashes
@@ -161,7 +161,7 @@
 
 //------------------------------Dict-----------------------------------------
 // Deep copy a dictionary.
-Dict::Dict( const Dict &d ) : _size(d._size), _cnt(d._cnt), _hash(d._hash),_cmp(d._cmp), _arena(d._arena) {
+Dict::Dict( const Dict &d ) : _arena(d._arena), _size(d._size), _cnt(d._cnt), _hash(d._hash), _cmp(d._cmp) {
   _bin = (bucket*)_arena->Amalloc_4(sizeof(bucket)*_size);
   memcpy( (void*)_bin, (void*)d._bin, sizeof(bucket)*_size );
   for( uint i=0; i<_size; i++ ) {
--- a/src/hotspot/share/logging/logFileOutput.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/logging/logFileOutput.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -44,9 +44,9 @@
 
 LogFileOutput::LogFileOutput(const char* name)
     : LogFileStreamOutput(NULL), _name(os::strdup_check_oom(name, mtLogging)),
-      _file_name(NULL), _archive_name(NULL), _archive_name_len(0),
-      _rotate_size(DefaultFileSize), _file_count(DefaultFileCount),
-      _current_size(0), _current_file(0), _rotation_semaphore(1) {
+      _file_name(NULL), _archive_name(NULL), _current_file(0),
+      _file_count(DefaultFileCount), _archive_name_len(0),
+      _rotate_size(DefaultFileSize), _current_size(0), _rotation_semaphore(1) {
   assert(strstr(name, Prefix) == name, "invalid output name '%s': missing prefix: %s", name, Prefix);
   _file_name = make_file_name(name + strlen(Prefix), _pid_str, _vm_start_time_str);
 }
--- a/src/hotspot/share/logging/logMessageBuffer.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/logging/logMessageBuffer.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -72,7 +72,7 @@
 
    public:
     Iterator(const LogMessageBuffer& message, LogLevelType level, LogDecorations& decorations)
-        : _message(message), _level(level), _decorations(decorations), _current_line_index(0) {
+        : _message(message), _current_line_index(0), _level(level), _decorations(decorations) {
       skip_messages_with_finer_level();
     }
 
--- a/src/hotspot/share/memory/heapInspection.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/heapInspection.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -465,7 +465,7 @@
   }
 }
 
-static void print_interface(outputStream* st, Klass* intf_klass, const char* intf_type, int indent) {
+static void print_interface(outputStream* st, InstanceKlass* intf_klass, const char* intf_type, int indent) {
   print_indent(st, indent);
   st->print("  implements ");
   print_classname(st, intf_klass);
@@ -501,13 +501,13 @@
 
   // Print any interfaces the class has.
   if (print_interfaces) {
-    Array<Klass*>* local_intfs = klass->local_interfaces();
-    Array<Klass*>* trans_intfs = klass->transitive_interfaces();
+    Array<InstanceKlass*>* local_intfs = klass->local_interfaces();
+    Array<InstanceKlass*>* trans_intfs = klass->transitive_interfaces();
     for (int i = 0; i < local_intfs->length(); i++) {
       print_interface(st, local_intfs->at(i), "declared", indent);
     }
     for (int i = 0; i < trans_intfs->length(); i++) {
-      Klass* trans_interface = trans_intfs->at(i);
+      InstanceKlass* trans_interface = trans_intfs->at(i);
       // Only print transitive interfaces if they are not also declared.
       if (!local_intfs->contains(trans_interface)) {
         print_interface(st, trans_interface, "inherited", indent);
--- a/src/hotspot/share/memory/heapInspection.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/heapInspection.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -192,7 +192,7 @@
 
  public:
   KlassInfoEntry(Klass* k, KlassInfoEntry* next) :
-    _klass(k), _instance_count(0), _instance_words(0), _next(next), _index(-1),
+    _next(next), _klass(k), _instance_count(0), _instance_words(0), _index(-1),
     _do_print(false), _subclasses(NULL)
   {}
   ~KlassInfoEntry();
--- a/src/hotspot/share/memory/metaspace.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -1364,8 +1364,8 @@
 // ClassLoaderMetaspace
 
 ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
-  : _lock(lock)
-  , _space_type(type)
+  : _space_type(type)
+  , _lock(lock)
   , _vsm(NULL)
   , _class_vsm(NULL)
 {
--- a/src/hotspot/share/memory/metaspace/metachunk.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/metachunk.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -53,13 +53,13 @@
 Metachunk::Metachunk(ChunkIndex chunktype, bool is_class, size_t word_size,
                      VirtualSpaceNode* container)
     : Metabase<Metachunk>(word_size),
+    _container(container),
+    _top(NULL),
+    _sentinel(CHUNK_SENTINEL),
     _chunk_type(chunktype),
     _is_class(is_class),
-    _sentinel(CHUNK_SENTINEL),
     _origin(origin_normal),
-    _use_count(0),
-    _top(NULL),
-    _container(container)
+    _use_count(0)
 {
   _top = initial_top();
   set_is_tagged_free(false);
--- a/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/metaspaceDCmd.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -35,14 +35,14 @@
   : DCmdWithParser(output, heap)
   , _basic("basic", "Prints a basic summary (does not need a safepoint).", "BOOLEAN", false, "false")
   , _show_loaders("show-loaders", "Shows usage by class loader.", "BOOLEAN", false, "false")
-  , _show_classes("show-classes", "If show-loaders is set, shows loaded classes for each loader.", "BOOLEAN", false, "false")
+  , _by_spacetype("by-spacetype", "Break down numbers by loader type.", "BOOLEAN", false, "false")
   , _by_chunktype("by-chunktype", "Break down numbers by chunk type.", "BOOLEAN", false, "false")
-  , _by_spacetype("by-spacetype", "Break down numbers by loader type.", "BOOLEAN", false, "false")
   , _show_vslist("vslist", "Shows details about the underlying virtual space.", "BOOLEAN", false, "false")
   , _show_vsmap("vsmap", "Shows chunk composition of the underlying virtual spaces", "BOOLEAN", false, "false")
  , _scale("scale", "Memory usage in which to scale. Valid values are: 1, KB, MB or GB (fixed scale) "
           "or \"dynamic\" for a dynamically chosen scale.",
-     "STRING", false, "dynamic")
+           "STRING", false, "dynamic")
+  , _show_classes("show-classes", "If show-loaders is set, shows loaded classes for each loader.", "BOOLEAN", false, "false")
 {
   _dcmdparser.add_dcmd_option(&_basic);
   _dcmdparser.add_dcmd_option(&_show_loaders);
--- a/src/hotspot/share/memory/metaspace/printCLDMetaspaceInfoClosure.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/printCLDMetaspaceInfoClosure.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -38,7 +38,7 @@
     bool do_print_classes, bool break_down_by_chunktype)
 : _out(out), _scale(scale), _do_print(do_print), _do_print_classes(do_print_classes)
 , _break_down_by_chunktype(break_down_by_chunktype)
-, _num_loaders(0), _num_loaders_unloading(0), _num_loaders_without_metaspace(0)
+, _num_loaders(0), _num_loaders_without_metaspace(0), _num_loaders_unloading(0)
 {
   memset(_num_loaders_by_spacetype, 0, sizeof(_num_loaders_by_spacetype));
 }
--- a/src/hotspot/share/memory/metaspace/spaceManager.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/spaceManager.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -228,16 +228,15 @@
 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
                            Metaspace::MetaspaceType space_type,//
                            Mutex* lock) :
+  _lock(lock),
   _mdtype(mdtype),
   _space_type(space_type),
+  _chunk_list(NULL),
+  _current_chunk(NULL),
+  _overhead_words(0),
   _capacity_words(0),
   _used_words(0),
-  _overhead_words(0),
-  _block_freelists(NULL),
-  _lock(lock),
-  _chunk_list(NULL),
-  _current_chunk(NULL)
-{
+  _block_freelists(NULL) {
   Metadebug::init_allocation_fail_alot_count();
   memset(_num_chunks_by_type, 0, sizeof(_num_chunks_by_type));
   log_trace(gc, metaspace, freelist)("SpaceManager(): " PTR_FORMAT, p2i(this));
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -149,9 +149,9 @@
 }
 
 VirtualSpaceList::VirtualSpaceList(size_t word_size) :
-                                   _is_class(false),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
+                                   _is_class(false),
                                    _reserved_words(0),
                                    _committed_words(0),
                                    _virtual_space_count(0) {
@@ -161,9 +161,9 @@
 }
 
 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
-                                   _is_class(true),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
+                                   _is_class(true),
                                    _reserved_words(0),
                                    _committed_words(0),
                                    _virtual_space_count(0) {
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -57,7 +57,7 @@
 
 // byte_size is the size of the associated virtualspace.
 VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) :
-    _is_class(is_class), _top(NULL), _next(NULL), _rs(), _container_count(0), _occupancy_map(NULL) {
+    _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) {
   assert_is_aligned(bytes, Metaspace::reserve_alignment());
   bool large_pages = should_commit_large_pages_when_reserving(bytes);
   _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -79,7 +79,7 @@
 
   VirtualSpaceNode(bool is_class, size_t byte_size);
   VirtualSpaceNode(bool is_class, ReservedSpace rs) :
-    _is_class(is_class), _top(NULL), _next(NULL), _rs(rs), _container_count(0), _occupancy_map(NULL) {}
+    _next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {}
   ~VirtualSpaceNode();
 
   // Convenience functions for logical bottom and end
--- a/src/hotspot/share/memory/universe.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -135,6 +135,7 @@
 Array<int>* Universe::_the_empty_int_array            = NULL;
 Array<u2>* Universe::_the_empty_short_array           = NULL;
 Array<Klass*>* Universe::_the_empty_klass_array     = NULL;
+Array<InstanceKlass*>* Universe::_the_empty_instance_klass_array  = NULL;
 Array<Method*>* Universe::_the_empty_method_array   = NULL;
 
 // These variables are guarded by FullGCALot_lock.
@@ -234,6 +235,7 @@
   it->push(&_the_empty_int_array);
   it->push(&_the_empty_short_array);
   it->push(&_the_empty_klass_array);
+  it->push(&_the_empty_instance_klass_array);
   it->push(&_the_empty_method_array);
   it->push(&_the_array_interfaces_array);
 
@@ -287,6 +289,7 @@
   f->do_ptr((void**)&_the_empty_short_array);
   f->do_ptr((void**)&_the_empty_method_array);
   f->do_ptr((void**)&_the_empty_klass_array);
+  f->do_ptr((void**)&_the_empty_instance_klass_array);
   _finalizer_register_cache->serialize(f);
   _loader_addClass_cache->serialize(f);
   _pd_implies_cache->serialize(f);
@@ -349,11 +352,12 @@
 
         ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
 
-        _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
-        _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
-        _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
-        _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
-        _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
+        _the_array_interfaces_array     = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
+        _the_empty_int_array            = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
+        _the_empty_short_array          = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
+        _the_empty_method_array         = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
+        _the_empty_klass_array          = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
+        _the_empty_instance_klass_array = MetadataFactory::new_array<InstanceKlass*>(null_cld, 0, CHECK);
       }
     }
 
--- a/src/hotspot/share/memory/universe.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/universe.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -161,10 +161,11 @@
   // preallocated cause message for delayed StackOverflowError
   static oop          _delayed_stack_overflow_error_message;
 
-  static Array<int>*       _the_empty_int_array;    // Canonicalized int array
-  static Array<u2>*        _the_empty_short_array;  // Canonicalized short array
-  static Array<Klass*>*  _the_empty_klass_array;  // Canonicalized klass obj array
-  static Array<Method*>* _the_empty_method_array; // Canonicalized method obj array
+  static Array<int>*            _the_empty_int_array;            // Canonicalized int array
+  static Array<u2>*             _the_empty_short_array;          // Canonicalized short array
+  static Array<Klass*>*         _the_empty_klass_array;          // Canonicalized klass array
+  static Array<InstanceKlass*>* _the_empty_instance_klass_array; // Canonicalized instance klass array
+  static Array<Method*>*        _the_empty_method_array;         // Canonicalized method array
 
   static Array<Klass*>*  _the_array_interfaces_array;
 
@@ -357,10 +358,11 @@
   static bool         has_reference_pending_list();
   static oop          swap_reference_pending_list(oop list);
 
-  static Array<int>*       the_empty_int_array()    { return _the_empty_int_array; }
-  static Array<u2>*        the_empty_short_array()  { return _the_empty_short_array; }
-  static Array<Method*>* the_empty_method_array() { return _the_empty_method_array; }
-  static Array<Klass*>*  the_empty_klass_array()  { return _the_empty_klass_array; }
+  static Array<int>*             the_empty_int_array()    { return _the_empty_int_array; }
+  static Array<u2>*              the_empty_short_array()  { return _the_empty_short_array; }
+  static Array<Method*>*         the_empty_method_array() { return _the_empty_method_array; }
+  static Array<Klass*>*          the_empty_klass_array()  { return _the_empty_klass_array; }
+  static Array<InstanceKlass*>*  the_empty_instance_klass_array() { return _the_empty_instance_klass_array; }
 
   // OutOfMemoryError support. Returns an error with the required message. The returned error
   // may or may not have a backtrace. If error has a backtrace then the stack trace is already
--- a/src/hotspot/share/memory/virtualspace.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/memory/virtualspace.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -36,7 +36,7 @@
 
 // Dummy constructor
 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
-    _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
+    _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
--- a/src/hotspot/share/oops/accessDecorators.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/accessDecorators.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -212,8 +212,15 @@
                                                     ARRAYCOPY_DISJOINT | ARRAYCOPY_ARRAYOF |
                                                     ARRAYCOPY_ATOMIC | ARRAYCOPY_ALIGNED;
 
+// == Resolve barrier decorators ==
+// * ACCESS_READ: Indicate that the resolved object is accessed read-only. This allows the GC
+//   backend to use weaker and more efficient barriers.
+// * ACCESS_WRITE: Indicate that the resolved object is used for write access.
+const DecoratorSet ACCESS_READ                    = UCONST64(1) << 29;
+const DecoratorSet ACCESS_WRITE                   = UCONST64(1) << 30;
+
 // Keep track of the last decorator.
-const DecoratorSet DECORATOR_LAST = UCONST64(1) << 28;
+const DecoratorSet DECORATOR_LAST = UCONST64(1) << 30;
 
 namespace AccessInternal {
   // This class adds implied decorators that follow according to decorator rules.
--- a/src/hotspot/share/oops/arrayKlass.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/arrayKlass.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -49,7 +49,7 @@
 }
 
 
-Klass* ArrayKlass::java_super() const {
+InstanceKlass* ArrayKlass::java_super() const {
   if (super() == NULL)  return NULL;  // bootstrap case
   // Array klasses have primary supertypes which are not reported to Java.
   // Example super chain:  String[][] -> Object[][] -> Object[] -> Object
@@ -90,7 +90,7 @@
     // the vtable of klass Object.
     set_vtable_length(Universe::base_vtable_size());
     set_name(name);
-    set_super(Universe::is_bootstrapping() ? (Klass*)NULL : SystemDictionary::Object_klass());
+    set_super(Universe::is_bootstrapping() ? NULL : SystemDictionary::Object_klass());
     set_layout_helper(Klass::_lh_neutral_value);
     set_is_cloneable(); // All arrays are considered to be cloneable (See JLS 20.1.5)
     JFR_ONLY(INIT_ID(this);)
@@ -113,7 +113,7 @@
 }
 
 GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots,
-                                                            Array<Klass*>* transitive_interfaces) {
+                                                            Array<InstanceKlass*>* transitive_interfaces) {
   // interfaces = { cloneable_klass, serializable_klass };
   assert(num_extra_slots == 0, "sanity of primitive array type");
   assert(transitive_interfaces == NULL, "sanity");
--- a/src/hotspot/share/oops/arrayKlass.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/arrayKlass.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -73,7 +73,7 @@
   // type of elements (T_OBJECT for both oop arrays and array-arrays)
   BasicType element_type() const        { return layout_helper_element_type(layout_helper()); }
 
-  virtual Klass* java_super() const;//{ return SystemDictionary::Object_klass(); }
+  virtual InstanceKlass* java_super() const;//{ return SystemDictionary::Object_klass(); }
 
   // Allocation
   // Sizes points to the first dimension of the array, subsequent dimensions
@@ -100,7 +100,7 @@
   }
 
   GrowableArray<Klass*>* compute_secondary_supers(int num_extra_slots,
-                                                  Array<Klass*>* transitive_interfaces);
+                                                  Array<InstanceKlass*>* transitive_interfaces);
   bool compute_is_subtype_of(Klass* k);
 
   // Sizing
--- a/src/hotspot/share/oops/constantPool.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/constantPool.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -2490,9 +2490,9 @@
   if (has_preresolution()) st->print("/preresolution");
   if (operands() != NULL)  st->print("/operands[%d]", operands()->length());
   print_address_on(st);
-  st->print(" for ");
-  pool_holder()->print_value_on(st);
   if (pool_holder() != NULL) {
+    st->print(" for ");
+    pool_holder()->print_value_on(st);
     bool extra = (pool_holder()->constants() != this);
     if (extra)  st->print(" (extra)");
   }
--- a/src/hotspot/share/oops/constantPool.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/constantPool.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -966,7 +966,7 @@
   void       set_value(u2 value)          { _value = value; }
 
   SymbolHashMapEntry(unsigned int hash, Symbol* symbol, u2 value)
-    : _hash(hash), _symbol(symbol), _value(value), _next(NULL) {}
+    : _hash(hash), _next(NULL), _symbol(symbol), _value(value) {}
 
 }; // End SymbolHashMapEntry class
 
--- a/src/hotspot/share/oops/cpCache.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/cpCache.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -493,7 +493,7 @@
       switch (invoke_code) {
       case Bytecodes::_invokeinterface:
         assert(f1->is_klass(), "");
-        return klassItable::method_for_itable_index((Klass*)f1, f2_as_index());
+        return klassItable::method_for_itable_index((InstanceKlass*)f1, f2_as_index());
       case Bytecodes::_invokestatic:
       case Bytecodes::_invokespecial:
         assert(!has_appendix(), "");
--- a/src/hotspot/share/oops/instanceKlass.cpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Thu Aug 09 22:06:11 2018 +0200
@@ -402,13 +402,13 @@
 
 InstanceKlass::InstanceKlass(const ClassFileParser& parser, unsigned kind, KlassID id) :
   Klass(id),
+  _nest_members(NULL),
+  _nest_host_index(0),
+  _nest_host(NULL),
   _static_field_size(parser.static_field_size()),
   _nonstatic_oop_map_size(nonstatic_oop_map_size(parser.total_oop_map_count())),
   _itable_len(parser.itable_size()),
-  _reference_type(parser.reference_type()),
-  _nest_members(NULL),
-  _nest_host_index(0),
-  _nest_host(NULL) {
+  _reference_type(parser.reference_type()) {
     set_vtable_length(parser.vtable_size());
     set_kind(kind);
     set_access_flags(parser.access_flags());
@@ -439,24 +439,24 @@
 
 void InstanceKlass::deallocate_interfaces(ClassLoaderData* loader_data,
                                           const Klass* super_klass,
-                                          Array<Klass*>* local_interfaces,
-                                          Array<Klass*>* transitive_interfaces) {
+                                          Array<InstanceKlass*>* local_interfaces,
+                                          Array<InstanceKlass*>* transitive_interfaces) {
   // Only deallocate transitive interfaces if not empty, same as super class
   // or same as local interfaces.  See code in parseClassFile.
-  Array<Klass*>* ti = transitive_interfaces;
-  if (ti != Universe::the_empty_klass_array() && ti != local_interfaces) {
+  Array<InstanceKlass*>* ti = transitive_interfaces;
+  if (ti != Universe::the_empty_instance_klass_array() && ti != local_interfaces) {
     // check that the interfaces don't come from super class
-    Array<Klass*>* sti = (super_klass == NULL) ? NULL :
+    Array<InstanceKlass*>* sti = (super_klass == NULL) ? NULL :
                     InstanceKlass::cast(super_klass)->transitive_interfaces();
     if (ti != sti && ti != NULL && !ti->is_shared()) {
-      MetadataFactory::free_array<Klass*>(loader_data, ti);
+      MetadataFactory::free_array<InstanceKlass*>(loader_data, ti);
     }
   }
 
   // local interfaces can be empty
-  if (local_interfaces != Universe::the_empty_klass_array() &&
+  if (local_interfaces != Universe::the_empty_instance_klass_array() &&
       local_interfaces != NULL && !local_interfaces->is_shared()) {
-    MetadataFactory::free_array<Klass*>(loader_data, local_interfaces);
+    MetadataFactory::free_array<InstanceKlass*>(loader_data, local_interfaces);
   }
 }
 
@@ -517,7 +517,8 @@
   // interfaces.
   if (secondary_supers() != NULL &&
       secondary_supers() != Universe::the_empty_klass_array() &&
-      secondary_supers() != transitive_interfaces() &&
+      // see comments in compute_secondary_supers about the following cast
+      (address)(secondary_supers()) != (address)(transitive_interfaces()) &&
       !secondary_supers()->is_shared()) {
     MetadataFactory::free_array<Klass*>(loader_data, secondary_supers());
   }
@@ -755,10 +756,10 @@
   }
 
   // link all interfaces implemented by this class before linking this class
-  Array<Klass*>* interfaces = local_interfaces();
+  Array<InstanceKlass*>* interfaces = local_interfaces();
   int num_interfaces = interfaces->length();
   for (int index = 0; index < num_interfaces; index++) {
-    InstanceKlass* interk = InstanceKlass::cast(interfaces->at(index));
+    InstanceKlass* interk = interfaces->at(index);
     interk->link_class_impl(throw_verifyerror, CHECK_false);
   }
 
@@ -872,8 +873,7 @@
 void InstanceKlass::initialize_super_interfaces(TRAPS) {
   assert (has_nonstatic_concrete_methods(), "caller should have checked this");
   for (int i = 0; i < local_interfaces()->length(); ++i) {
-    Klass* iface = local_interfaces()->at(i);
-    InstanceKlass* ik = InstanceKlass::cast(iface);
+    InstanceKlass* ik = local_interfaces()->at(i);
 
     // Initialization is depth first search ie. we start with top of the inheritance tree
     // has_nonstatic_concrete_methods drives searching superinterfaces since it
@@ -1117,18 +1117,21 @@
 }
 
 GrowableArray<Klass*>* InstanceKlass::compute_secondary_supers(int num_extra_slots,
-                                                               Array<Klass*>* transitive_interfaces) {
+                                                               Array<InstanceKlass*>* transitive_interfaces) {
   // The secondaries are the implemented interfaces.
-  Array<Klass*>* interfaces = transitive_interfaces;
+  Array<InstanceKlass*>* interfaces = transitive_interfaces;
   int num_secondaries = num_extra_slots + interfaces->length();
   if (num_secondaries == 0) {
     // Must share this for correct bootstrapping!
     set_secondary_supers(Universe::the_empty_klass_array());
     return NULL;
   } else if (num_extra_slots == 0) {
-    // The secondary super list is exactly the same as the transitive interfaces.
+    // The secondary super list is exactly the same as the transitive interfaces, so
+    // let's use it instead of making a copy.
     // Redefine classes has to be careful not to delete this!
-    set_secondary_supers(interfaces);
+    // We need the cast because Array<Klass*> is NOT a supertype of Array<InstanceKlass*>,
+    // (but it's safe to do here because we won't write into _secondary_supers from this point on).
+    set_secondary_supers((Array<Klass*>*)(address)interfaces);
     return NULL;
   } else {
     // Copy transitive interfaces to a temporary growable array to be constructed
@@ -1791,11 +1794,11 @@
 Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
                                                        Symbol* signature,
                                                        DefaultsLookupMode defaults_mode) const {
-  Array<Klass*>* all_ifs = transitive_interfaces();
+  Array<InstanceKlass*>* all_ifs = transitive_interfaces();
   int num_ifs = all_ifs->length();
   InstanceKlass *ik = NULL;
   for (int i = 0; i < num_ifs; i++) {
-    ik = InstanceKlass::cast(all_ifs->at(i));
+    ik = all_ifs->at(i);
     Method* m = ik->lookup_method(name, signature);
     if (m != NULL && m->is_public() && !m->is_static() &&
         ((defaults_mode != skip_defaults) || !m->is_default_method())) {
@@ -2142,11 +2145,11 @@
     return false;
   }
 
-  Array<Klass*>* local_interfaces = this->local_interfaces();
+  Array<InstanceKlass*>* local_interfaces = this->local_interfaces();
   if (local_interfaces != NULL) {
     int length = local_interfaces->length();
     for (int i = 0; i < length; i++) {
-      InstanceKlass* intf = InstanceKlass::cast(local_interfaces->at(i));
+      InstanceKlass* intf = local_interfaces->at(i);
       if (!intf->has_passed_fingerprint_check()) {
         ResourceMark rm;
         log_trace(class, fingerprint)("%s : interface %s not fingerprinted", external_name(), intf->external_name());
@@ -2353,10 +2356,10 @@
       }
     }
     if (!bad) {
-      Array<Klass*>* interfaces = transitive_interfaces();
+      Array<InstanceKlass*>* interfaces = transitive_interfaces();
       for (int i = 0; i < interfaces->length(); i++) {
-        Klass* iface = interfaces->at(i);
-        if (InstanceKlass::cast(iface)->is_in_error_state()) {
+        InstanceKlass* iface = interfaces->at(i);
+        if (iface->is_in_error_state()) {
           bad = true;
           break;
         }
@@ -3254,6 +3257,12 @@
   }
 }
 
+bool InstanceKlass::verify_itable_index(int i) {
+  int method_count = klassItable::method_count_for_interface(this);
+  assert(i >= 0 && i < method_count, "index out of bounds");
+  return true;
+}
+
 #endif //PRODUCT
 
 void InstanceKlass::oop_print_value_on(oop obj, outputStream* st) {
@@ -3498,18 +3507,18 @@
 
   // Verify local interfaces
   if (local_interfaces()) {
-    Array<Klass*>* local_interfaces = this->local_interfaces();
+    Array<InstanceKlass*>* local_interfaces = this->local_interfaces();
     for (int j = 0; j < local_interfaces->length(); j++) {
-      Klass* e = local_interfaces->at(j);
+      InstanceKlass* e = local_interfaces->at(j);
       guarantee(e->is_klass() && e->is_interface(), "invalid local interface");
     }
   }
 
   // Verify transitive interfaces
   if (transitive_interfaces() != NULL) {
-    Array<Klass*>* transitive_interfaces = this->transitive_interfaces();
+    Array<InstanceKlass*>* transitive_interfaces = this->transitive_interfaces();
     for (int j = 0; j < transitive_interfaces->length(); j++) {
-      Klass* e = transitive_interfaces->at(j);
+      InstanceKlass* e = transitive_interfaces->at(j);
       guarantee(e->is_klass() && e->is_interface(), "invalid transitive interface");
     }
   }
@@ -3656,7 +3665,7 @@
 // unloading only. Also resets the flag to false. purge_previous_version
 // will set the flag to true if there are any left, i.e., if there's any
 // work to do for next time. This is to avoid the expensive code cache
-// walk in CLDG::do_unloading().
+// walk in CLDG::clean_deallocate_lists().
 bool InstanceKlass::has_previous_versions_and_reset() {
   bool ret = _has_previous_versions;
   log_trace(redefine, class, iklass, purge)("Class unloading: has_previous_versions = %s",
--- a/src/hotspot/share/oops/instanceKlass.hpp	Thu Aug 02 22:06:18 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Thu Aug 09 22:06:11 2018 +0200
@@ -279,10 +279,10 @@
   Array<Method*>* _methods;
   // Default Method Array, concrete methods inherited from interfaces
   Array<Method*>* _default_methods;
-  // Interface (Klass*s) this class declares locally to implement.
-  Array<Klass*>* _local_interfaces;
-  // Interface (Klass*s) this class implements transitively.
-  Array<Klass*>* _transitive_interfaces;
+  // Interfaces (InstanceKlass*s) this class declares locally to implement.
+  Array<InstanceKlass*>* _local_interfaces;
+  // Interfaces (InstanceKlass*s) this class implements transitively.
+  Array<InstanceKlass*>* _transitive_interfaces;
   // Int array containing the original order of method in the class file (for JVMTI).
   Array<int>*     _method_ordering;
   // Int array containing the vtable_indices for default_methods
@@ -415,13 +415,13 @@
   Array<int>* create_new_default_vtable_indices(int len, TRAPS);
 
   // interfaces
-  Array<Klass*>* local_interfaces() const          { return _local_interfaces; }
-  void set_local_interfaces(Array<Klass*>* a)      {
+  Array<InstanceKlass*>* local_interfaces() const          { return _local_interfaces; }