changeset 50421:3a28f370bb31 switch

Automatic merge with default
author mcimadamore
date Thu, 26 Apr 2018 22:11:15 +0200
parents d557ba6ef129 a09af8ef8e5c
children 2b2f7f718910
files make/CompileJavaModules.gmk src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.cpp src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.hpp src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.cpp src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.hpp src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.cpp src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.hpp src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.cpp src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.hpp src/hotspot/share/runtime/commandLineFlagConstraintList.cpp src/hotspot/share/runtime/commandLineFlagConstraintList.hpp src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.cpp src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.hpp src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.hpp src/hotspot/share/runtime/commandLineFlagRangeList.cpp src/hotspot/share/runtime/commandLineFlagRangeList.hpp src/hotspot/share/runtime/commandLineFlagWriteableList.cpp src/hotspot/share/runtime/commandLineFlagWriteableList.hpp src/java.base/windows/classes/java/net/DualStackPlainSocketImpl.java src/java.base/windows/native/libnet/DualStackPlainSocketImpl.c src/java.base/windows/native/libnet/portconfig.c src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symtab.java src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicMapImplTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicMapLargeTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicMapTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicSetTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EquivalenceTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/PairTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/EconomicMap.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/EconomicMapImpl.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/EconomicSet.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/Equivalence.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/MapCursor.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/Pair.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/UnmodifiableEconomicMap.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/UnmodifiableEconomicSet.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/UnmodifiableMapCursor.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/package-info.java 
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/util/ModuleAPI.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/phases/CoreCompilerConfiguration.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/Management.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/.checkstyle_checks.xml src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotSuitesProvider.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/HotSpotGraalMBeanTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CoreCompilerConfigurationFactory.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalMBean.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.options/src/org/graalvm/compiler/options/OptionValuesAccess.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64ReadNode.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64ReadReplacementPhase.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.serviceprovider/src/org/graalvm/compiler/serviceprovider/JDK9Method.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual.bench/.checkstyle.exclude src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/.checkstyle_checks.xml src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/ComparableWord.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/LocationIdentity.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/Pointer.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/PointerBase.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/SignedWord.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/UnsignedWord.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/WordBase.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/WordFactory.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/ImplementedMethods.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/VisibleMemberMap.java test/hotspot/jtreg/runtime/SharedArchiveFile/org/omg/CORBA/Context.jasm test/hotspot/jtreg/runtime/appcds/javaldr/CheckAnonymousClass.java test/hotspot/jtreg/runtime/appcds/jigsaw/classpathtests/src/com/sun/tools/javac/Main2.jasm test/hotspot/jtreg/runtime/appcds/jigsaw/classpathtests/src/javax/activation/UnsupportedDataTypeException2.jasm test/hotspot/jtreg/runtime/appcds/jigsaw/overridetests/src/java.activation/javax/activation/UnsupportedDataTypeException.java test/hotspot/jtreg/runtime/appcds/jigsaw/overridetests/src/java.activation/module-info.java test/hotspot/jtreg/runtime/appcds/test-classes/javax/activation/MimeType.jasm
diffstat 827 files changed, 26217 insertions(+), 19259 deletions(-)
--- a/.hgtags	Sat Apr 21 16:33:20 2018 -0700
+++ b/.hgtags	Thu Apr 26 22:11:15 2018 +0200
@@ -482,3 +482,4 @@
 0c3e252cea44f06aef570ef464950ab97c669970 jdk-11+9
 6fa770f9f8ab296e1ce255ec17ccf6d4e1051886 jdk-10+46
 69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10
+e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11
--- a/make/CompileJavaModules.gmk	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/CompileJavaModules.gmk	Thu Apr 26 22:11:15 2018 +0200
@@ -441,7 +441,7 @@
     #
 
 jdk.internal.vm.compiler_EXCLUDES += \
-    org.graalvm.collections.test \
+    jdk.internal.vm.compiler.collections.test \
     org.graalvm.compiler.core.match.processor \
     org.graalvm.compiler.nodeinfo.processor \
     org.graalvm.compiler.options.processor \
--- a/make/CompileToolsHotspot.gmk	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/CompileToolsHotspot.gmk	Thu Apr 26 22:11:15 2018 +0200
@@ -47,8 +47,8 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.word/src \
-          $(SRC_DIR)/org.graalvm.collections/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.word/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.core/src \
           $(SRC_DIR)/org.graalvm.compiler.core.common/src \
           $(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
@@ -102,7 +102,7 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.collections/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.options/src \
           $(SRC_DIR)/org.graalvm.compiler.options.processor/src \
           $(SRC_DIR)/org.graalvm.util/src \
@@ -118,8 +118,8 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.word/src \
-          $(SRC_DIR)/org.graalvm.collections/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.word/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.bytecode/src \
           $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
           $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
--- a/make/Images.gmk	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/Images.gmk	Thu Apr 26 22:11:15 2018 +0200
@@ -117,7 +117,7 @@
 
 JLINK_TOOL := $(JLINK) -J-Djlink.debug=true \
     --module-path $(IMAGES_OUTPUTDIR)/jmods \
-    --endian $(OPENJDK_BUILD_CPU_ENDIAN) \
+    --endian $(OPENJDK_TARGET_CPU_ENDIAN) \
     --release-info $(BASE_RELEASE_FILE) \
     --order-resources=$(call CommaList, $(JLINK_ORDER_RESOURCES)) \
     --dedup-legal-notices=error-if-not-same-content \
--- a/make/Init.gmk	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/Init.gmk	Thu Apr 26 22:11:15 2018 +0200
@@ -310,9 +310,13 @@
           ifneq ($(PARALLEL_TARGETS), )
 	    $(call StartGlobalTimer)
 	    $(call PrepareSmartJavac)
+            # JOBS will only be empty for a bootcycle-images recursive call
+            # or if specified via a make argument directly. In those cases
+            # treat it as NOT using jobs at all.
 	    ( cd $(TOPDIR) && \
 	        $(NICE) $(MAKE) $(MAKE_ARGS) $(OUTPUT_SYNC_FLAG) \
-	            -j $(JOBS) -f make/Main.gmk $(USER_MAKE_VARS) \
+                    $(if $(JOBS), -j $(JOBS)) \
+	            -f make/Main.gmk $(USER_MAKE_VARS) \
 	            $(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE) $(BUILD_LOG_PIPE) || \
 	        ( exitcode=$$? && \
 	        $(PRINTF) "\nERROR: Build failed for $(TARGET_DESCRIPTION) (exit code $$exitcode) \n" \
--- a/make/autoconf/flags.m4	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/autoconf/flags.m4	Thu Apr 26 22:11:15 2018 +0200
@@ -233,15 +233,17 @@
   # The sysroot flags are needed for configure to be able to run the compilers
   FLAGS_SETUP_SYSROOT_FLAGS
 
+  # For solstudio and xlc, the word size flag is required for correct behavior.
+  # For clang/gcc, the flag is only strictly required for reduced builds, but
+  # set it always where possible (x86, sparc and ppc).
   if test "x$TOOLCHAIN_TYPE" = xxlc; then
     MACHINE_FLAG="-q${OPENJDK_TARGET_CPU_BITS}"
-  elif test "x$TOOLCHAIN_TYPE" != xmicrosoft; then
-    if test "x$OPENJDK_TARGET_CPU" != xaarch64 &&
-       test "x$OPENJDK_TARGET_CPU" != xarm &&
-       test "x$OPENJDK_TARGET_CPU" != xmips &&
-       test "x$OPENJDK_TARGET_CPU" != xmipsel &&
-       test "x$OPENJDK_TARGET_CPU" != xmips64 &&
-       test "x$OPENJDK_TARGET_CPU" != xmips64el; then
+  elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
+    MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
+  elif test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
+    if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86 ||
+        test "x$OPENJDK_TARGET_CPU_ARCH" = xsparc ||
+        test "x$OPENJDK_TARGET_CPU_ARCH" = xppc; then
       MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
     fi
   fi
--- a/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Thu Apr 26 22:11:15 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -318,16 +318,17 @@
         }
         for (Iterator<String> it = myMap.keySet().iterator(); it.hasNext();) {
             String key = it.next();
-            if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
+                if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
                     || key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
                 @SuppressWarnings("unchecked")
                 Map<String, String> nameMap = (Map<String, String>) myMap.get(key);
+
                 // Convert key/value pairs to an array.
                 String[] names = new String[ZONE_NAME_KEYS.length];
                 int ix = 0;
                 for (String nameKey : ZONE_NAME_KEYS) {
                     String name = nameMap.get(nameKey);
-                    if (name == null) {
+                    if (name == null && parentsMap != null) {
                         @SuppressWarnings("unchecked")
                         Map<String, String> parentNames = (Map<String, String>) parentsMap.get(key);
                         if (parentNames != null) {
@@ -357,29 +358,6 @@
                             }
                         }
                     }
-                    // If there are still any nulls, try filling in them from en data.
-                    if (hasNulls(names) && !id.equals("en")) {
-                        @SuppressWarnings("unchecked")
-                        String[] enNames = (String[]) Bundle.getBundle("en").getTargetMap().get(key);
-                        if (enNames == null) {
-                            if (metaKey != null) {
-                                @SuppressWarnings("unchecked")
-                                String[] metaNames = (String[]) Bundle.getBundle("en").getTargetMap().get(metaKey);
-                                enNames = metaNames;
-                            }
-                        }
-                        if (enNames != null) {
-                            for (int i = 0; i < names.length; i++) {
-                                if (names[i] == null) {
-                                    names[i] = enNames[i];
-                                }
-                            }
-                        }
-                        // If there are still nulls, give up names.
-                        if (hasNulls(names)) {
-                            names = null;
-                        }
-                    }
                 }
                 // replace the Map with the array
                 if (names != null) {
@@ -662,12 +640,12 @@
                     if (CLDRConverter.handlerMetaZones.get(tz).equals(meta)) {
                         tzid = tz;
                         break;
-                        }
                     }
                 }
+            }
         } else {
             tzid = key.substring(CLDRConverter.TIMEZONE_ID_PREFIX.length());
-    }
+        }
 
         if (tzid != null) {
             for (Object[] jreZone : jreTimeZoneNames) {
@@ -676,13 +654,13 @@
                         if (map.get(ZONE_NAME_KEYS[i]) == null) {
                             String[] jreNames = (String[])jreZone[1];
                             map.put(ZONE_NAME_KEYS[i], jreNames[i]);
+                        }
+                    }
+                    break;
                 }
             }
-                    break;
         }
     }
-            }
-        }
 
     private void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
         switch (cldrLetter) {
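
With the en-data fallback removed above, a missing zone display name is now filled only from the parent locale's data, and only when parentsMap is non-null. A minimal Java sketch of that remaining fallback follows; the class, keys and locale data are illustrative, not the build tool's actual code.

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    // Minimal sketch: a display-name slot missing in the current locale is
    // taken from the parent locale only (the old "fill from en" pass is gone).
    // Keys and data here are illustrative.
    class ZoneNameFallbackSketch {
        static final String[] ZONE_NAME_KEYS = {"std.long", "std.short", "dst.long"};

        static String[] resolve(Map<String, String> nameMap, Map<String, String> parentNames) {
            String[] names = new String[ZONE_NAME_KEYS.length];
            for (int i = 0; i < ZONE_NAME_KEYS.length; i++) {
                String name = nameMap.get(ZONE_NAME_KEYS[i]);
                if (name == null && parentNames != null) {
                    name = parentNames.get(ZONE_NAME_KEYS[i]);  // parent locale only
                }
                names[i] = name;                                 // may remain null
            }
            return names;
        }

        public static void main(String[] args) {
            Map<String, String> child = new HashMap<>();
            child.put("std.long", "Mitteleuropäische Zeit");
            Map<String, String> parent = Map.of("std.short", "MEZ");
            System.out.println(Arrays.toString(resolve(child, parent)));
        }
    }
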
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Thu Apr 26 22:11:15 2018 +0200
@@ -31,6 +31,7 @@
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.nio.file.*;
+import java.text.MessageFormat;
 import java.time.*;
 import java.util.*;
 import java.util.ResourceBundle.Control;
@@ -82,9 +83,11 @@
     static final String CALENDAR_FIRSTDAY_PREFIX = "firstDay.";
     static final String CALENDAR_MINDAYS_PREFIX = "minDays.";
     static final String TIMEZONE_ID_PREFIX = "timezone.id.";
+    static final String EXEMPLAR_CITY_PREFIX = "timezone.excity.";
     static final String ZONE_NAME_PREFIX = "timezone.displayname.";
     static final String METAZONE_ID_PREFIX = "metazone.id.";
     static final String PARENT_LOCALE_PREFIX = "parentLocale.";
+    static final String[] EMPTY_ZONE = {"", "", "", "", "", ""};
 
     private static SupplementDataParseHandler handlerSuppl;
     private static SupplementalMetadataParseHandler handlerSupplMeta;
@@ -662,23 +665,18 @@
                                 Arrays.deepEquals(data,
                                     (String[])map.get(METAZONE_ID_PREFIX + me.getValue())))
                             .findAny();
-                    if (cldrMeta.isPresent()) {
-                        names.put(tzid, cldrMeta.get().getValue());
-                    } else {
+                    cldrMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
                         // check the JRE meta key, add if there is not.
                         Optional<Map.Entry<String[], String>> jreMeta =
                             jreMetaMap.entrySet().stream()
                                 .filter(jm -> Arrays.deepEquals(data, jm.getKey()))
                                 .findAny();
-                        if (jreMeta.isPresent()) {
-                            names.put(tzid, jreMeta.get().getValue());
-                        } else {
-                            String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
-                            names.put(METAZONE_ID_PREFIX + metaName, data);
-                            names.put(tzid, metaName);
-                            jreMetaMap.put(data, metaName);
-                        }
-                    }
+                        jreMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
+                                String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
+                                names.put(METAZONE_ID_PREFIX + metaName, data);
+                                names.put(tzid, metaName);
+                        });
+                    });
                 }
             });
         }
@@ -705,6 +703,26 @@
             }
         });
 
+        // exemplar cities.
+        Map<String, Object> exCities = map.entrySet().stream()
+                .filter(e -> e.getKey().startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX))
+                .collect(Collectors
+                        .toMap(Map.Entry::getKey, Map.Entry::getValue));
+        names.putAll(exCities);
+
+        if (!id.equals("en") &&
+            !names.isEmpty()) {
+            // CLDR does not have UTC entry, so add it here.
+            names.put("UTC", EMPTY_ZONE);
+
+            // no metazone zones
+            Arrays.asList(handlerMetaZones.get(MetaZonesParseHandler.NO_METAZONE_KEY)
+                .split("\\s")).stream()
+                .forEach(tz -> {
+                    names.put(tz, EMPTY_ZONE);
+                });
+        }
+
         return names;
     }
 
@@ -769,6 +787,10 @@
         "field.hour",
         "timezone.hourFormat",
         "timezone.gmtFormat",
+        "timezone.gmtZeroFormat",
+        "timezone.regionFormat",
+        "timezone.regionFormat.daylight",
+        "timezone.regionFormat.standard",
         "field.minute",
         "field.second",
         "field.zone",
--- a/make/jdk/src/classes/build/tools/cldrconverter/LDMLParseHandler.java	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/jdk/src/classes/build/tools/cldrconverter/LDMLParseHandler.java	Thu Apr 26 22:11:15 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,19 +103,30 @@
         case "key":
             // for LocaleNames
             // copy string
-            pushStringEntry(qName, attributes,
-                CLDRConverter.LOCALE_KEY_PREFIX +
-                convertOldKeyName(attributes.getValue("type")));
+            {
+                String key = convertOldKeyName(attributes.getValue("type"));
+                if (key.length() == 2) {
+                    pushStringEntry(qName, attributes,
+                        CLDRConverter.LOCALE_KEY_PREFIX + key);
+                } else {
+                    pushIgnoredContainer(qName);
+                }
+            }
             break;
 
         case "type":
             // for LocaleNames/CalendarNames
             // copy string
-            pushStringEntry(qName, attributes,
-                CLDRConverter.LOCALE_TYPE_PREFIX +
-                convertOldKeyName(attributes.getValue("key")) + "." +
-                attributes.getValue("type"));
-
+            {
+                String key = convertOldKeyName(attributes.getValue("key"));
+                if (key.length() == 2) {
+                    pushStringEntry(qName, attributes,
+                    CLDRConverter.LOCALE_TYPE_PREFIX + key + "." +
+                    attributes.getValue("type"));
+                } else {
+                    pushIgnoredContainer(qName);
+                }
+            }
             break;
 
         //
@@ -445,6 +456,16 @@
         case "gmtFormat":
             pushStringEntry(qName, attributes, "timezone.gmtFormat");
             break;
+        case "gmtZeroFormat":
+            pushStringEntry(qName, attributes, "timezone.gmtZeroFormat");
+            break;
+        case "regionFormat":
+            {
+                String type = attributes.getValue("type");
+                pushStringEntry(qName, attributes, "timezone.regionFormat" +
+                    (type == null ? "" : "." + type));
+            }
+            break;
         case "zone":
             {
                 String tzid = attributes.getValue("type"); // Olson tz id
@@ -474,8 +495,8 @@
         case "daylight": // daylight saving (summer) time name
             pushStringEntry(qName, attributes, CLDRConverter.ZONE_NAME_PREFIX + qName + "." + zoneNameStyle);
             break;
-        case "exemplarCity":  // not used in JDK
-            pushIgnoredContainer(qName);
+        case "exemplarCity":
+            pushStringEntry(qName, attributes, CLDRConverter.EXEMPLAR_CITY_PREFIX);
             break;
 
         //
@@ -877,11 +898,16 @@
         case "generic":
         case "standard":
         case "daylight":
+        case "exemplarCity":
             if (zonePrefix != null && (currentContainer instanceof Entry)) {
                 @SuppressWarnings("unchecked")
                 Map<String, String> valmap = (Map<String, String>) get(zonePrefix + getContainerKey());
                 Entry<?> entry = (Entry<?>) currentContainer;
-                valmap.put(entry.getKey(), (String) entry.getValue());
+                if (qName.equals("exemplarCity")) {
+                    put(CLDRConverter.EXEMPLAR_CITY_PREFIX + getContainerKey(), (String) entry.getValue());
+                } else {
+                    valmap.put(entry.getKey(), (String) entry.getValue());
+                }
             }
             break;
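
As the hunks above show, exemplarCity elements are no longer ignored; each one is stored under the new timezone.excity. prefix keyed by the Olson zone id. A small Java sketch of the resulting key shape (ids and city names are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the key shape produced once exemplarCity is captured:
    // "timezone.excity.<Olson id>" -> localized city name. Sample data only.
    class ExemplarCityKeySketch {
        static final String EXEMPLAR_CITY_PREFIX = "timezone.excity.";

        public static void main(String[] args) {
            Map<String, String> bundle = new HashMap<>();
            bundle.put(EXEMPLAR_CITY_PREFIX + "America/Los_Angeles", "Los Angeles");
            bundle.put(EXEMPLAR_CITY_PREFIX + "Europe/Paris", "Paris");
            bundle.forEach((k, v) -> System.out.println(k + " -> " + v));
        }
    }
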
 
--- a/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java	Thu Apr 26 22:11:15 2018 +0200
@@ -35,6 +35,8 @@
 import org.xml.sax.SAXException;
 
 class MetaZonesParseHandler extends AbstractLDMLHandler<String> {
+    final static String NO_METAZONE_KEY = "no.metazone.defined";
+
     private String tzid, metazone;
 
     // for java.time.format.ZoneNames.java
@@ -101,10 +103,17 @@
         assert qName.equals(currentContainer.getqName()) : "current=" + currentContainer.getqName() + ", param=" + qName;
         switch (qName) {
         case "timezone":
-            if (tzid == null || metazone == null) {
+            if (tzid == null) {
                 throw new InternalError();
+            } else if (metazone == null) {
+                String no_meta = get(NO_METAZONE_KEY);
+                put(NO_METAZONE_KEY, no_meta == null ? tzid : no_meta + " " + tzid);
+                CLDRConverter.info("No metazone defined for %s%n", tzid);
+            } else {
+                put(tzid, metazone);
             }
-            put(tzid, metazone);
+            tzid = null;
+            metazone = null;
             break;
         }
         currentContainer = currentContainer.getParent();
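
Time zones that close without a metazone are now accumulated under NO_METAZONE_KEY as one whitespace-separated string, which CLDRConverter later splits to emit EMPTY_ZONE entries. A minimal Java sketch of the accumulation (zone ids are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the NO_METAZONE_KEY accumulation: each zone id without a
    // metazone is appended to a single space-separated value.
    class NoMetazoneSketch {
        static final String NO_METAZONE_KEY = "no.metazone.defined";

        public static void main(String[] args) {
            Map<String, String> map = new HashMap<>();
            for (String tzid : new String[] {"Asia/Riyadh", "Pacific/Bougainville"}) {
                String noMeta = map.get(NO_METAZONE_KEY);
                map.put(NO_METAZONE_KEY, noMeta == null ? tzid : noMeta + " " + tzid);
            }
            System.out.println(map.get(NO_METAZONE_KEY));
        }
    }
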
--- a/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java	Thu Apr 26 22:11:15 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -211,11 +211,13 @@
                     if (value == null) {
                         CLDRConverter.warning("null value for " + key);
                     } else if (value instanceof String) {
-                        if (type == BundleType.TIMEZONE ||
-                            ((String)value).startsWith(META_VALUE_PREFIX)) {
-                            out.printf("            { \"%s\", %s },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+                        String valStr = (String)value;
+                        if (type == BundleType.TIMEZONE &&
+                            !key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) ||
+                            valStr.startsWith(META_VALUE_PREFIX)) {
+                            out.printf("            { \"%s\", %s },\n", key, CLDRConverter.saveConvert(valStr, useJava));
                         } else {
-                            out.printf("            { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+                            out.printf("            { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert(valStr, useJava));
                         }
                     } else if (value instanceof String[]) {
                         String[] values = (String[]) value;
@@ -308,15 +310,20 @@
 
             // end of static initializer block.
 
-            // Short TZ names for delayed initialization
+            // Canonical TZ names for delayed initialization
             if (CLDRConverter.isBaseModule) {
-                out.printf("    private static class TZShortIDMapHolder {\n");
-                out.printf("        static final Map<String, String> tzShortIDMap = new HashMap<>();\n");
+                out.printf("    private static class TZCanonicalIDMapHolder {\n");
+                out.printf("        static final Map<String, String> tzCanonicalIDMap = new HashMap<>(600);\n");
                 out.printf("        static {\n");
                 CLDRConverter.handlerTimeZone.getData().entrySet().stream()
                     .forEach(e -> {
-                        out.printf("            tzShortIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
-                                ((String)e.getValue()));
+                        String[] ids = ((String)e.getValue()).split("\\s");
+                        out.printf("            tzCanonicalIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
+                                ids[0]);
+                        for (int i = 1; i < ids.length; i++) {
+                            out.printf("            tzCanonicalIDMap.put(\"%s\", \"%s\");\n", ids[i],
+                                ids[0]);
+                        }
                     });
                 out.printf("        }\n    }\n\n");
             }
@@ -333,8 +340,8 @@
 
             if (CLDRConverter.isBaseModule) {
                 out.printf("    @Override\n" +
-                           "    public Map<String, String> tzShortIDs() {\n" +
-                           "        return TZShortIDMapHolder.tzShortIDMap;\n" +
+                           "    public Map<String, String> tzCanonicalIDs() {\n" +
+                           "        return TZCanonicalIDMapHolder.tzCanonicalIDMap;\n" +
                            "    }\n\n");
                 out.printf("    public Map<Locale, String[]> parentLocales() {\n" +
                            "        return parentLocalesMap;\n" +
--- a/make/launcher/Launcher-jdk.pack.gmk	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/launcher/Launcher-jdk.pack.gmk	Thu Apr 26 22:11:15 2018 +0200
@@ -88,7 +88,6 @@
     CFLAGS_solaris := -KPIC, \
     CFLAGS_macosx := -fPIC, \
     DISABLED_WARNINGS_gcc := unused-result implicit-fallthrough, \
-    DISABLED_WARNINGS_microsoft := 4005, \
     LDFLAGS := $(UNPACKEXE_ZIPOBJS) \
         $(LDFLAGS_JDKEXE) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
--- a/make/lib/Awt2dLibraries.gmk	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/lib/Awt2dLibraries.gmk	Thu Apr 26 22:11:15 2018 +0200
@@ -224,7 +224,7 @@
         format-nonliteral parentheses, \
     DISABLED_WARNINGS_clang := logical-op-parentheses extern-initializer, \
     DISABLED_WARNINGS_solstudio := E_DECLARATION_IN_CODE, \
-    DISABLED_WARNINGS_microsoft := 4297 4244 4267 4996, \
+    DISABLED_WARNINGS_microsoft := 4297 4244 4267 4291 4302 4311 4996, \
     ASFLAGS := $(LIBAWT_ASFLAGS), \
     LDFLAGS := $(LDFLAGS_JDKLIB) $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_macosx := -L$(INSTALL_LIBRARIES_HERE), \
--- a/make/lib/Lib-jdk.pack.gmk	Sat Apr 21 16:33:20 2018 -0700
+++ b/make/lib/Lib-jdk.pack.gmk	Thu Apr 26 22:11:15 2018 +0200
@@ -40,7 +40,6 @@
         $(LIBJAVA_HEADER_FLAGS), \
     CFLAGS_release := -DPRODUCT, \
     DISABLED_WARNINGS_gcc := implicit-fallthrough, \
-    DISABLED_WARNINGS_microsoft := 4005, \
     LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_windows := -map:$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/unpack.map -debug, \
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
+#include "runtime/jniHandles.hpp"
 
 #define __ masm->
 
@@ -64,3 +65,10 @@
   default: Unimplemented();
   }
 }
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath) {
+  // If mask changes we need to ensure that the inverse is still encodable as an immediate
+  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
+  __ andr(robj, robj, ~JNIHandles::weak_tag_mask);
+  __ ldr(robj, Address(robj, 0));             // *obj
+}
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -40,6 +40,8 @@
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                         Address dst, Register val, Register tmp1, Register tmp2);
 
+  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath);
+
   virtual void barrier_stubs_init() {}
 };
 
--- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -25,6 +25,8 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm_misc.hpp"
@@ -82,11 +84,9 @@
                                               // robj ^ rcounter ^ rcounter == robj
                                               // robj is address dependent on rcounter.
 
-  // If mask changes we need to ensure that the inverse is still encodable as an immediate
-  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
-  __ andr(robj, robj, ~JNIHandles::weak_tag_mask);
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->try_resolve_jobject_in_native(masm, robj, rscratch1, slow);
 
-  __ ldr(robj, Address(robj, 0));             // *obj
   __ lsr(roffset, c_rarg2, 2);                // offset
 
   assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
@@ -177,4 +177,3 @@
 address JNI_FastGetField::generate_fast_get_double_field() {
   return generate_fast_get_int_field0(T_DOUBLE);
 }
-
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -30,6 +30,7 @@
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
 #include "prims/methodHandles.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 
 #define __ _masm->
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,9 +133,29 @@
     address addr = MacroAssembler::target_addr_for_insn(instruction_address());
     *(intptr_t*)addr = x;
   } else {
+    // Store x into the instruction stream.
     MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
     ICache::invalidate_range(instruction_address(), instruction_size);
   }
+
+  // Find and replace the oop/metadata corresponding to this
+  // instruction in oops section.
+  CodeBlob* cb = CodeCache::find_blob(instruction_address());
+  nmethod* nm = cb->as_nmethod_or_null();
+  if (nm != NULL) {
+    RelocIterator iter(nm, instruction_address(), next_instruction_address());
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type) {
+        oop* oop_addr = iter.oop_reloc()->oop_addr();
+        *oop_addr = cast_to_oop(x);
+        break;
+      } else if (iter.type() == relocInfo::metadata_type) {
+        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
+        *metadata_addr = (Metadata*)x;
+        break;
+      }
+    }
+  }
 }
 
 void NativeMovConstReg::print() {
@@ -348,7 +368,7 @@
   CodeBuffer cb(code_pos, instruction_size);
   MacroAssembler a(&cb);
 
-  a.mov(rscratch1, entry);
+  a.movptr(rscratch1, (uintptr_t)entry);
   a.br(rscratch1);
 
   ICache::invalidate_range(code_pos, instruction_size);
--- a/src/hotspot/cpu/arm/methodHandles_arm.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/frame.inline.hpp"
+#include "utilities/preserveException.hpp"
 
 #define __ _masm->
 
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -29,6 +29,7 @@
 #include "nativeInst_arm.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
--- a/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
 
--- a/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "runtime/jniHandles.hpp"
 
 #define __ masm->
 
@@ -98,3 +99,8 @@
   default: Unimplemented();
   }
 }
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath) {
+  __ andn (robj, JNIHandles::weak_tag_mask, robj);
+  __ ld_ptr(robj, 0, robj);
+}
--- a/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -44,6 +44,9 @@
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Address src, Register dst, Register tmp);
 
+  // Support for jniFastGetField to try resolving a jobject/jweak in native
+  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath);
+
   virtual void barrier_stubs_init() {}
 };
 
--- a/src/hotspot/cpu/sparc/jniFastGetField_sparc.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/sparc/jniFastGetField_sparc.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm_misc.hpp"
@@ -68,17 +70,18 @@
   __ andcc (G4, 1, G0);
   __ br (Assembler::notZero, false, Assembler::pn, label1);
   __ delayed()->srl (O2, 2, O4);
-  __ andn (O1, JNIHandles::weak_tag_mask, O1);
-  __ ld_ptr (O1, 0, O5);
+
+  BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->try_resolve_jobject_in_native(masm, O1, G3_scratch, label1);
 
   assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
   speculative_load_pclist[count] = __ pc();
   switch (type) {
-    case T_BOOLEAN: __ ldub (O5, O4, G3);  break;
-    case T_BYTE:    __ ldsb (O5, O4, G3);  break;
-    case T_CHAR:    __ lduh (O5, O4, G3);  break;
-    case T_SHORT:   __ ldsh (O5, O4, G3);  break;
-    case T_INT:     __ ld (O5, O4, G3);    break;
+    case T_BOOLEAN: __ ldub (O1, O4, G3);  break;
+    case T_BYTE:    __ ldsb (O1, O4, G3);  break;
+    case T_CHAR:    __ lduh (O1, O4, G3);  break;
+    case T_SHORT:   __ ldsh (O1, O4, G3);  break;
+    case T_INT:     __ ld (O1, O4, G3);    break;
     default:        ShouldNotReachHere();
   }
 
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -35,6 +35,7 @@
 #include "oops/klass.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/objectMonitor.hpp"
--- a/src/hotspot/cpu/sparc/methodHandles_sparc.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/sparc/methodHandles_sparc.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -31,6 +31,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/methodHandles.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "utilities/preserveException.hpp"
 
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
+#include "runtime/jniHandles.hpp"
 
 #define __ masm->
 
@@ -108,3 +109,8 @@
   default: Unimplemented();
   }
 }
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath) {
+  __ clear_jweak_tag(robj);
+  __ movptr(robj, Address(robj, 0));
+}
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -44,6 +44,9 @@
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                         Address dst, Register val, Register tmp1, Register tmp2);
 
+  // Support for jniFastGetField to try resolving a jobject/jweak in native
+  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath);
+
   virtual void barrier_stubs_init() {}
 };
 
--- a/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm_misc.hpp"
@@ -81,12 +83,12 @@
                                                 // robj is data dependent on rcounter.
   }
 
-  __ clear_jweak_tag(robj);
-
-  __ movptr(robj, Address(robj, 0));             // *obj
   __ mov   (roffset, c_rarg2);
   __ shrptr(roffset, 2);                         // offset
 
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->try_resolve_jobject_in_native(masm, robj, rscratch1, slow);
+
   assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
   speculative_load_pclist[count] = __ pc();
   switch (type) {
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -37,6 +37,7 @@
 #include "oops/klass.inline.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/objectMonitor.hpp"
 #include "runtime/os.hpp"
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -31,6 +31,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/methodHandles.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "utilities/preserveException.hpp"
 
--- a/src/hotspot/share/adlc/formssel.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/adlc/formssel.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -1171,6 +1171,9 @@
   else if (is_ideal_nop()) {
     return "MachNopNode";
   }
+  else if (is_ideal_jump()) {
+    return "MachJumpNode";
+  }
   else if (is_mach_constant()) {
     return "MachConstantNode";
   }
--- a/src/hotspot/share/adlc/output_c.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/adlc/output_c.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -3936,6 +3936,9 @@
     fprintf(fp_cpp, "%s node->_prob = _leaf->as_If()->_prob;\n", indent);
     fprintf(fp_cpp, "%s node->_fcnt = _leaf->as_If()->_fcnt;\n", indent);
   }
+  if (inst->is_ideal_jump()) {
+    fprintf(fp_cpp, "%s node->_probs = _leaf->as_Jump()->_probs;\n", indent);
+  }
   if( inst->is_ideal_fastlock() ) {
     fprintf(fp_cpp, "%s node->_counters = _leaf->as_FastLock()->counters();\n", indent);
     fprintf(fp_cpp, "%s node->_rtm_counters = _leaf->as_FastLock()->rtm_counters();\n", indent);
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -445,6 +445,8 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_handle_wrong_method_stub", address, SharedRuntime::get_handle_wrong_method_stub());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_exception_handler_for_return_address", address, SharedRuntime::exception_handler_for_return_address);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_register_finalizer", address, SharedRuntime::register_finalizer);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_object_notify", address, JVMCIRuntime::object_notify);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_object_notifyAll", address, JVMCIRuntime::object_notifyAll);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_OSR_migration_end", address, SharedRuntime::OSR_migration_end);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_dynamic_invoke", address, CompilerRuntime::resolve_dynamic_invoke);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_string_by_symbol", address, CompilerRuntime::resolve_string_by_symbol);
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -285,7 +285,7 @@
 
 protected:
   virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
-  virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) { return false; }
+  virtual bool do_unloading_jvmci(bool unloading_occurred) { return false; }
 
 };
 
--- a/src/hotspot/share/aot/aotLoader.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/aot/aotLoader.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -183,28 +183,21 @@
     // Shifts are static values which initialized by 0 until java heap initialization.
     // AOT libs are loaded before heap initialized so shift values are not set.
     // It is okay since ObjectAlignmentInBytes flag which defines shifts value is set before AOT libs are loaded.
-    // Set shifts value based on first AOT library config.
+    // AOT sets shift values during heap and metaspace initialization.
+    // Check shift values to make sure they did not change.
     if (UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
       int oop_shift = Universe::narrow_oop_shift();
-      if (oop_shift == 0) {
-        Universe::set_narrow_oop_shift(AOTLib::narrow_oop_shift());
-      } else {
-        FOR_ALL_AOT_LIBRARIES(lib) {
-          (*lib)->verify_flag(AOTLib::narrow_oop_shift(), oop_shift, "Universe::narrow_oop_shift");
-        }
+      FOR_ALL_AOT_LIBRARIES(lib) {
+        (*lib)->verify_flag((*lib)->config()->_narrowOopShift, oop_shift, "Universe::narrow_oop_shift");
       }
       if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
         int klass_shift = Universe::narrow_klass_shift();
-        if (klass_shift == 0) {
-          Universe::set_narrow_klass_shift(AOTLib::narrow_klass_shift());
-        } else {
-          FOR_ALL_AOT_LIBRARIES(lib) {
-            (*lib)->verify_flag(AOTLib::narrow_klass_shift(), klass_shift, "Universe::narrow_klass_shift");
-          }
+        FOR_ALL_AOT_LIBRARIES(lib) {
+          (*lib)->verify_flag((*lib)->config()->_narrowKlassShift, klass_shift, "Universe::narrow_klass_shift");
         }
       }
     }
-    // Create heaps for all the libraries
+    // Create heaps for all valid libraries
     FOR_ALL_AOT_LIBRARIES(lib) {
       if ((*lib)->is_valid()) {
         AOTCodeHeap* heap = new AOTCodeHeap(*lib);
@@ -213,6 +206,9 @@
           add_heap(heap);
           CodeCache::add_heap(heap);
         }
+      } else {
+        // Unload invalid libraries
+        os::dll_unload((*lib)->dl_handle());
       }
     }
   }
@@ -223,20 +219,29 @@
   }
 }
 
+// Set shift value for compressed oops and classes based on first AOT library config.
+// AOTLoader::universe_init(), which is called later, will check the shift value again to make sure nobody changed it.
+// This code is not executed during CDS dump because it runs in Interpreter mode and AOT is disabled in this mode.
+
+void AOTLoader::set_narrow_oop_shift() {
+  // This method is called from Universe::initialize_heap().
+  if (UseAOT && libraries_count() > 0 &&
+      UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
+    if (Universe::narrow_oop_shift() == 0) {
+      // 0 is valid shift value for small heap but we can safely increase it
+      // at this point when nobody used it yet.
+      Universe::set_narrow_oop_shift(AOTLib::narrow_oop_shift());
+    }
+  }
+}
+
 void AOTLoader::set_narrow_klass_shift() {
-  // This method could be called from Metaspace::set_narrow_klass_base_and_shift().
-  // In case it is not called (during dump CDS, for example) the corresponding code in
-  // AOTLoader::universe_init(), which is called later, will set the shift value.
+  // This method is called from Metaspace::set_narrow_klass_base_and_shift().
   if (UseAOT && libraries_count() > 0 &&
       UseCompressedOops && AOTLib::narrow_oop_shift_initialized() &&
       UseCompressedClassPointers) {
-    int klass_shift = Universe::narrow_klass_shift();
-    if (klass_shift == 0) {
+    if (Universe::narrow_klass_shift() == 0) {
       Universe::set_narrow_klass_shift(AOTLib::narrow_klass_shift());
-    } else {
-      FOR_ALL_AOT_LIBRARIES(lib) {
-        (*lib)->verify_flag(AOTLib::narrow_klass_shift(), klass_shift, "Universe::narrow_klass_shift");
-      }
     }
   }
 }
--- a/src/hotspot/share/aot/aotLoader.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/aot/aotLoader.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -57,6 +57,7 @@
   static void initialize() NOT_AOT({ FLAG_SET_ERGO(bool, UseAOT, false); });
 
   static void universe_init() NOT_AOT_RETURN;
+  static void set_narrow_oop_shift() NOT_AOT_RETURN;
   static void set_narrow_klass_shift() NOT_AOT_RETURN;
   static bool contains(address p) NOT_AOT({ return false; });
   static void load_for_klass(InstanceKlass* ik, Thread* thread) NOT_AOT_RETURN;
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -1324,7 +1324,7 @@
 void GraphBuilder::table_switch() {
   Bytecode_tableswitch sw(stream());
   const int l = sw.length();
-  if (CanonicalizeNodes && l == 1) {
+  if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
     // total of 2 successors => use If instead of switch
     // Note: This code should go into the canonicalizer as soon as it can
     //       can handle canonicalized forms that contain more than one node.
@@ -1368,7 +1368,7 @@
 void GraphBuilder::lookup_switch() {
   Bytecode_lookupswitch sw(stream());
   const int l = sw.number_of_pairs();
-  if (CanonicalizeNodes && l == 1) {
+  if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
     // total of 2 successors => use If instead of switch
     // Note: This code should go into the canonicalizer as soon as it can
     //       can handle canonicalized forms that contain more than one node.
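
This guard keeps a one-case switch as a switch at the full-profile tier, presumably so the switch profiling added in c1_LIRGenerator.cpp below sees the bytecode in switch form. An illustrative Java source whose switch has a single case, the kind this canonicalization would otherwise turn into an If:

    // Illustrative only: a single-case switch; with the guard above, C1 no
    // longer rewrites it to an If when compiling at CompLevel_full_profile.
    class SingleCaseSwitch {
        static int classify(int x) {
            switch (x) {
                case 42: return 1;
                default: return 0;
            }
        }

        public static void main(String[] args) {
            System.out.println(classify(42) + " " + classify(7));
        }
    }
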
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -2552,6 +2552,36 @@
   int hi_key = x->hi_key();
   int len = x->length();
   LIR_Opr value = tag.result();
+
+  if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
+    ciMethod* method = x->state()->scope()->method();
+    ciMethodData* md = method->method_data_or_null();
+    ciProfileData* data = md->bci_to_data(x->state()->bci());
+    assert(data->is_MultiBranchData(), "bad profile data?");
+    int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
+    LIR_Opr md_reg = new_register(T_METADATA);
+    __ metadata2reg(md->constant_encoding(), md_reg);
+    LIR_Opr data_offset_reg = new_pointer_register();
+    LIR_Opr tmp_reg = new_pointer_register();
+
+    __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
+    for (int i = 0; i < len; i++) {
+      int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
+      __ cmp(lir_cond_equal, value, i + lo_key);
+      __ move(data_offset_reg, tmp_reg);
+      __ cmove(lir_cond_equal,
+               LIR_OprFact::intptrConst(count_offset),
+               tmp_reg,
+               data_offset_reg, T_INT);
+    }
+
+    LIR_Opr data_reg = new_pointer_register();
+    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
+    __ move(data_addr, data_reg);
+    __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
+    __ move(data_reg, data_addr);
+  }
+
   if (UseTableRanges) {
     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
   } else {
@@ -2577,6 +2607,37 @@
   move_to_phi(x->state());
 
   LIR_Opr value = tag.result();
+  int len = x->length();
+
+  if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
+    ciMethod* method = x->state()->scope()->method();
+    ciMethodData* md = method->method_data_or_null();
+    ciProfileData* data = md->bci_to_data(x->state()->bci());
+    assert(data->is_MultiBranchData(), "bad profile data?");
+    int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
+    LIR_Opr md_reg = new_register(T_METADATA);
+    __ metadata2reg(md->constant_encoding(), md_reg);
+    LIR_Opr data_offset_reg = new_pointer_register();
+    LIR_Opr tmp_reg = new_pointer_register();
+
+    __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
+    for (int i = 0; i < len; i++) {
+      int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
+      __ cmp(lir_cond_equal, value, x->key_at(i));
+      __ move(data_offset_reg, tmp_reg);
+      __ cmove(lir_cond_equal,
+               LIR_OprFact::intptrConst(count_offset),
+               tmp_reg,
+               data_offset_reg, T_INT);
+    }
+
+    LIR_Opr data_reg = new_pointer_register();
+    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
+    __ move(data_addr, data_reg);
+    __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
+    __ move(data_reg, data_addr);
+  }
+
   if (UseTableRanges) {
     do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
   } else {
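
The blocks above increment MultiBranchData case counters for both switch flavours when UseSwitchProfiling is on at the profiling tier. For reference, a minimal Java sketch of the two source shapes that typically compile to the corresponding bytecodes (classes and keys are illustrative):

    // Illustrative only: a dense key set typically compiles to tableswitch
    // (handled by do_TableSwitch), a sparse one to lookupswitch
    // (handled by do_LookupSwitch).
    class SwitchShapes {
        static String dense(int x) {           // keys 0..3 -> tableswitch
            switch (x) {
                case 0: return "a";
                case 1: return "b";
                case 2: return "c";
                case 3: return "d";
                default: return "?";
            }
        }

        static String sparse(int x) {          // scattered keys -> lookupswitch
            switch (x) {
                case 1: return "a";
                case 1_000: return "b";
                case 1_000_000: return "c";
                default: return "?";
            }
        }

        public static void main(String[] args) {
            System.out.println(dense(2) + " " + sparse(1_000));
        }
    }
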
--- a/src/hotspot/share/classfile/classLoader.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/classfile/classLoader.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -80,13 +80,13 @@
 
 // Entry points in zip.dll for loading zip/jar file entries
 
-typedef void * * (JNICALL *ZipOpen_t)(const char *name, char **pmsg);
-typedef void (JNICALL *ZipClose_t)(jzfile *zip);
-typedef jzentry* (JNICALL *FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
-typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
-typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
-typedef jboolean (JNICALL *ZipInflateFully_t)(void *inBuf, jlong inLen, void *outBuf, jlong outLen, char **pmsg);
-typedef jint     (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len);
+typedef void * * (*ZipOpen_t)(const char *name, char **pmsg);
+typedef void (*ZipClose_t)(jzfile *zip);
+typedef jzentry* (*FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
+typedef jboolean (*ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
+typedef jzentry* (*GetNextEntry_t)(jzfile *zip, jint n);
+typedef jboolean (*ZipInflateFully_t)(void *inBuf, jlong inLen, void *outBuf, jlong outLen, char **pmsg);
+typedef jint     (*Crc32_t)(jint crc, const jbyte *buf, jint len);
 
 static ZipOpen_t         ZipOpen            = NULL;
 static ZipClose_t        ZipClose           = NULL;
--- a/src/hotspot/share/code/codeHeapState.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/code/codeHeapState.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -2050,16 +2050,10 @@
 }
 
 
-#define JDK8200450_REMEDY
-#define JDK8200450_TRACE
 void CodeHeapState::print_names(outputStream* out, CodeHeap* heap) {
   if (!initialization_complete) {
     return;
   }
-#ifdef JDK8200450_TRACE
-  out->print_cr("print_names() entered for heap @ " INTPTR_FORMAT, p2i(heap));
-  out->flush();
-#endif
 
   const char* heapName   = get_heapName(heap);
   get_HeapStatGlobals(out, heapName);
@@ -2105,41 +2099,18 @@
     // Only check granule if it contains at least one blob.
     unsigned int nBlobs  = StatArray[ix].t1_count   + StatArray[ix].t2_count + StatArray[ix].tx_count +
                            StatArray[ix].stub_count + StatArray[ix].dead_count;
-#ifdef JDK8200450_REMEDY
-    if (nBlobs > 0 )
-#endif
-    {
+    if (nBlobs > 0) {
     for (unsigned int is = 0; is < granule_size; is+=(unsigned int)seg_size) {
       // heap->find_start() is safe. Only working with _segmap. Returns NULL or void*. Returned CodeBlob may be uninitialized.
       CodeBlob* this_blob = (CodeBlob *)(heap->find_start(low_bound+ix*granule_size+is));
-#ifndef JDK8200450_REMEDY
-      bool blob_initialized = (this_blob != NULL)
-#else
-#ifndef JDK8200450_TRACE
       bool blob_initialized = (this_blob != NULL) && (this_blob->header_size() >= 0) && (this_blob->relocation_size() >= 0) &&
                               ((address)this_blob + this_blob->header_size() == (address)(this_blob->relocation_begin())) &&
-                              ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin()) &&
-                              is_readable_pointer((address)(this_blob->relocation_begin()) &&
+                              ((address)this_blob + CodeBlob::align_code_offset(this_blob->header_size() + this_blob->relocation_size()) == (address)(this_blob->content_begin())) &&
+                              is_readable_pointer((address)(this_blob->relocation_begin())) &&
                               is_readable_pointer(this_blob->content_begin());
-#else
-      int   hdr_size      = 0;
-      int   reloc_size    = 0;
-      address reloc_begin = NULL;
-      address cntnt_begin = NULL;
-      if (this_blob != NULL) {
-        hdr_size    = this_blob->header_size();
-        reloc_size  = this_blob->relocation_size();
-        reloc_begin = (address)(this_blob->relocation_begin());
-        cntnt_begin = this_blob->content_begin();
-      }
-      bool blob_initialized = (this_blob != NULL) && (hdr_size >= 0) && (reloc_size >= 0) &&
-                              ((address)this_blob + hdr_size == reloc_begin) &&
-                              ((address)this_blob + CodeBlob::align_code_offset(hdr_size + reloc_size) == cntnt_begin) &&
-                              is_readable_pointer(reloc_begin) &&
-                              is_readable_pointer(cntnt_begin);
-#endif
-#endif
-      if (blob_initialized && (this_blob != last_blob)) {
+      // The blob could have been flushed, freed, and merged.
+      // this_blob < last_blob is an indicator for that case.
+      if (blob_initialized && (this_blob > last_blob)) {
         last_blob          = this_blob;
 
         //---<  get type and name  >---
@@ -2147,15 +2118,13 @@
         if (segment_granules) {
           cbType = (blobType)StatArray[ix].type;
         } else {
-          cbType = get_cbType(this_blob);  // Is this here safe?
+          cbType = get_cbType(this_blob);
         }
-        // this_blob->name() could return NULL if no name is given to CTOR. Inlined, maybe invisible on stack
+        // this_blob->name() could return NULL if no name was given to the CTOR. Inlined, maybe invisible on the stack
         const char* blob_name = this_blob->name();
-#ifdef JDK8200450_REMEDY
-        if (blob_name == NULL) {
+        if ((blob_name == NULL) || !is_readable_pointer(blob_name)) {
           blob_name = "<unavailable>";
         }
-#endif
 
         //---<  print table header for new print range  >---
         if (!name_in_addr_range) {
@@ -2174,24 +2143,16 @@
         ast->print("(+" PTR32_FORMAT ")", (unsigned int)((char*)this_blob-low_bound));
         ast->fill_to(33);
 
-#ifdef JDK8200450_TRACE
-        STRINGSTREAM_FLUSH_LOCKED("")   // Remove before push!!!
-#endif
-
         // this_blob->as_nmethod_or_null() is safe. Inlined, maybe invisible on stack.
         nmethod*    nm     = this_blob->as_nmethod_or_null();
         Method*     method = (nm == NULL) ? NULL : nm->method();  // may be uninitialized, i.e. != NULL, but invalid
-#ifdef JDK8200450_REMEDY
-        if ((nm != NULL) && (method != NULL) && is_readable_pointer(method) && is_readable_pointer(method->constants())) {
-#else
-        if ((nm != NULL) && (method != NULL)) {
-#endif
+        if ((nm != NULL) && (method != NULL) && (cbType != nMethod_dead) &&
+            is_readable_pointer(method) && is_readable_pointer(method->constants())) {
           ResourceMark rm;
           //---<  collect all data to locals as quickly as possible  >---
           unsigned int total_size = nm->total_size();
           int          hotness    = nm->hotness_counter();
-          bool         nm_zombie  = nm->is_zombie();
-          bool         get_name   = nm->is_in_use() || nm->is_not_entrant();
+          bool         get_name   = (cbType == nMethod_inuse) || (cbType == nMethod_notused);
           //---<  nMethod size in hex  >---
           ast->print(PTR32_FORMAT, total_size);
           ast->print("(" SIZE_FORMAT_W(4) "K)", total_size/K);
@@ -2205,16 +2166,11 @@
           ast->fill_to(62+6);
           ast->print("%s", blobTypeName[cbType]);
           ast->fill_to(82+6);
-          if (nm_zombie) {
+          if (cbType == nMethod_dead) {
             ast->print("%14s", " zombie method");
           }
 
-#ifdef JDK8200450_TRACE
-        STRINGSTREAM_FLUSH_LOCKED("")   // Remove before push!!!
-#endif
-
           if (get_name) {
-#ifdef JDK8200450_REMEDY
             Symbol* methName  = method->name();
             const char*   methNameS = (methName == NULL) ? NULL : methName->as_C_string();
             methNameS = (methNameS == NULL) ? "<method name unavailable>" : methNameS;
@@ -2223,10 +2179,6 @@
             methSigS  = (methSigS  == NULL) ? "<method signature unavailable>" : methSigS;
             ast->print("%s", methNameS);
             ast->print("%s", methSigS);
-#else
-            blob_name = method->name_and_sig_as_C_string();
-            ast->print("%s", blob_name);
-#endif
           } else {
             ast->print("%s", blob_name);
           }
@@ -2237,45 +2189,9 @@
           ast->print("%s", blob_name);
         }
         STRINGSTREAM_FLUSH_LOCKED("\n")
-#ifdef JDK8200450_TRACE
-        if ((nm != NULL) && (method != NULL) && !(is_readable_pointer(method) && is_readable_pointer(method->constants()))) {
-          ast->print("Potential CodeHeap State Analytics issue found.\n");
-          if (is_readable_pointer(method)) {
-            ast->print("  Issue would have been detected by is_readable_pointer(" INTPTR_FORMAT "(method->constants())) check.\n", p2i(method->constants()));
-          } else {
-            ast->print("  Issue would have been detected by is_readable_pointer(" INTPTR_FORMAT "(method)) check.\n", p2i(method));
-          }
-          STRINGSTREAM_FLUSH_LOCKED("\n")
-        }
-#endif
       } else if (!blob_initialized && (this_blob != last_blob) && (this_blob != NULL)) {
         last_blob          = this_blob;
-#ifdef JDK8200450_TRACE
-        ast->print("Potential CodeHeap State Analytics issue found.\n");
-        if (nBlobs == 0) {
-          ast->print("  Issue would have been detected by (nBlobs > 0) check.\n");
-        } else {
-          if (!((address)this_blob + hdr_size == reloc_begin)) {
-            ast->print("  Issue would have been detected by (this(" INTPTR_FORMAT ") + header(%d) == relocation_begin(" INTPTR_FORMAT ")) check.\n", p2i(this_blob), hdr_size, p2i(reloc_begin));
-          }
-          if (!((address)this_blob + CodeBlob::align_code_offset(hdr_size + reloc_size) == cntnt_begin)) {
-            ast->print("  Issue would have been detected by (this(" INTPTR_FORMAT ") + header(%d) + relocation(%d) == content_begin(" INTPTR_FORMAT ")) check.\n", p2i(this_blob), hdr_size, reloc_size, p2i(cntnt_begin));
-          }
-          if (hdr_size    != this_blob->header_size()) {
-            ast->print("  header_size      meanwhile changed from %d to %d\n", hdr_size, this_blob->header_size());
-          }
-          if (reloc_size  != this_blob->relocation_size()) {
-            ast->print("  relocation_size  meanwhile changed from %d to %d\n", reloc_size, this_blob->relocation_size());
-          }
-          if (reloc_begin != (address)(this_blob->relocation_begin())) {
-            ast->print("  relocation_begin meanwhile changed from " INTPTR_FORMAT " to " INTPTR_FORMAT "\n", p2i(reloc_begin), p2i(this_blob->relocation_begin()));
-          }
-          if (cntnt_begin != this_blob->content_begin()) {
-            ast->print("  relocation_begin meanwhile changed from " INTPTR_FORMAT " to " INTPTR_FORMAT "\n", p2i(cntnt_begin), p2i(this_blob->content_begin()));
-          }
-        }
         STRINGSTREAM_FLUSH_LOCKED("\n")
-#endif
       }
     }
     } // nBlobs > 0
@@ -2430,7 +2346,7 @@
 }
 
 CodeHeapState::blobType CodeHeapState::get_cbType(CodeBlob* cb) {
-  if (cb != NULL ) {
+  if ((cb != NULL) && is_readable_pointer(cb)) {
     if (cb->is_runtime_stub())                return runtimeStub;
     if (cb->is_deoptimization_stub())         return deoptimizationStub;
     if (cb->is_uncommon_trap_stub())          return uncommonTrapStub;
@@ -2440,13 +2356,13 @@
     if (cb->is_method_handles_adapter_blob()) return mh_adapterBlob;
     if (cb->is_buffer_blob())                 return bufferBlob;
 
-    if (cb->is_nmethod() ) {
-      if (((nmethod*)cb)->is_in_use())        return nMethod_inuse;
-      if (((nmethod*)cb)->is_alive() && !(((nmethod*)cb)->is_not_entrant()))   return nMethod_notused;
-      if (((nmethod*)cb)->is_alive())         return nMethod_alive;
-      if (((nmethod*)cb)->is_unloaded())      return nMethod_unloaded;
-      if (((nmethod*)cb)->is_zombie())        return nMethod_dead;
-      tty->print_cr("unhandled nmethod state");
+    nmethod*  nm = cb->as_nmethod_or_null();
+    if (nm != NULL) { // no is_readable check required, nm = (nmethod*)cb.
+      if (nm->is_zombie())        return nMethod_dead;
+      if (nm->is_unloaded())      return nMethod_unloaded;
+      if (nm->is_alive() && !(nm->is_not_entrant()))   return nMethod_notused;
+      if (nm->is_alive())         return nMethod_alive;
+      if (nm->is_in_use())        return nMethod_inuse;
       return nMethod_dead;
     }
   }
--- a/src/hotspot/share/code/compiledMethod.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/code/compiledMethod.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -525,7 +525,7 @@
   }
 
 #if INCLUDE_JVMCI
-  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
+  if (do_unloading_jvmci(unloading_occurred)) {
     return;
   }
 #endif
@@ -535,7 +535,7 @@
 }
 
 template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from) {
   // Ok, to lookup references to zombies here
   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
   CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
@@ -555,12 +555,12 @@
   return false;
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from) {
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from);
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from) {
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from);
 }
 
 bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
@@ -608,15 +608,15 @@
         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
       }
 
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
       break;
 
     case relocInfo::opt_virtual_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
       break;
 
     case relocInfo::static_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
       break;
 
     case relocInfo::oop_type:
@@ -636,7 +636,7 @@
   }
 
 #if INCLUDE_JVMCI
-  if (do_unloading_jvmci(is_alive, unloading_occurred)) {
+  if (do_unloading_jvmci(unloading_occurred)) {
     return postponed;
   }
 #endif
@@ -647,7 +647,7 @@
   return postponed;
 }
 
-void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
+void CompiledMethod::do_unloading_parallel_postponed() {
   ResourceMark rm;
 
   // Make sure the oop's ready to receive visitors
@@ -671,15 +671,15 @@
     switch (iter.type()) {
 
     case relocInfo::virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
       break;
 
     case relocInfo::opt_virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
       break;
 
     case relocInfo::static_call_type:
-      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
+      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
       break;
 
     default:
--- a/src/hotspot/share/code/compiledMethod.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/code/compiledMethod.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -372,7 +372,7 @@
   virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
   //  The parallel versions are used by G1.
   virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
-  virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
+  virtual void do_unloading_parallel_postponed();
 
   static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
   static void increase_unloading_clock();
@@ -383,7 +383,7 @@
 protected:
   virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
 #if INCLUDE_JVMCI
-  virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
+  virtual bool do_unloading_jvmci(bool unloading_occurred) = 0;
 #endif
 
 private:
--- a/src/hotspot/share/code/dependencies.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/code/dependencies.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/objArrayKlass.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
--- a/src/hotspot/share/code/nmethod.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -47,6 +47,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
@@ -1028,17 +1029,16 @@
   mdo->inc_decompile_count();
 }
 
-void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
+void nmethod::make_unloaded(oop cause) {
 
   post_compiled_method_unload();
 
-  // Since this nmethod is being unloaded, make sure that dependencies
-  // recorded in instanceKlasses get flushed and pass non-NULL closure to
-  // indicate that this work is being done during a GC.
+  // This nmethod is being unloaded; make sure that dependencies
+  // recorded in instanceKlasses get flushed.
+  // Since this work is being done during a GC, defer deleting dependencies from the
+  // InstanceKlass.
   assert(Universe::heap()->is_gc_active(), "should only be called during gc");
-  assert(is_alive != NULL, "Should be non-NULL");
-  // A non-NULL is_alive closure indicates that this is being called during GC.
-  flush_dependencies(is_alive);
+  flush_dependencies(/*delete_immediately*/false);
 
   // Break cycle between nmethod & method
   LogTarget(Trace, class, unload) lt;
@@ -1261,7 +1261,7 @@
       if (nmethod_needs_unregister) {
         Universe::heap()->unregister_nmethod(this);
       }
-      flush_dependencies(NULL);
+      flush_dependencies(/*delete_immediately*/true);
     }
 
     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload
@@ -1344,13 +1344,13 @@
 // of dependencies must happen during phase 1 since after GC any
 // dependencies in the unloaded nmethod won't be updated, so
 // traversing the dependency information in unsafe.  In that case this
+// function is called with delete_immediately set to false and it only
+// function is called with a boolean argument and this function only
 // notifies instanceKlasses that are reachable
 
-void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
+void nmethod::flush_dependencies(bool delete_immediately) {
   assert_locked_or_safepoint(CodeCache_lock);
-  assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
-  "is_alive is non-NULL if and only if we are called during GC");
+  assert(Universe::heap()->is_gc_active() != delete_immediately,
+  "delete_immediately is false if and only if we are called during GC");
   if (!has_flushed_dependencies()) {
     set_has_flushed_dependencies();
     for (Dependencies::DepStream deps(this); deps.next(); ) {
@@ -1363,13 +1363,12 @@
         if (klass == NULL) {
           continue;  // ignore things like evol_method
         }
-        // During GC the is_alive closure is non-NULL, and is used to
-        // determine liveness of dependees that need to be updated.
-        if (is_alive == NULL || klass->is_loader_alive()) {
+        // During GC, delete_immediately is false, and the liveness of the
+        // dependee determines which classes need to be updated.
+        if (delete_immediately || klass->is_loader_alive()) {
           // The GC defers deletion of this entry, since there might be multiple threads
           // iterating over the _dependencies graph. Other call paths are single-threaded
           // and may delete it immediately.
-          bool delete_immediately = is_alive == NULL;
           InstanceKlass::cast(klass)->remove_dependent_nmethod(this, delete_immediately);
         }
       }
@@ -1390,7 +1389,7 @@
   // simply because one of its constant oops has gone dead.
   // No actual classes need to be unloaded in order for this to occur.
   assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
-  make_unloaded(is_alive, obj);
+  make_unloaded(obj);
   return true;
 }
 
@@ -1516,12 +1515,12 @@
 }
 
 #if INCLUDE_JVMCI
-bool nmethod::do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
   if (_jvmci_installed_code != NULL) {
     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
       if (_jvmci_installed_code_triggers_unloading) {
         // jweak reference processing has already cleared the referent
-        make_unloaded(is_alive, NULL);
+        make_unloaded(NULL);
         return true;
       } else {
         clear_jvmci_installed_code();
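
To summarize the new contract of flush_dependencies() (a sketch, not additional patch code): GC-time unloading defers deletion of dependency entries, the non-GC path deletes them immediately, and the assert ties the flag to Universe::heap()->is_gc_active():

    // Hedged sketch; mirrors the two call sites changed above.
    nm->flush_dependencies(/*delete_immediately*/ false);  // GC path, via make_unloaded()
    nm->flush_dependencies(/*delete_immediately*/ true);   // non-GC path
    // Invariant checked in flush_dependencies():
    //   Universe::heap()->is_gc_active() != delete_immediately
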
--- a/src/hotspot/share/code/nmethod.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/code/nmethod.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -349,10 +349,10 @@
     return _state;
   }
 
-  void  make_unloaded(BoolObjectClosure* is_alive, oop cause);
+  void  make_unloaded(oop cause);
 
   bool has_dependencies()                         { return dependencies_size() != 0; }
-  void flush_dependencies(BoolObjectClosure* is_alive);
+  void flush_dependencies(bool delete_immediately);
   bool has_flushed_dependencies()                 { return _has_flushed_dependencies; }
   void set_has_flushed_dependencies()             {
     assert(!has_flushed_dependencies(), "should only happen once");
@@ -488,7 +488,7 @@
 #if INCLUDE_JVMCI
   // See comment for _jvmci_installed_code_triggers_unloading field.
   // Returns whether this nmethod was unloaded.
-  virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred);
+  virtual bool do_unloading_jvmci(bool unloading_occurred);
 #endif
 
  private:
--- a/src/hotspot/share/code/relocInfo.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/code/relocInfo.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "code/relocInfo.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/compressedOops.inline.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/copy.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/compiler/compileBroker.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -50,6 +50,7 @@
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/jniHandles.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -117,6 +118,17 @@
 // The installed compiler(s)
 AbstractCompiler* CompileBroker::_compilers[2];
 
+// The maximum number of compiler threads, determined during startup.
+int CompileBroker::_c1_count = 0;
+int CompileBroker::_c2_count = 0;
+
+// An array of compiler thread Java objects
+jobject* CompileBroker::_compiler1_objects = NULL;
+jobject* CompileBroker::_compiler2_objects = NULL;
+
+CompileLog** CompileBroker::_compiler1_logs = NULL;
+CompileLog** CompileBroker::_compiler2_logs = NULL;
+
 // These counters are used to assign an unique ID to each compilation.
 volatile jint CompileBroker::_compilation_id     = 0;
 volatile jint CompileBroker::_osr_compilation_id = 0;
@@ -287,6 +299,36 @@
 }
 
 /**
+ * Check if a CompilerThread can be removed and update count if requested.
+ */
+static bool can_remove(CompilerThread *ct, bool do_it) {
+  assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
+  if (!ReduceNumberOfCompilerThreads) return false;
+
+  AbstractCompiler *compiler = ct->compiler();
+  int compiler_count = compiler->num_compiler_threads();
+  bool c1 = compiler->is_c1();
+
+  // Keep at least 1 compiler thread of each type.
+  if (compiler_count < 2) return false;
+
+  // Keep thread alive for at least some time.
+  if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;
+
+  // We only allow the last compiler thread of each type to get removed.
+  jobject last_compiler = c1 ? CompileBroker::compiler1_object(compiler_count - 1)
+                             : CompileBroker::compiler2_object(compiler_count - 1);
+  if (oopDesc::equals(ct->threadObj(), JNIHandles::resolve_non_null(last_compiler))) {
+    if (do_it) {
+      assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
+      compiler->set_num_compiler_threads(compiler_count - 1);
+    }
+    return true;
+  }
+  return false;
+}
+
+/**
  * Add a CompileTask to a CompileQueue.
  */
 void CompileQueue::add(CompileTask* task) {
@@ -383,6 +425,11 @@
     // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
     // is not critical and we do not want idle compiler threads to wake up too often.
     MethodCompileQueue_lock->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
+
+    if (UseDynamicNumberOfCompilerThreads && _first == NULL) {
+      // Still nothing to compile. Give caller a chance to stop this thread.
+      if (can_remove(CompilerThread::current(), false)) return NULL;
+    }
   }
 
   if (CompileBroker::is_compilation_disabled_forever()) {
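
For orientation (a simplified sketch, not new code): can_remove() above is used in two phases — the lock-free probe added to CompileQueue::get() in this hunk, and a committing call under CompileThread_lock in the compiler_thread_loop() changes further below:

    // Hedged sketch of the two-phase use of can_remove(); condensed from this patch.
    // Phase 1 (in CompileQueue::get(), no CompileThread_lock held): probe only.
    if (UseDynamicNumberOfCompilerThreads && _first == NULL &&
        can_remove(CompilerThread::current(), /*do_it*/ false)) {
      return NULL;                    // give the caller a chance to stop this thread
    }
    // Phase 2 (in compiler_thread_loop(), under CompileThread_lock): commit and exit.
    MutexLocker only_one(CompileThread_lock);
    if (can_remove(thread, /*do_it*/ true)) {
      return;                         // num_compiler_threads already decremented by can_remove()
    }
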
@@ -532,8 +579,8 @@
     return;
   }
   // Set the interface to the current compiler(s).
-  int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
-  int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
+  _c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
+  _c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
 
 #if INCLUDE_JVMCI
   if (EnableJVMCI) {
@@ -545,35 +592,35 @@
       if (FLAG_IS_DEFAULT(JVMCIThreads)) {
         if (BootstrapJVMCI) {
           // JVMCI will bootstrap so give it more threads
-          c2_count = MIN2(32, os::active_processor_count());
+          _c2_count = MIN2(32, os::active_processor_count());
         }
       } else {
-        c2_count = JVMCIThreads;
+        _c2_count = JVMCIThreads;
       }
       if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
       } else {
-        c1_count = JVMCIHostThreads;
+        _c1_count = JVMCIHostThreads;
       }
     }
   }
 #endif // INCLUDE_JVMCI
 
 #ifdef COMPILER1
-  if (c1_count > 0) {
+  if (_c1_count > 0) {
     _compilers[0] = new Compiler();
   }
 #endif // COMPILER1
 
 #ifdef COMPILER2
   if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
-    if (c2_count > 0) {
+    if (_c2_count > 0) {
       _compilers[1] = new C2Compiler();
     }
   }
 #endif // COMPILER2
 
   // Start the compiler thread(s) and the sweeper thread
-  init_compiler_sweeper_threads(c1_count, c2_count);
+  init_compiler_sweeper_threads();
   // totalTime performance counter is always created as it is required
   // by the implementation of java.lang.management.CompilationMBean.
   {
@@ -679,29 +726,38 @@
   _initialized = true;
 }
 
-JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
-                                       AbstractCompiler* comp, bool compiler_thread, TRAPS) {
-  JavaThread* thread = NULL;
-  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK_0);
+Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
+  Klass* k = SystemDictionary::find(vmSymbols::java_lang_Thread(), Handle(), Handle(), CHECK_NH);
+  assert(k != NULL, "must be initialized");
   InstanceKlass* klass = InstanceKlass::cast(k);
-  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK_0);
-  Handle string = java_lang_String::create_from_str(name, CHECK_0);
+  instanceHandle thread_handle = klass->allocate_instance_handle(CHECK_NH);
+  Handle string = java_lang_String::create_from_str(name, CHECK_NH);
 
   // Initialize thread_oop to put it into the system threadGroup
-  Handle thread_group (THREAD,  Universe::system_thread_group());
+  Handle thread_group(THREAD, Universe::system_thread_group());
   JavaValue result(T_VOID);
-  JavaCalls::call_special(&result, thread_oop,
+  JavaCalls::call_special(&result, thread_handle,
                        klass,
                        vmSymbols::object_initializer_name(),
                        vmSymbols::threadgroup_string_void_signature(),
                        thread_group,
                        string,
-                       CHECK_0);
+                       CHECK_NH);
 
+  return thread_handle;
+}
+
+
+JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queue,
+                                       AbstractCompiler* comp, bool compiler_thread, TRAPS) {
+  JavaThread* thread = NULL;
   {
     MutexLocker mu(Threads_lock, THREAD);
     if (compiler_thread) {
-      thread = new CompilerThread(queue, counters);
+      if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
+        CompilerCounters* counters = new CompilerCounters();
+        thread = new CompilerThread(queue, counters);
+      }
     } else {
       thread = new CodeCacheSweeperThread();
     }
@@ -720,13 +776,13 @@
 
     if (thread != NULL && thread->osthread() != NULL) {
 
-      java_lang_Thread::set_thread(thread_oop(), thread);
+      java_lang_Thread::set_thread(JNIHandles::resolve_non_null(thread_handle), thread);
 
       // Note that this only sets the JavaThread _priority field, which by
       // definition is limited to Java priorities and not OS priorities.
       // The os-priority is set in the CompilerThread startup code itself
 
-      java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+      java_lang_Thread::set_priority(JNIHandles::resolve_non_null(thread_handle), NearMaxPriority);
 
       // Note that we cannot call os::set_priority because it expects Java
       // priorities and we are *explicitly* using OS priorities so that it's
@@ -743,9 +799,9 @@
       }
       os::set_native_priority(thread, native_prio);
 
-      java_lang_Thread::set_daemon(thread_oop());
+      java_lang_Thread::set_daemon(JNIHandles::resolve_non_null(thread_handle));
 
-      thread->set_threadObj(thread_oop());
+      thread->set_threadObj(JNIHandles::resolve_non_null(thread_handle));
       if (compiler_thread) {
         thread->as_CompilerThread()->set_compiler(comp);
       }
@@ -756,6 +812,12 @@
 
   // First release lock before aborting VM.
   if (thread == NULL || thread->osthread() == NULL) {
+    if (UseDynamicNumberOfCompilerThreads && comp->num_compiler_threads() > 0) {
+      if (thread != NULL) {
+        thread->smr_delete();
+      }
+      return NULL;
+    }
     vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                   os::native_thread_creation_failed_msg());
   }
@@ -767,51 +829,123 @@
 }
 
 
-void CompileBroker::init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count) {
+void CompileBroker::init_compiler_sweeper_threads() {
   EXCEPTION_MARK;
 #if !defined(ZERO)
-  assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
+  assert(_c2_count > 0 || _c1_count > 0, "No compilers?");
 #endif // !ZERO
   // Initialize the compilation queue
-  if (c2_compiler_count > 0) {
+  if (_c2_count > 0) {
     const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
     _c2_compile_queue  = new CompileQueue(name);
-    _compilers[1]->set_num_compiler_threads(c2_compiler_count);
+    _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler);
+    _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler);
   }
-  if (c1_compiler_count > 0) {
+  if (_c1_count > 0) {
     _c1_compile_queue  = new CompileQueue("C1 compile queue");
-    _compilers[0]->set_num_compiler_threads(c1_compiler_count);
+    _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler);
+    _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler);
   }
 
-  int compiler_count = c1_compiler_count + c2_compiler_count;
+  char name_buffer[256];
 
-  char name_buffer[256];
-  const bool compiler_thread = true;
-  for (int i = 0; i < c2_compiler_count; i++) {
+  for (int i = 0; i < _c2_count; i++) {
     // Create a name for our thread.
     sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
-    CompilerCounters* counters = new CompilerCounters();
-    make_thread(name_buffer, _c2_compile_queue, counters, _compilers[1], compiler_thread, CHECK);
+    jobject thread_handle = JNIHandles::make_global(create_thread_oop(name_buffer, THREAD));
+    _compiler2_objects[i] = thread_handle;
+    _compiler2_logs[i] = NULL;
+
+    if (!UseDynamicNumberOfCompilerThreads || i == 0) {
+      JavaThread *ct = make_thread(thread_handle, _c2_compile_queue, _compilers[1], /* compiler_thread */ true, CHECK);
+      assert(ct != NULL, "should have been handled for initial thread");
+      _compilers[1]->set_num_compiler_threads(i + 1);
+      if (TraceCompilerThreads) {
+        ResourceMark rm;
+        MutexLocker mu(Threads_lock);
+        tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
+      }
+    }
   }
 
-  for (int i = c2_compiler_count; i < compiler_count; i++) {
+  for (int i = 0; i < _c1_count; i++) {
     // Create a name for our thread.
     sprintf(name_buffer, "C1 CompilerThread%d", i);
-    CompilerCounters* counters = new CompilerCounters();
-    // C1
-    make_thread(name_buffer, _c1_compile_queue, counters, _compilers[0], compiler_thread, CHECK);
+    jobject thread_handle = JNIHandles::make_global(create_thread_oop(name_buffer, THREAD));
+    _compiler1_objects[i] = thread_handle;
+    _compiler1_logs[i] = NULL;
+
+    if (!UseDynamicNumberOfCompilerThreads || i == 0) {
+      JavaThread *ct = make_thread(thread_handle, _c1_compile_queue, _compilers[0], /* compiler_thread */ true, CHECK);
+      assert(ct != NULL, "should have been handled for initial thread");
+      _compilers[0]->set_num_compiler_threads(i + 1);
+      if (TraceCompilerThreads) {
+        ResourceMark rm;
+        MutexLocker mu(Threads_lock);
+        tty->print_cr("Added initial compiler thread %s", ct->get_thread_name());
+      }
+    }
   }
 
   if (UsePerfData) {
-    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
+    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK);
   }
 
   if (MethodFlushing) {
     // Initialize the sweeper thread
-    make_thread("Sweeper thread", NULL, NULL, NULL, false, CHECK);
+    jobject thread_handle = JNIHandles::make_local(THREAD, create_thread_oop("Sweeper thread", THREAD)());
+    make_thread(thread_handle, NULL, NULL, /* compiler_thread */ false, CHECK);
   }
 }
 
+void CompileBroker::possibly_add_compiler_threads() {
+  EXCEPTION_MARK;
+
+  julong available_memory = os::available_memory();
+  // Only attempt to start additional threads if the lock is free.
+  if (!CompileThread_lock->try_lock()) return;
+
+  if (_c2_compile_queue != NULL) {
+    int old_c2_count = _compilers[1]->num_compiler_threads();
+    int new_c2_count = MIN3(_c2_count,
+        _c2_compile_queue->size() / 2,
+        (int)(available_memory / (200*M)));
+
+    for (int i = old_c2_count; i < new_c2_count; i++) {
+      JavaThread *ct = make_thread(compiler2_object(i), _c2_compile_queue, _compilers[1], true, CHECK);
+      if (ct == NULL) break;
+      _compilers[1]->set_num_compiler_threads(i + 1);
+      if (TraceCompilerThreads) {
+        ResourceMark rm;
+        MutexLocker mu(Threads_lock);
+        tty->print_cr("Added compiler thread %s (available memory: %dMB)",
+                      ct->get_thread_name(), (int)(available_memory/M));
+      }
+    }
+  }
+
+  if (_c1_compile_queue != NULL) {
+    int old_c1_count = _compilers[0]->num_compiler_threads();
+    int new_c1_count = MIN3(_c1_count,
+        _c1_compile_queue->size() / 4,
+        (int)(available_memory / (100*M)));
+
+    for (int i = old_c1_count; i < new_c1_count; i++) {
+      JavaThread *ct = make_thread(compiler1_object(i), _c1_compile_queue, _compilers[0], true, CHECK);
+      if (ct == NULL) break;
+      _compilers[0]->set_num_compiler_threads(i + 1);
+      if (TraceCompilerThreads) {
+        ResourceMark rm;
+        MutexLocker mu(Threads_lock);
+        tty->print_cr("Added compiler thread %s (available memory: %dMB)",
+                      ct->get_thread_name(), (int)(available_memory/M));
+      }
+    }
+  }
+
+  CompileThread_lock->unlock();
+}
+
 
 /**
  * Set the methods on the stack as on_stack so that redefine classes doesn't
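
A worked example of the sizing heuristic in possibly_add_compiler_threads() above (the figures are assumptions, not taken from the changeset): with _c2_count == 12, 30 queued C2 tasks and 4 GB of available memory, the queue bound is 30 / 2 = 15, the memory bound is 4096 MB / 200 MB = 20, so new_c2_count = MIN3(12, 15, 20) = 12, and any not-yet-running threads up to index 11 are started from the pre-created handles in _compiler2_objects.
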
@@ -1546,6 +1680,49 @@
   }
 }
 
+/**
+ * Helper function to create a new CompileLog or reuse an old one.
+ */
+CompileLog* CompileBroker::get_log(CompilerThread* ct) {
+  if (!LogCompilation) return NULL;
+
+  AbstractCompiler *compiler = ct->compiler();
+  bool c1 = compiler->is_c1();
+  jobject* compiler_objects = c1 ? _compiler1_objects : _compiler2_objects;
+  assert(compiler_objects != NULL, "must be initialized at this point");
+  CompileLog** logs = c1 ? _compiler1_logs : _compiler2_logs;
+  assert(logs != NULL, "must be initialized at this point");
+  int count = c1 ? _c1_count : _c2_count;
+
+  // Find the compiler number by its threadObj.
+  oop compiler_obj = ct->threadObj();
+  int compiler_number = 0;
+  bool found = false;
+  for (; compiler_number < count; compiler_number++) {
+    if (oopDesc::equals(JNIHandles::resolve_non_null(compiler_objects[compiler_number]), compiler_obj)) {
+      found = true;
+      break;
+    }
+  }
+  assert(found, "Compiler must exist at this point");
+
+  // Determine pointer for this thread's log.
+  CompileLog** log_ptr = &logs[compiler_number];
+
+  // Return old one if it exists.
+  CompileLog* log = *log_ptr;
+  if (log != NULL) {
+    ct->init_log(log);
+    return log;
+  }
+
+  // Create a new one and remember it.
+  init_compiler_thread_log();
+  log = ct->log();
+  *log_ptr = log;
+  return log;
+}
+
 // ------------------------------------------------------------------
 // CompileBroker::compiler_thread_loop
 //
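
Note that a CompileLog returned by get_log() above is keyed to the compiler-object slot rather than to the CompilerThread, so a thread that is stopped via can_remove() and later recreated for the same slot resumes logging into the same file. A minimal sketch of that reuse (variable names are illustrative):

    // Hedged sketch; not additional patch code.
    CompileLog* log1 = CompileBroker::get_log(ct);    // first use of slot i: creates and caches a log
    // ... ct idles out and exits; later a new CompilerThread is created for slot i ...
    CompileLog* log2 = CompileBroker::get_log(ct2);   // returns the cached log (log2 == log1)
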
@@ -1568,10 +1745,7 @@
   }
 
   // Open a log.
-  if (LogCompilation) {
-    init_compiler_thread_log();
-  }
-  CompileLog* log = thread->log();
+  CompileLog* log = get_log(thread);
   if (log != NULL) {
     log->begin_elem("start_compile_thread name='%s' thread='" UINTX_FORMAT "' process='%d'",
                     thread->name(),
@@ -1586,6 +1760,8 @@
     return;
   }
 
+  thread->start_idle_timer();
+
   // Poll for new compilation tasks as long as the JVM runs. Compilation
   // should only be disabled if something went wrong while initializing the
   // compiler runtimes. This, in turn, should not happen. The only known case
@@ -1597,9 +1773,24 @@
 
     CompileTask* task = queue->get();
     if (task == NULL) {
+      if (UseDynamicNumberOfCompilerThreads) {
+        // Access compiler_count under lock to enforce consistency.
+        MutexLocker only_one(CompileThread_lock);
+        if (can_remove(thread, true)) {
+          if (TraceCompilerThreads) {
+            tty->print_cr("Removing compiler thread %s after " JLONG_FORMAT " ms idle time",
+                          thread->name(), thread->idle_time_millis());
+          }
+          return; // Stop this thread.
+        }
+      }
       continue;
     }
 
+    if (UseDynamicNumberOfCompilerThreads) {
+      possibly_add_compiler_threads();
+    }
+
     // Give compiler threads an extra quanta.  They tend to be bursty and
     // this helps the compiler to finish up the job.
     if (CompilerThreadHintNoPreempt) {
@@ -1618,6 +1809,7 @@
       // Compile the method.
       if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
         invoke_compiler_on_method(task);
+        thread->start_idle_timer();
       } else {
         // After compilation is disabled, remove remaining methods from queue
         method->clear_queued_for_compilation();
--- a/src/hotspot/share/compiler/compileBroker.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -161,6 +161,15 @@
   // The installed compiler(s)
   static AbstractCompiler* _compilers[2];
 
+  // The maximum number of compiler threads, determined during startup.
+  static int _c1_count, _c2_count;
+
+  // An array of compiler thread Java objects
+  static jobject *_compiler1_objects, *_compiler2_objects;
+
+  // An array of compiler logs
+  static CompileLog **_compiler1_logs, **_compiler2_logs;
+
   // These counters are used for assigning id's to each compilation
   static volatile jint _compilation_id;
   static volatile jint _osr_compilation_id;
@@ -219,8 +228,11 @@
 
   static volatile int _print_compilation_warning;
 
-  static JavaThread* make_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, bool compiler_thread, TRAPS);
-  static void init_compiler_sweeper_threads(int c1_compiler_count, int c2_compiler_count);
+  static Handle create_thread_oop(const char* name, TRAPS);
+  static JavaThread* make_thread(jobject thread_oop, CompileQueue* queue,
+                                 AbstractCompiler* comp, bool compiler_thread, TRAPS);
+  static void init_compiler_sweeper_threads();
+  static void possibly_add_compiler_threads();
   static bool compilation_is_complete  (const methodHandle& method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded);
   static void preload_classes          (const methodHandle& method, TRAPS);
@@ -367,6 +379,21 @@
   // compiler name for debugging
   static const char* compiler_name(int comp_level);
 
+  // Provide access to compiler thread Java objects
+  static jobject compiler1_object(int idx) {
+    assert(_compiler1_objects != NULL, "must be initialized");
+    assert(idx < _c1_count, "oob");
+    return _compiler1_objects[idx];
+  }
+
+  static jobject compiler2_object(int idx) {
+    assert(_compiler2_objects != NULL, "must be initialized");
+    assert(idx < _c2_count, "oob");
+    return _compiler2_objects[idx];
+  }
+
+  static CompileLog* get_log(CompilerThread* ct);
+
   static int get_total_compile_count() {          return _total_compile_count; }
   static int get_total_bailout_count() {          return _total_bailout_count; }
   static int get_total_invalidated_count() {      return _total_invalidated_count; }
--- a/src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/commandLineFlagConstraintsCMS.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/commandLineFlagConstraintsGC.hpp"
-#include "memory/universe.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-static Flag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint threads, uintx threshold, bool verbose) {
-  // CMSWorkQueueDrainThreshold is verified to be less than max_juint
-  if (UseConcMarkSweepGC && (threads > (uint)(max_jint / (uint)threshold))) {
-    CommandLineError::print(verbose,
-                            "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
-                            UINTX_FORMAT ") is too large\n",
-                            threads, threshold);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose) {
-  // To avoid overflow at ParScanClosure::do_oop_work.
-  if (UseConcMarkSweepGC && (value > (max_jint / 10))) {
-    CommandLineError::print(verbose,
-                            "ParallelGCThreads (" UINT32_FORMAT ") must be "
-                            "less than or equal to " UINT32_FORMAT " for CMS GC\n",
-                            value, (max_jint / 10));
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(value, CMSWorkQueueDrainThreshold, verbose);
-}
-Flag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC && (value > ((uintx)max_jint / (uintx)ParallelGCThreads))) {
-    CommandLineError::print(verbose,
-                            "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
-                            "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
-                            value, ((uintx)max_jint / (uintx)ParallelGCThreads));
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    // ParGCCardsPerStrideChunk should be compared with card table size.
-    size_t heap_size = Universe::heap()->reserved_region().word_size();
-    CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
-    size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
-
-    if ((size_t)value > card_table_size) {
-      CommandLineError::print(verbose,
-                              "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
-                              "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
-                              value, card_table_size);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-
-    // ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread)
-    // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked
-    // not to make an overflow with ParallelGCThreads from its constraint function.
-    uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
-    uintx ergo_max = max_uintx / n_strides;
-    if ((uintx)value > ergo_max) {
-      CommandLineError::print(verbose,
-                              "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
-                              "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
-                              value, ergo_max);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) {
-  Flag::Error status = Flag::SUCCESS;
-
-  if (UseConcMarkSweepGC) {
-    if (value > CMSOldPLABMax) {
-      CommandLineError::print(verbose,
-                              "CMSOldPLABMin (" SIZE_FORMAT ") must be "
-                              "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
-                              value, CMSOldPLABMax);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-    status = MaxPLABSizeBounds("CMSOldPLABMin", value, verbose);
-  }
-  return status;
-}
-
-Flag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose) {
-  Flag::Error status = Flag::SUCCESS;
-
-  if (UseConcMarkSweepGC) {
-    status = MaxPLABSizeBounds("CMSOldPLABMax", value, verbose);
-  }
-  return status;
-}
-
-static Flag::Error CMSReservedAreaConstraintFunc(const char* name, size_t value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
-    const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
-    if (value > ergo_max) {
-      CommandLineError::print(verbose,
-                              "%s (" SIZE_FORMAT ") must be "
-                              "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
-                              "which is based on the maximum size of the old generation of the Java heap\n",
-                              name, value, ergo_max);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-
-  return Flag::SUCCESS;
-}
-
-Flag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose) {
-  Flag::Error status = CMSReservedAreaConstraintFunc("CMSRescanMultiple", value, verbose);
-
-  if (status == Flag::SUCCESS && UseConcMarkSweepGC) {
-    // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
-    // to be aligned to CardTable::card_size * BitsPerWord.
-    // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
-    // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
-    if (value % HeapWordSize != 0) {
-      CommandLineError::print(verbose,
-                              "CMSRescanMultiple (" SIZE_FORMAT ") must be "
-                              "a multiple of " SIZE_FORMAT "\n",
-                              value, HeapWordSize);
-      status = Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-
-  return status;
-}
-
-Flag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose) {
-  return CMSReservedAreaConstraintFunc("CMSConcMarkMultiple", value, verbose);
-}
-
-Flag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC && (value <= CMSPrecleanNumerator)) {
-    CommandLineError::print(verbose,
-                            "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
-                            "strickly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
-                            value, CMSPrecleanNumerator);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC && (value >= CMSPrecleanDenominator)) {
-    CommandLineError::print(verbose,
-                            "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
-                            "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
-                            value, CMSPrecleanDenominator);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    size_t max_capacity = GenCollectedHeap::heap()->young_gen()->max_capacity();
-    if (value > max_uintx - max_capacity) {
-    CommandLineError::print(verbose,
-                            "CMSSamplingGrain (" UINTX_FORMAT ") must be "
-                            "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
-                            value, max_uintx - max_capacity);
-    return Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(ParallelGCThreads, value, verbose);
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
-  // Skip for current default value.
-  if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) {
-    // CMSBitMapYieldQuantum should be compared with mark bitmap size.
-    ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
-    size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
-
-    if (value > bitmap_size) {
-      CommandLineError::print(verbose,
-                              "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
-                              "be less than or equal to bitmap size (" SIZE_FORMAT ") "
-                              "whose size corresponds to the size of old generation of the Java heap\n",
-                              value, bitmap_size);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose) {
-  if (value == 0) {
-    CommandLineError::print(verbose,
-                            "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
-                            value);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  // For CMS, OldPLABSize is the number of free blocks of a given size that are used when
-  // replenishing the local per-worker free list caches.
-  // For more details, please refer to Arguments::set_cms_and_parnew_gc_flags().
-  return MaxPLABSizeBounds("OldPLABSize", value, verbose);
-}
--- a/src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
-#define SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// CMS Flag Constraints
-Flag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose);
-Flag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose);
-Flag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose);
-
-// CMS Subconstraints
-Flag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose);
-Flag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose);
-
-#endif // SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -68,6 +68,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
+#include "gc/shared/cardTableRS.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/jvmFlagConstraintsGC.hpp"
+#include "memory/universe.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals_extension.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+static JVMFlag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint threads, uintx threshold, bool verbose) {
+  // CMSWorkQueueDrainThreshold is verified to be less than max_juint
+  if (UseConcMarkSweepGC && (threads > (uint)(max_jint / (uint)threshold))) {
+    CommandLineError::print(verbose,
+                            "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
+                            UINTX_FORMAT ") is too large\n",
+                            threads, threshold);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose) {
+  // To avoid overflow at ParScanClosure::do_oop_work.
+  if (UseConcMarkSweepGC && (value > (max_jint / 10))) {
+    CommandLineError::print(verbose,
+                            "ParallelGCThreads (" UINT32_FORMAT ") must be "
+                            "less than or equal to " UINT32_FORMAT " for CMS GC\n",
+                            value, (max_jint / 10));
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(value, CMSWorkQueueDrainThreshold, verbose);
+}
+JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
+  if (UseConcMarkSweepGC && (value > ((uintx)max_jint / (uintx)ParallelGCThreads))) {
+    CommandLineError::print(verbose,
+                            "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
+                            "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
+                            value, ((uintx)max_jint / (uintx)ParallelGCThreads));
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
+  if (UseConcMarkSweepGC) {
+    // ParGCCardsPerStrideChunk should be compared with card table size.
+    size_t heap_size = Universe::heap()->reserved_region().word_size();
+    CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
+    size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
+
+    if ((size_t)value > card_table_size) {
+      CommandLineError::print(verbose,
+                              "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
+                              "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
+                              value, card_table_size);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+
+    // ParGCCardsPerStrideChunk is used with n_strides (ParallelGCThreads * ParGCStridesPerThread)
+    // in CardTableRS::process_stride(). Note that ParGCStridesPerThread's own constraint
+    // function already ensures that it does not overflow when multiplied by ParallelGCThreads.
+    uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
+    uintx ergo_max = max_uintx / n_strides;
+    if ((uintx)value > ergo_max) {
+      CommandLineError::print(verbose,
+                              "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
+                              "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
+                              value, ergo_max);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) {
+  JVMFlag::Error status = JVMFlag::SUCCESS;
+
+  if (UseConcMarkSweepGC) {
+    if (value > CMSOldPLABMax) {
+      CommandLineError::print(verbose,
+                              "CMSOldPLABMin (" SIZE_FORMAT ") must be "
+                              "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
+                              value, CMSOldPLABMax);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+    status = MaxPLABSizeBounds("CMSOldPLABMin", value, verbose);
+  }
+  return status;
+}
+
+JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose) {
+  JVMFlag::Error status = JVMFlag::SUCCESS;
+
+  if (UseConcMarkSweepGC) {
+    status = MaxPLABSizeBounds("CMSOldPLABMax", value, verbose);
+  }
+  return status;
+}
+
+static JVMFlag::Error CMSReservedAreaConstraintFunc(const char* name, size_t value, bool verbose) {
+  if (UseConcMarkSweepGC) {
+    ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
+    const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
+    if (value > ergo_max) {
+      CommandLineError::print(verbose,
+                              "%s (" SIZE_FORMAT ") must be "
+                              "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
+                              "which is based on the maximum size of the old generation of the Java heap\n",
+                              name, value, ergo_max);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose) {
+  JVMFlag::Error status = CMSReservedAreaConstraintFunc("CMSRescanMultiple", value, verbose);
+
+  if (status == JVMFlag::SUCCESS && UseConcMarkSweepGC) {
+    // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
+    // to be aligned to CardTable::card_size * BitsPerWord.
+    // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
+    // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
+    if (value % HeapWordSize != 0) {
+      CommandLineError::print(verbose,
+                              "CMSRescanMultiple (" SIZE_FORMAT ") must be "
+                              "a multiple of " SIZE_FORMAT "\n",
+                              value, HeapWordSize);
+      status = JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+
+  return status;
+}
+
+JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose) {
+  return CMSReservedAreaConstraintFunc("CMSConcMarkMultiple", value, verbose);
+}
+
+JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
+  if (UseConcMarkSweepGC && (value <= CMSPrecleanNumerator)) {
+    CommandLineError::print(verbose,
+                            "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
+                            "strictly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
+                            value, CMSPrecleanNumerator);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose) {
+  if (UseConcMarkSweepGC && (value >= CMSPrecleanDenominator)) {
+    CommandLineError::print(verbose,
+                            "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
+                            "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
+                            value, CMSPrecleanDenominator);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
+  if (UseConcMarkSweepGC) {
+    size_t max_capacity = GenCollectedHeap::heap()->young_gen()->max_capacity();
+    if (value > max_uintx - max_capacity) {
+      CommandLineError::print(verbose,
+                              "CMSSamplingGrain (" UINTX_FORMAT ") must be "
+                              "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
+                              value, max_uintx - max_capacity);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose) {
+  if (UseConcMarkSweepGC) {
+    return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(ParallelGCThreads, value, verbose);
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
+  // Skip for current default value.
+  if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) {
+    // CMSBitMapYieldQuantum should be compared with mark bitmap size.
+    ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
+    size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
+
+    if (value > bitmap_size) {
+      CommandLineError::print(verbose,
+                              "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
+                              "be less than or equal to bitmap size (" SIZE_FORMAT ") "
+                              "whose size corresponds to the size of the old generation of the Java heap\n",
+                              value, bitmap_size);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose) {
+  if (value == 0) {
+    CommandLineError::print(verbose,
+                            "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
+                            value);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  // For CMS, OldPLABSize is the number of free blocks of a given size that are used when
+  // replenishing the local per-worker free list caches.
+  // For more details, please refer to Arguments::set_cms_and_parnew_gc_flags().
+  return MaxPLABSizeBounds("OldPLABSize", value, verbose);
+}
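
Note on the guard used above: ParallelGCThreadsAndCMSWorkQueueDrainThreshold rejects any combination where threads * threshold would overflow a jint, and it does so with the division test threads > max_jint / threshold, so the overflowing multiplication is never evaluated. The following is a minimal standalone sketch of that pattern, not part of the changeset; the function name and the sample values are hypothetical.

    #include <cstdint>
    #include <limits>

    // True when a * b would exceed 'max'; this mirrors the
    // 'threads > max_jint / threshold' test without risking overflow.
    static bool product_exceeds(uint32_t a, uint32_t b, uint32_t max) {
      return b != 0 && a > max / b;
    }

    int main() {
      const uint32_t max_jint = std::numeric_limits<int32_t>::max();
      // For example, 8 parallel GC threads with a drain threshold of 10 is well in range.
      return product_exceeds(8, 10, max_jint) ? 1 : 0;
    }
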
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
+#define SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
+
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// CMS Flag Constraints
+JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose);
+JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose);
+
+// CMS Subconstraints
+JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose);
+JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose);
+
+#endif // SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
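
All of the declarations above share one shape, JVMFlag::Error f(value, verbose), which is what allows the runtime to register and invoke them generically. The sketch below is a rough, hypothetical illustration of such a dispatch table; it is not HotSpot's actual constraint-registration API, and the flag name and helper are made up.

    #include <cstddef>

    // Hypothetical stand-ins for the real JVMFlag types (illustration only).
    enum class Error { SUCCESS, VIOLATES_CONSTRAINT };
    typedef Error (*SizeConstraintFunc)(size_t value, bool verbose);

    static Error AtLeastOne(size_t value, bool /*verbose*/) {
      return value >= 1 ? Error::SUCCESS : Error::VIOLATES_CONSTRAINT;
    }

    struct ConstraintEntry {
      const char*        name;   // flag name
      SizeConstraintFunc func;   // checker with the uniform signature
    };

    // Generic code can then validate a proposed flag value by name.
    static const ConstraintEntry kEntries[] = {
      { "SomeSizeFlag", &AtLeastOne },
    };

    int main() {
      return kEntries[0].func(16, true) == Error::SUCCESS ? 0 : 1;
    }
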
--- a/src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/heapRegionBounds.inline.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-Flag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose) {
-  if (!UseG1GC) return Flag::SUCCESS;
-
-  // Default value of G1RSetRegionEntries=0 means will be set ergonomically.
-  // Minimum value is 1.
-  if (FLAG_IS_CMDLINE(G1RSetRegionEntries) && (value < 1)) {
-    CommandLineError::print(verbose,
-                            "G1RSetRegionEntries (" INTX_FORMAT ") must be "
-                            "greater than or equal to 1\n",
-                            value);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose) {
-  if (!UseG1GC) return Flag::SUCCESS;
-
-  // Default value of G1RSetSparseRegionEntries=0 means will be set ergonomically.
-  // Minimum value is 1.
-  if (FLAG_IS_CMDLINE(G1RSetSparseRegionEntries) && (value < 1)) {
-    CommandLineError::print(verbose,
-                            "G1RSetSparseRegionEntries (" INTX_FORMAT ") must be "
-                            "greater than or equal to 1\n",
-                            value);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose) {
-  if (!UseG1GC) return Flag::SUCCESS;
-
-  // Default value of G1HeapRegionSize=0 means will be set ergonomically.
-  if (FLAG_IS_CMDLINE(G1HeapRegionSize) && (value < HeapRegionBounds::min_size())) {
-    CommandLineError::print(verbose,
-                            "G1HeapRegionSize (" SIZE_FORMAT ") must be "
-                            "greater than or equal to ergonomic heap region minimum size\n",
-                            value);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose) {
-  if (!UseG1GC) return Flag::SUCCESS;
-
-  if (value > G1MaxNewSizePercent) {
-    CommandLineError::print(verbose,
-                            "G1NewSizePercent (" UINTX_FORMAT ") must be "
-                            "less than or equal to G1MaxNewSizePercent (" UINTX_FORMAT ")\n",
-                            value, G1MaxNewSizePercent);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose) {
-  if (!UseG1GC) return Flag::SUCCESS;
-
-  if (value < G1NewSizePercent) {
-    CommandLineError::print(verbose,
-                            "G1MaxNewSizePercent (" UINTX_FORMAT ") must be "
-                            "greater than or equal to G1NewSizePercent (" UINTX_FORMAT ")\n",
-                            value, G1NewSizePercent);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose) {
-  if (UseG1GC && FLAG_IS_CMDLINE(MaxGCPauseMillis) && (value >= GCPauseIntervalMillis)) {
-    CommandLineError::print(verbose,
-                            "MaxGCPauseMillis (" UINTX_FORMAT ") must be "
-                            "less than GCPauseIntervalMillis (" UINTX_FORMAT ")\n",
-                            value, GCPauseIntervalMillis);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-
-  return Flag::SUCCESS;
-}
-
-Flag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose) {
-  if (UseG1GC) {
-    if (FLAG_IS_CMDLINE(GCPauseIntervalMillis)) {
-      if (value < 1) {
-        CommandLineError::print(verbose,
-                                "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
-                                "greater than or equal to 1\n",
-                                value);
-        return Flag::VIOLATES_CONSTRAINT;
-      }
-
-      if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
-        CommandLineError::print(verbose,
-                                "GCPauseIntervalMillis cannot be set "
-                                "without setting MaxGCPauseMillis\n");
-        return Flag::VIOLATES_CONSTRAINT;
-      }
-
-      if (value <= MaxGCPauseMillis) {
-        CommandLineError::print(verbose,
-                                "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
-                                "greater than MaxGCPauseMillis (" UINTX_FORMAT ")\n",
-                                value, MaxGCPauseMillis);
-        return Flag::VIOLATES_CONSTRAINT;
-      }
-    }
-  }
-
-  return Flag::SUCCESS;
-}
-
-Flag::Error NewSizeConstraintFuncG1(size_t value, bool verbose) {
-#ifdef _LP64
-  // Overflow would happen for uint type variable of YoungGenSizer::_min_desired_young_length
-  // when the value to be assigned exceeds uint range.
-  // i.e. result of '(uint)(NewSize / region size(1~32MB))'
-  // So maximum of NewSize should be 'max_juint * 1M'
-  if (UseG1GC && (value > (max_juint * 1 * M))) {
-    CommandLineError::print(verbose,
-                            "NewSize (" SIZE_FORMAT ") must be less than ergonomic maximum value\n",
-                            value);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-#endif // _LP64
-  return Flag::SUCCESS;
-}
-
-size_t MaxSizeForHeapAlignmentG1() {
-  return HeapRegionBounds::max_size();
-}
--- a/src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
-#define SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// G1 Flag Constraints
-Flag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose);
-Flag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose);
-Flag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose);
-Flag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose);
-
-// G1 Subconstraints
-Flag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose);
-Flag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose);
-Flag::Error MaxSizeForHeapAlignmentG1(const char* name, size_t value, bool verbose);
-Flag::Error NewSizeConstraintFuncG1(size_t value, bool verbose);
-
-size_t MaxSizeForHeapAlignmentG1();
-
-#endif // SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSG1_HPP
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -84,6 +84,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -3353,7 +3354,7 @@
   }
 
   void clean_nmethod_postponed(CompiledMethod* nm) {
-    nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
+    nm->do_unloading_parallel_postponed();
   }
 
   static const int MaxClaimNmethods = 16;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -1695,21 +1695,18 @@
     assert(!rp->discovery_enabled(), "Post condition");
   }
 
-  assert(has_overflown() || _global_mark_stack.is_empty(),
-         "Mark stack should be empty (unless it has overflown)");
+  if (has_overflown()) {
+    // We can not trust g1_is_alive if the marking stack overflowed
+    return;
+  }
+
+  assert(_global_mark_stack.is_empty(), "Marking should have completed");
 
   {
     GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
     WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
   }
 
-  if (has_overflown()) {
-    // We can not trust g1_is_alive if the marking stack overflowed
-    return;
-  }
-
-  assert(_global_mark_stack.is_empty(), "Marking should have completed");
-
   // Unload Klasses, String, Symbols, Code Cache, etc.
   if (ClassUnloadingWithConcurrentMark) {
     GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/heapRegionBounds.inline.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals_extension.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+JVMFlag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose) {
+  if (!UseG1GC) return JVMFlag::SUCCESS;
+
+  // The default value of G1RSetRegionEntries=0 means it will be set ergonomically.
+  // Minimum value is 1.
+  if (FLAG_IS_CMDLINE(G1RSetRegionEntries) && (value < 1)) {
+    CommandLineError::print(verbose,
+                            "G1RSetRegionEntries (" INTX_FORMAT ") must be "
+                            "greater than or equal to 1\n",
+                            value);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose) {
+  if (!UseG1GC) return JVMFlag::SUCCESS;
+
+  // The default value of G1RSetSparseRegionEntries=0 means it will be set ergonomically.
+  // Minimum value is 1.
+  if (FLAG_IS_CMDLINE(G1RSetSparseRegionEntries) && (value < 1)) {
+    CommandLineError::print(verbose,
+                            "G1RSetSparseRegionEntries (" INTX_FORMAT ") must be "
+                            "greater than or equal to 1\n",
+                            value);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose) {
+  if (!UseG1GC) return JVMFlag::SUCCESS;
+
+  // The default value of G1HeapRegionSize=0 means it will be set ergonomically.
+  if (FLAG_IS_CMDLINE(G1HeapRegionSize) && (value < HeapRegionBounds::min_size())) {
+    CommandLineError::print(verbose,
+                            "G1HeapRegionSize (" SIZE_FORMAT ") must be "
+                            "greater than or equal to ergonomic heap region minimum size\n",
+                            value);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose) {
+  if (!UseG1GC) return JVMFlag::SUCCESS;
+
+  if (value > G1MaxNewSizePercent) {
+    CommandLineError::print(verbose,
+                            "G1NewSizePercent (" UINTX_FORMAT ") must be "
+                            "less than or equal to G1MaxNewSizePercent (" UINTX_FORMAT ")\n",
+                            value, G1MaxNewSizePercent);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose) {
+  if (!UseG1GC) return JVMFlag::SUCCESS;
+
+  if (value < G1NewSizePercent) {
+    CommandLineError::print(verbose,
+                            "G1MaxNewSizePercent (" UINTX_FORMAT ") must be "
+                            "greater than or equal to G1NewSizePercent (" UINTX_FORMAT ")\n",
+                            value, G1NewSizePercent);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose) {
+  if (UseG1GC && FLAG_IS_CMDLINE(MaxGCPauseMillis) && (value >= GCPauseIntervalMillis)) {
+    CommandLineError::print(verbose,
+                            "MaxGCPauseMillis (" UINTX_FORMAT ") must be "
+                            "less than GCPauseIntervalMillis (" UINTX_FORMAT ")\n",
+                            value, GCPauseIntervalMillis);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose) {
+  if (UseG1GC) {
+    if (FLAG_IS_CMDLINE(GCPauseIntervalMillis)) {
+      if (value < 1) {
+        CommandLineError::print(verbose,
+                                "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
+                                "greater than or equal to 1\n",
+                                value);
+        return JVMFlag::VIOLATES_CONSTRAINT;
+      }
+
+      if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+        CommandLineError::print(verbose,
+                                "GCPauseIntervalMillis cannot be set "
+                                "without setting MaxGCPauseMillis\n");
+        return JVMFlag::VIOLATES_CONSTRAINT;
+      }
+
+      if (value <= MaxGCPauseMillis) {
+        CommandLineError::print(verbose,
+                                "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
+                                "greater than MaxGCPauseMillis (" UINTX_FORMAT ")\n",
+                                value, MaxGCPauseMillis);
+        return JVMFlag::VIOLATES_CONSTRAINT;
+      }
+    }
+  }
+
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error NewSizeConstraintFuncG1(size_t value, bool verbose) {
+#ifdef _LP64
+  // The uint variable YoungGenSizer::_min_desired_young_length would overflow
+  // if the value assigned to it, i.e. the result of
+  // '(uint)(NewSize / region size (1~32MB))', exceeded the uint range.
+  // So the maximum NewSize should be 'max_juint * 1M'.
+  if (UseG1GC && (value > (max_juint * 1 * M))) {
+    CommandLineError::print(verbose,
+                            "NewSize (" SIZE_FORMAT ") must be less than ergonomic maximum value\n",
+                            value);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+#endif // _LP64
+  return JVMFlag::SUCCESS;
+}
+
+size_t MaxSizeForHeapAlignmentG1() {
+  return HeapRegionBounds::max_size();
+}
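
NewSizeConstraintFuncG1 caps NewSize at max_juint * 1M because YoungGenSizer later stores NewSize divided by the region size in a uint, and the smallest G1 region is 1 MB. A standalone worked sketch of that bound follows; it assumes the 1 MB minimum region size and is illustrative only.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t M         = 1024 * 1024;    // assumed minimum G1 region size: 1 MB
      const uint64_t max_juint = 0xFFFFFFFFull;
      const uint64_t limit     = max_juint * M;  // the bound enforced above

      // Any NewSize at or below the limit yields a region count that still fits in a uint.
      const uint64_t new_size = limit;
      assert(new_size / M <= max_juint);
      return 0;
    }
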
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
+#define SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
+
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// G1 Flag Constraints
+JVMFlag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose);
+JVMFlag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose);
+JVMFlag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose);
+
+// G1 Subconstraints
+JVMFlag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose);
+JVMFlag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose);
+JVMFlag::Error MaxSizeForHeapAlignmentG1(const char* name, size_t value, bool verbose);
+JVMFlag::Error NewSizeConstraintFuncG1(size_t value, bool verbose);
+
+size_t MaxSizeForHeapAlignmentG1();
+
+#endif // SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
--- a/src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-Flag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose) {
-  // Parallel GC passes ParallelGCThreads when creating GrowableArray as 'int' type parameter.
-  // So can't exceed with "max_jint"
-
-  if (UseParallelGC && (value > (uint)max_jint)) {
-    CommandLineError::print(verbose,
-                            "ParallelGCThreads (" UINT32_FORMAT ") must be "
-                            "less than or equal to " UINT32_FORMAT " for Parallel GC\n",
-                            value, max_jint);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
-  // InitialTenuringThreshold is only used for ParallelGC.
-  if (UseParallelGC && (value > MaxTenuringThreshold)) {
-      CommandLineError::print(verbose,
-                              "InitialTenuringThreshold (" UINTX_FORMAT ") must be "
-                              "less than or equal to MaxTenuringThreshold (" UINTX_FORMAT ")\n",
-                              value, MaxTenuringThreshold);
-      return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
-  // As only ParallelGC uses InitialTenuringThreshold,
-  // we don't need to compare InitialTenuringThreshold with MaxTenuringThreshold.
-  if (UseParallelGC && (value < InitialTenuringThreshold)) {
-    CommandLineError::print(verbose,
-                            "MaxTenuringThreshold (" UINTX_FORMAT ") must be "
-                            "greater than or equal to InitialTenuringThreshold (" UINTX_FORMAT ")\n",
-                            value, InitialTenuringThreshold);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-
-  return Flag::SUCCESS;
-}
--- a/src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
-#define SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// Parallel Subconstraints
-Flag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose);
-Flag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
-Flag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
-
-#endif // SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+JVMFlag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose) {
+  // Parallel GC passes ParallelGCThreads to GrowableArray as an 'int' type parameter,
+  // so it cannot exceed max_jint.
+
+  if (UseParallelGC && (value > (uint)max_jint)) {
+    CommandLineError::print(verbose,
+                            "ParallelGCThreads (" UINT32_FORMAT ") must be "
+                            "less than or equal to " UINT32_FORMAT " for Parallel GC\n",
+                            value, max_jint);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
+  // InitialTenuringThreshold is only used for ParallelGC.
+  if (UseParallelGC && (value > MaxTenuringThreshold)) {
+    CommandLineError::print(verbose,
+                            "InitialTenuringThreshold (" UINTX_FORMAT ") must be "
+                            "less than or equal to MaxTenuringThreshold (" UINTX_FORMAT ")\n",
+                            value, MaxTenuringThreshold);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
+  // As only ParallelGC uses InitialTenuringThreshold,
+  // we don't need to compare InitialTenuringThreshold with MaxTenuringThreshold.
+  if (UseParallelGC && (value < InitialTenuringThreshold)) {
+    CommandLineError::print(verbose,
+                            "MaxTenuringThreshold (" UINTX_FORMAT ") must be "
+                            "greater than or equal to InitialTenuringThreshold (" UINTX_FORMAT ")\n",
+                            value, InitialTenuringThreshold);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+
+  return JVMFlag::SUCCESS;
+}
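
The two tenuring checks above enforce a single ordering relation, InitialTenuringThreshold <= MaxTenuringThreshold, from whichever side the user sets on the command line. A standalone sketch of the relation is shown below; the sample values are only the long-standing HotSpot defaults (7 and 15) and the helper name is hypothetical.

    #include <cstdint>

    // Valid only when the initial tenuring threshold does not exceed the maximum,
    // which is what both Parallel GC constraint functions above check.
    static bool tenuring_pair_ok(uint64_t initial, uint64_t maximum) {
      return initial <= maximum;
    }

    int main() {
      return tenuring_pair_ok(7, 15) ? 0 : 1;   // defaults: Initial=7, Max=15
    }
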
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
+#define SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
+
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Parallel Subconstraints
+JVMFlag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose);
+JVMFlag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
+JVMFlag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
+
+#endif // SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -51,6 +51,7 @@
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -172,6 +172,22 @@
   return false;
 }
 
+bool CollectedHeap::is_oop(oop object) const {
+  if (!check_obj_alignment(object)) {
+    return false;
+  }
+
+  if (!is_in_reserved(object)) {
+    return false;
+  }
+
+  if (is_in_reserved(object->klass_or_null())) {
+    return false;
+  }
+
+  return true;
+}
+
 // Memory state functions.
 
 
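
CollectedHeap::is_oop combines three cheap sanity checks: the pointer is properly aligned, it lies inside the reserved heap, and its klass pointer does not itself point back into the heap. The standalone analogue below reproduces the same three tests on raw addresses; it is illustrative only, since the real code operates on oop, Klass* and the reserved MemRegion, and all names and addresses here are made up.

    #include <cstdint>

    struct FakeHeap {
      uintptr_t start, end;   // reserved range [start, end)

      bool contains(uintptr_t p) const { return p >= start && p < end; }

      // Mirrors the three tests in CollectedHeap::is_oop above.
      bool looks_like_object(uintptr_t obj, uintptr_t klass) const {
        if (obj % sizeof(void*) != 0) return false;   // check_obj_alignment
        if (!contains(obj))           return false;   // is_in_reserved(object)
        if (contains(klass))          return false;   // klass must live outside the heap
        return true;
      }
    };

    int main() {
      FakeHeap heap{0x100000, 0x200000};
      return heap.looks_like_object(0x100040, 0x50000) ? 0 : 1;
    }
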
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -591,6 +591,8 @@
   virtual oop pin_object(JavaThread* thread, oop obj);
   virtual void unpin_object(JavaThread* thread, oop obj);
 
+  virtual bool is_oop(oop object) const;
+
   // Non product verification and debugging.
 #ifndef PRODUCT
   // Support for PromotionFailureALot.  Return true if it's time to cause a
--- a/src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,470 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/commandLineFlagConstraintsGC.hpp"
-#include "gc/shared/plab.hpp"
-#include "gc/shared/threadLocalAllocBuffer.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/thread.inline.hpp"
-#include "utilities/align.hpp"
-#include "utilities/defaultStream.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/commandLineFlagConstraintsCMS.hpp"
-#include "gc/g1/commandLineFlagConstraintsG1.hpp"
-#include "gc/parallel/commandLineFlagConstraintsParallel.hpp"
-#endif
-#ifdef COMPILER1
-#include "c1/c1_globals.hpp"
-#endif // COMPILER1
-#ifdef COMPILER2
-#include "opto/c2_globals.hpp"
-#endif // COMPILER2
-
-// Some flags that have default values that indicate that the
-// JVM should automatically determine an appropriate value
-// for that flag.  In those cases it is only appropriate for the
-// constraint checking to be done if the user has specified the
-// value(s) of the flag(s) on the command line.  In the constraint
-// checking functions,  FLAG_IS_CMDLINE() is used to check if
-// the flag has been set by the user and so should be checked.
-
-// As ParallelGCThreads differs among GC modes, we need constraint function.
-Flag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose) {
-  Flag::Error status = Flag::SUCCESS;
-
-#if INCLUDE_ALL_GCS
-  status = ParallelGCThreadsConstraintFuncParallel(value, verbose);
-  if (status != Flag::SUCCESS) {
-    return status;
-  }
-
-  status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
-  if (status != Flag::SUCCESS) {
-    return status;
-  }
-#endif
-
-  return status;
-}
-
-// As ConcGCThreads should be smaller than ParallelGCThreads,
-// we need constraint function.
-Flag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  // CMS and G1 GCs use ConcGCThreads.
-  if ((UseConcMarkSweepGC || UseG1GC) && (value > ParallelGCThreads)) {
-    CommandLineError::print(verbose,
-                            "ConcGCThreads (" UINT32_FORMAT ") must be "
-                            "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
-                            value, ParallelGCThreads);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-#endif
-  return Flag::SUCCESS;
-}
-
-static Flag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value < PLAB::min_size())) {
-    CommandLineError::print(verbose,
-                            "%s (" SIZE_FORMAT ") must be "
-                            "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
-                            name, value, PLAB::min_size());
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-#endif // INCLUDE_ALL_GCS
-  return Flag::SUCCESS;
-}
-
-Flag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value > PLAB::max_size())) {
-    CommandLineError::print(verbose,
-                            "%s (" SIZE_FORMAT ") must be "
-                            "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
-                            name, value, PLAB::max_size());
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-#endif // INCLUDE_ALL_GCS
-  return Flag::SUCCESS;
-}
-
-static Flag::Error MinMaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
-  Flag::Error status = MinPLABSizeBounds(name, value, verbose);
-
-  if (status == Flag::SUCCESS) {
-    return MaxPLABSizeBounds(name, value, verbose);
-  }
-  return status;
-}
-
-Flag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose) {
-  return MinMaxPLABSizeBounds("YoungPLABSize", value, verbose);
-}
-
-Flag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
-  Flag::Error status = Flag::SUCCESS;
-
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    return OldPLABSizeConstraintFuncCMS(value, verbose);
-  } else {
-    status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
-  }
-#endif
-  return status;
-}
-
-Flag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
-  if (value > MaxHeapFreeRatio) {
-    CommandLineError::print(verbose,
-                            "MinHeapFreeRatio (" UINTX_FORMAT ") must be "
-                            "less than or equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
-                            value, MaxHeapFreeRatio);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
-  if (value < MinHeapFreeRatio) {
-    CommandLineError::print(verbose,
-                            "MaxHeapFreeRatio (" UINTX_FORMAT ") must be "
-                            "greater than or equal to MinHeapFreeRatio (" UINTX_FORMAT ")\n",
-                            value, MinHeapFreeRatio);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-static Flag::Error CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(size_t maxHeap, intx softRef, bool verbose) {
-  if ((softRef > 0) && ((maxHeap / M) > (max_uintx / softRef))) {
-    CommandLineError::print(verbose,
-                            "Desired lifetime of SoftReferences cannot be expressed correctly. "
-                            "MaxHeapSize (" SIZE_FORMAT ") or SoftRefLRUPolicyMSPerMB "
-                            "(" INTX_FORMAT ") is too large\n",
-                            maxHeap, softRef);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose) {
-  return CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(MaxHeapSize, value, verbose);
-}
-
-Flag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose) {
-  if (value > MarkStackSizeMax) {
-    CommandLineError::print(verbose,
-                            "MarkStackSize (" SIZE_FORMAT ") must be "
-                            "less than or equal to MarkStackSizeMax (" SIZE_FORMAT ")\n",
-                            value, MarkStackSizeMax);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
-  if (value > MaxMetaspaceFreeRatio) {
-    CommandLineError::print(verbose,
-                            "MinMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
-                            "less than or equal to MaxMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
-                            value, MaxMetaspaceFreeRatio);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
-  if (value < MinMetaspaceFreeRatio) {
-    CommandLineError::print(verbose,
-                            "MaxMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
-                            "greater than or equal to MinMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
-                            value, MinMetaspaceFreeRatio);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  Flag::Error status = InitialTenuringThresholdConstraintFuncParallel(value, verbose);
-  if (status != Flag::SUCCESS) {
-    return status;
-  }
-#endif
-
-  return Flag::SUCCESS;
-}
-
-Flag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  Flag::Error status = MaxTenuringThresholdConstraintFuncParallel(value, verbose);
-  if (status != Flag::SUCCESS) {
-    return status;
-  }
-#endif
-
-  // MaxTenuringThreshold=0 means NeverTenure=false && AlwaysTenure=true
-  if ((value == 0) && (NeverTenure || !AlwaysTenure)) {
-    CommandLineError::print(verbose,
-                            "MaxTenuringThreshold (0) should match to NeverTenure=false "
-                            "&& AlwaysTenure=true. But we have NeverTenure=%s "
-                            "AlwaysTenure=%s\n",
-                            NeverTenure ? "true" : "false",
-                            AlwaysTenure ? "true" : "false");
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  Flag::Error status = MaxGCPauseMillisConstraintFuncG1(value, verbose);
-  if (status != Flag::SUCCESS) {
-    return status;
-  }
-#endif
-
-  return Flag::SUCCESS;
-}
-
-Flag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  Flag::Error status = GCPauseIntervalMillisConstraintFuncG1(value, verbose);
-  if (status != Flag::SUCCESS) {
-    return status;
-  }
-#endif
-
-  return Flag::SUCCESS;
-}
-
-Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
-  size_t aligned_max = align_down(max_uintx/2, Metaspace::reserve_alignment_words());
-  if (value > aligned_max) {
-    CommandLineError::print(verbose,
-                            "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
-                            "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
-                            value, aligned_max);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-// To avoid an overflow by 'align_up(value, alignment)'.
-static Flag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
-  size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
-  if (value > aligned_max) {
-    CommandLineError::print(verbose,
-                            "%s (" SIZE_FORMAT ") must be "
-                            "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
-                            name, value, aligned_max);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-static Flag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bool verbose) {
-  size_t heap_alignment;
-
-#if INCLUDE_ALL_GCS
-  if (UseG1GC) {
-    // For G1 GC, we don't know until G1CollectorPolicy is created.
-    heap_alignment = MaxSizeForHeapAlignmentG1();
-  } else
-#endif
-  {
-    heap_alignment = CollectorPolicy::compute_heap_alignment();
-  }
-
-  return MaxSizeForAlignment(name, value, heap_alignment, verbose);
-}
-
-Flag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose) {
-  return MaxSizeForHeapAlignment("InitialHeapSize", value, verbose);
-}
-
-Flag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose) {
-  Flag::Error status = MaxSizeForHeapAlignment("MaxHeapSize", value, verbose);
-
-  if (status == Flag::SUCCESS) {
-    status = CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(value, SoftRefLRUPolicyMSPerMB, verbose);
-  }
-  return status;
-}
-
-Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose) {
-  // If an overflow happened in Arguments::set_heap_size(), MaxHeapSize will have too large a value.
-  // Check for this by ensuring that MaxHeapSize plus the requested min base address still fit within max_uintx.
-  if (UseCompressedOops && FLAG_IS_ERGO(MaxHeapSize) && (value > (max_uintx - MaxHeapSize))) {
-    CommandLineError::print(verbose,
-                            "HeapBaseMinAddress (" SIZE_FORMAT ") or MaxHeapSize (" SIZE_FORMAT ") is too large. "
-                            "Sum of them must be less than or equal to maximum of size_t (" SIZE_FORMAT ")\n",
-                            value, MaxHeapSize, max_uintx);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-
-  return MaxSizeForHeapAlignment("HeapBaseMinAddress", value, verbose);
-}
-
-Flag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
-  Flag::Error status = NewSizeConstraintFuncG1(value, verbose);
-  if (status != Flag::SUCCESS) {
-    return status;
-  }
-#endif
-
-  return Flag::SUCCESS;
-}
-
-Flag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose) {
-  // At least, alignment reserve area is needed.
-  if (value < ThreadLocalAllocBuffer::alignment_reserve_in_bytes()) {
-    CommandLineError::print(verbose,
-                            "MinTLABSize (" SIZE_FORMAT ") must be "
-                            "greater than or equal to reserved area in TLAB (" SIZE_FORMAT ")\n",
-                            value, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
-    CommandLineError::print(verbose,
-                            "MinTLABSize (" SIZE_FORMAT ") must be "
-                            "less than or equal to ergonomic TLAB maximum (" SIZE_FORMAT ")\n",
-                            value, ThreadLocalAllocBuffer::max_size() * HeapWordSize);
-    return Flag::VIOLATES_CONSTRAINT;
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error TLABSizeConstraintFunc(size_t value, bool verbose) {
-  // Skip for default value of zero which means set ergonomically.
-  if (FLAG_IS_CMDLINE(TLABSize)) {
-    if (value < MinTLABSize) {
-      CommandLineError::print(verbose,
-                              "TLABSize (" SIZE_FORMAT ") must be "
-                              "greater than or equal to MinTLABSize (" SIZE_FORMAT ")\n",
-                              value, MinTLABSize);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-    if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
-      CommandLineError::print(verbose,
-                              "TLABSize (" SIZE_FORMAT ") must be "
-                              "less than or equal to ergonomic TLAB maximum size (" SIZE_FORMAT ")\n",
-                              value, (ThreadLocalAllocBuffer::max_size() * HeapWordSize));
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return Flag::SUCCESS;
-}
-
-// We will protect overflow from ThreadLocalAllocBuffer::record_slow_allocation(),
-// so AfterMemoryInit type is enough to check.
-Flag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose) {
-  if (UseTLAB) {
-    size_t refill_waste_limit = Thread::current()->tlab().refill_waste_limit();
-
-    // Compare with 'max_uintx' as ThreadLocalAllocBuffer::_refill_waste_limit is 'size_t'.
-    if (refill_waste_limit > (max_uintx - value)) {
-      CommandLineError::print(verbose,
-                              "TLABWasteIncrement (" UINTX_FORMAT ") must be "
-                              "less than or equal to ergonomic TLAB waste increment maximum size(" SIZE_FORMAT ")\n",
-                              value, (max_uintx - refill_waste_limit));
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return Flag::SUCCESS;
-}
-
-Flag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
-  if (FLAG_IS_CMDLINE(SurvivorRatio) &&
-      (value > (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()))) {
-    CommandLineError::print(verbose,
-                            "SurvivorRatio (" UINTX_FORMAT ") must be "
-                            "less than or equal to ergonomic SurvivorRatio maximum (" SIZE_FORMAT ")\n",
-                            value,
-                            (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()));
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose) {
-  if (value > MaxMetaspaceSize) {
-    CommandLineError::print(verbose,
-                            "MetaspaceSize (" SIZE_FORMAT ") must be "
-                            "less than or equal to MaxMetaspaceSize (" SIZE_FORMAT ")\n",
-                            value, MaxMetaspaceSize);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
-  if (value < MetaspaceSize) {
-    CommandLineError::print(verbose,
-                            "MaxMetaspaceSize (" SIZE_FORMAT ") must be "
-                            "greater than or equal to MetaspaceSize (" SIZE_FORMAT ")\n",
-                            value, MaxMetaspaceSize);
-    return Flag::VIOLATES_CONSTRAINT;
-  } else {
-    return Flag::SUCCESS;
-  }
-}
-
-Flag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose) {
-  if (value != 0) {
-    if (!is_power_of_2(value)) {
-      CommandLineError::print(verbose,
-                              "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
-                              "power of 2\n",
-                              value);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-    if (value < ObjectAlignmentInBytes) {
-      CommandLineError::print(verbose,
-                              "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
-                              "greater than or equal to ObjectAlignmentInBytes (" INTX_FORMAT ")\n",
-                              value, ObjectAlignmentInBytes);
-      return Flag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return Flag::SUCCESS;
-}
--- a/src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
-#define SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
-
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/commandLineFlagConstraintsCMS.hpp"
-#include "gc/g1/commandLineFlagConstraintsG1.hpp"
-#include "gc/parallel/commandLineFlagConstraintsParallel.hpp"
-#endif
-
-/*
- * Here we have GC arguments constraints functions, which are called automatically
- * whenever flag's value changes. If the constraint fails the function should return
- * an appropriate error value.
- */
-
-Flag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose);
-Flag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose);
-Flag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose);
-Flag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose);
-Flag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose);
-
-Flag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose);
-Flag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose);
-Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose);
-Flag::Error NewSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error TLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose);
-Flag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose);
-
-// Internal
-Flag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose);
-
-#endif // SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -53,6 +53,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/jvmFlagConstraintsGC.hpp"
+#include "gc/shared/plab.hpp"
+#include "gc/shared/threadLocalAllocBuffer.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/align.hpp"
+#include "utilities/defaultStream.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#include "gc/g1/jvmFlagConstraintsG1.hpp"
+#include "gc/parallel/jvmFlagConstraintsParallel.hpp"
+#endif
+#ifdef COMPILER1
+#include "c1/c1_globals.hpp"
+#endif // COMPILER1
+#ifdef COMPILER2
+#include "opto/c2_globals.hpp"
+#endif // COMPILER2
+
+// Some flags have default values that indicate that the
+// JVM should automatically determine an appropriate value
+// for that flag.  In those cases it is only appropriate for the
+// constraint checking to be done if the user has specified the
+// value(s) of the flag(s) on the command line.  In the constraint
+// checking functions, FLAG_IS_CMDLINE() is used to check whether
+// the flag has been set by the user and so should be checked.
+
+// As ParallelGCThreads differs among GC modes, we need a constraint function.
+JVMFlag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose) {
+  JVMFlag::Error status = JVMFlag::SUCCESS;
+
+#if INCLUDE_ALL_GCS
+  status = ParallelGCThreadsConstraintFuncParallel(value, verbose);
+  if (status != JVMFlag::SUCCESS) {
+    return status;
+  }
+
+  status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
+  if (status != JVMFlag::SUCCESS) {
+    return status;
+  }
+#endif
+
+  return status;
+}
+
+// As ConcGCThreads should be smaller than ParallelGCThreads,
+// we need a constraint function.
+JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  // CMS and G1 GCs use ConcGCThreads.
+  if ((UseConcMarkSweepGC || UseG1GC) && (value > ParallelGCThreads)) {
+    CommandLineError::print(verbose,
+                            "ConcGCThreads (" UINT32_FORMAT ") must be "
+                            "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
+                            value, ParallelGCThreads);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+#endif
+  return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value < PLAB::min_size())) {
+    CommandLineError::print(verbose,
+                            "%s (" SIZE_FORMAT ") must be "
+                            "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
+                            name, value, PLAB::min_size());
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+#endif // INCLUDE_ALL_GCS
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value > PLAB::max_size())) {
+    CommandLineError::print(verbose,
+                            "%s (" SIZE_FORMAT ") must be "
+                            "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
+                            name, value, PLAB::max_size());
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+#endif // INCLUDE_ALL_GCS
+  return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error MinMaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
+  JVMFlag::Error status = MinPLABSizeBounds(name, value, verbose);
+
+  if (status == JVMFlag::SUCCESS) {
+    return MaxPLABSizeBounds(name, value, verbose);
+  }
+  return status;
+}
+
+JVMFlag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose) {
+  return MinMaxPLABSizeBounds("YoungPLABSize", value, verbose);
+}
+
+JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
+  JVMFlag::Error status = JVMFlag::SUCCESS;
+
+#if INCLUDE_ALL_GCS
+  if (UseConcMarkSweepGC) {
+    return OldPLABSizeConstraintFuncCMS(value, verbose);
+  } else {
+    status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
+  }
+#endif
+  return status;
+}
+
+JVMFlag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
+  if (value > MaxHeapFreeRatio) {
+    CommandLineError::print(verbose,
+                            "MinHeapFreeRatio (" UINTX_FORMAT ") must be "
+                            "less than or equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
+                            value, MaxHeapFreeRatio);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
+  if (value < MinHeapFreeRatio) {
+    CommandLineError::print(verbose,
+                            "MaxHeapFreeRatio (" UINTX_FORMAT ") must be "
+                            "greater than or equal to MinHeapFreeRatio (" UINTX_FORMAT ")\n",
+                            value, MinHeapFreeRatio);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+static JVMFlag::Error CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(size_t maxHeap, intx softRef, bool verbose) {
+  if ((softRef > 0) && ((maxHeap / M) > (max_uintx / softRef))) {
+    CommandLineError::print(verbose,
+                            "Desired lifetime of SoftReferences cannot be expressed correctly. "
+                            "MaxHeapSize (" SIZE_FORMAT ") or SoftRefLRUPolicyMSPerMB "
+                            "(" INTX_FORMAT ") is too large\n",
+                            maxHeap, softRef);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose) {
+  return CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(MaxHeapSize, value, verbose);
+}
+
+JVMFlag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose) {
+  if (value > MarkStackSizeMax) {
+    CommandLineError::print(verbose,
+                            "MarkStackSize (" SIZE_FORMAT ") must be "
+                            "less than or equal to MarkStackSizeMax (" SIZE_FORMAT ")\n",
+                            value, MarkStackSizeMax);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
+  if (value > MaxMetaspaceFreeRatio) {
+    CommandLineError::print(verbose,
+                            "MinMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
+                            "less than or equal to MaxMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+                            value, MaxMetaspaceFreeRatio);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
+  if (value < MinMetaspaceFreeRatio) {
+    CommandLineError::print(verbose,
+                            "MaxMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
+                            "greater than or equal to MinMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+                            value, MinMetaspaceFreeRatio);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  JVMFlag::Error status = InitialTenuringThresholdConstraintFuncParallel(value, verbose);
+  if (status != JVMFlag::SUCCESS) {
+    return status;
+  }
+#endif
+
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  JVMFlag::Error status = MaxTenuringThresholdConstraintFuncParallel(value, verbose);
+  if (status != JVMFlag::SUCCESS) {
+    return status;
+  }
+#endif
+
+  // MaxTenuringThreshold=0 means NeverTenure=false && AlwaysTenure=true
+  if ((value == 0) && (NeverTenure || !AlwaysTenure)) {
+    CommandLineError::print(verbose,
+                            "MaxTenuringThreshold (0) should match to NeverTenure=false "
+                            "&& AlwaysTenure=true. But we have NeverTenure=%s "
+                            "AlwaysTenure=%s\n",
+                            NeverTenure ? "true" : "false",
+                            AlwaysTenure ? "true" : "false");
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  JVMFlag::Error status = MaxGCPauseMillisConstraintFuncG1(value, verbose);
+  if (status != JVMFlag::SUCCESS) {
+    return status;
+  }
+#endif
+
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  JVMFlag::Error status = GCPauseIntervalMillisConstraintFuncG1(value, verbose);
+  if (status != JVMFlag::SUCCESS) {
+    return status;
+  }
+#endif
+
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
+  size_t aligned_max = align_down(max_uintx/2, Metaspace::reserve_alignment_words());
+  if (value > aligned_max) {
+    CommandLineError::print(verbose,
+                            "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
+                            "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
+                            value, aligned_max);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+// To avoid overflow in 'align_up(value, alignment)'.
+static JVMFlag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
+  size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
+  if (value > aligned_max) {
+    CommandLineError::print(verbose,
+                            "%s (" SIZE_FORMAT ") must be "
+                            "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
+                            name, value, aligned_max);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bool verbose) {
+  size_t heap_alignment;
+
+#if INCLUDE_ALL_GCS
+  if (UseG1GC) {
+    // For G1 GC, we don't know the heap alignment until G1CollectorPolicy is created.
+    heap_alignment = MaxSizeForHeapAlignmentG1();
+  } else
+#endif
+  {
+    heap_alignment = CollectorPolicy::compute_heap_alignment();
+  }
+
+  return MaxSizeForAlignment(name, value, heap_alignment, verbose);
+}
+
+JVMFlag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose) {
+  return MaxSizeForHeapAlignment("InitialHeapSize", value, verbose);
+}
+
+JVMFlag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose) {
+  JVMFlag::Error status = MaxSizeForHeapAlignment("MaxHeapSize", value, verbose);
+
+  if (status == JVMFlag::SUCCESS) {
+    status = CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(value, SoftRefLRUPolicyMSPerMB, verbose);
+  }
+  return status;
+}
+
+JVMFlag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose) {
+  // If an overflow happened in Arguments::set_heap_size(), MaxHeapSize will have too large a value.
+  // Check for this by ensuring that MaxHeapSize plus the requested min base address still fit within max_uintx.
+  if (UseCompressedOops && FLAG_IS_ERGO(MaxHeapSize) && (value > (max_uintx - MaxHeapSize))) {
+    CommandLineError::print(verbose,
+                            "HeapBaseMinAddress (" SIZE_FORMAT ") or MaxHeapSize (" SIZE_FORMAT ") is too large. "
+                            "Sum of them must be less than or equal to maximum of size_t (" SIZE_FORMAT ")\n",
+                            value, MaxHeapSize, max_uintx);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+
+  return MaxSizeForHeapAlignment("HeapBaseMinAddress", value, verbose);
+}
+
+JVMFlag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
+#if INCLUDE_ALL_GCS
+  JVMFlag::Error status = NewSizeConstraintFuncG1(value, verbose);
+  if (status != JVMFlag::SUCCESS) {
+    return status;
+  }
+#endif
+
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose) {
+  // At a minimum, the alignment reserve area is needed.
+  if (value < ThreadLocalAllocBuffer::alignment_reserve_in_bytes()) {
+    CommandLineError::print(verbose,
+                            "MinTLABSize (" SIZE_FORMAT ") must be "
+                            "greater than or equal to reserved area in TLAB (" SIZE_FORMAT ")\n",
+                            value, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
+    CommandLineError::print(verbose,
+                            "MinTLABSize (" SIZE_FORMAT ") must be "
+                            "less than or equal to ergonomic TLAB maximum (" SIZE_FORMAT ")\n",
+                            value, ThreadLocalAllocBuffer::max_size() * HeapWordSize);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error TLABSizeConstraintFunc(size_t value, bool verbose) {
+  // Skip for default value of zero which means set ergonomically.
+  if (FLAG_IS_CMDLINE(TLABSize)) {
+    if (value < MinTLABSize) {
+      CommandLineError::print(verbose,
+                              "TLABSize (" SIZE_FORMAT ") must be "
+                              "greater than or equal to MinTLABSize (" SIZE_FORMAT ")\n",
+                              value, MinTLABSize);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+    if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
+      CommandLineError::print(verbose,
+                              "TLABSize (" SIZE_FORMAT ") must be "
+                              "less than or equal to ergonomic TLAB maximum size (" SIZE_FORMAT ")\n",
+                              value, (ThreadLocalAllocBuffer::max_size() * HeapWordSize));
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return JVMFlag::SUCCESS;
+}
+
+// This protects ThreadLocalAllocBuffer::record_slow_allocation() from overflow,
+// so a constraint check of type AfterMemoryInit is sufficient here.
+JVMFlag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose) {
+  if (UseTLAB) {
+    size_t refill_waste_limit = Thread::current()->tlab().refill_waste_limit();
+
+    // Compare with 'max_uintx' as ThreadLocalAllocBuffer::_refill_waste_limit is 'size_t'.
+    if (refill_waste_limit > (max_uintx - value)) {
+      CommandLineError::print(verbose,
+                              "TLABWasteIncrement (" UINTX_FORMAT ") must be "
+                              "less than or equal to ergonomic TLAB waste increment maximum size(" SIZE_FORMAT ")\n",
+                              "less than or equal to ergonomic TLAB waste increment maximum size (" SIZE_FORMAT ")\n",
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
+  if (FLAG_IS_CMDLINE(SurvivorRatio) &&
+      (value > (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()))) {
+    CommandLineError::print(verbose,
+                            "SurvivorRatio (" UINTX_FORMAT ") must be "
+                            "less than or equal to ergonomic SurvivorRatio maximum (" SIZE_FORMAT ")\n",
+                            value,
+                            (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()));
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose) {
+  if (value > MaxMetaspaceSize) {
+    CommandLineError::print(verbose,
+                            "MetaspaceSize (" SIZE_FORMAT ") must be "
+                            "less than or equal to MaxMetaspaceSize (" SIZE_FORMAT ")\n",
+                            value, MaxMetaspaceSize);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
+  if (value < MetaspaceSize) {
+    CommandLineError::print(verbose,
+                            "MaxMetaspaceSize (" SIZE_FORMAT ") must be "
+                            "greater than or equal to MetaspaceSize (" SIZE_FORMAT ")\n",
+                            value, MetaspaceSize);
+    return JVMFlag::VIOLATES_CONSTRAINT;
+  } else {
+    return JVMFlag::SUCCESS;
+  }
+}
+
+JVMFlag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose) {
+  if (value != 0) {
+    if (!is_power_of_2(value)) {
+      CommandLineError::print(verbose,
+                              "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
+                              "power of 2\n",
+                              value);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+    if (value < ObjectAlignmentInBytes) {
+      CommandLineError::print(verbose,
+                              "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
+                              "greater than or equal to ObjectAlignmentInBytes (" INTX_FORMAT ")\n",
+                              value, ObjectAlignmentInBytes);
+      return JVMFlag::VIOLATES_CONSTRAINT;
+    }
+  }
+  return JVMFlag::SUCCESS;
+}
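
The MaxSizeForAlignment helper added above caps a flag value so that a later align_up(value, alignment) cannot wrap around the size_t range: it subtracts one alignment from max_uintx and rounds down to an alignment boundary. A minimal standalone sketch of that arithmetic (plain C++, not part of the changeset; align_up_sketch is a hypothetical name):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Round value up to a multiple of alignment (a power of two), mirroring the
    // idea behind HotSpot's align_up().
    static std::size_t align_up_sketch(std::size_t value, std::size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const std::size_t max       = SIZE_MAX;          // stand-in for max_uintx
      const std::size_t alignment = 2 * 1024 * 1024;   // e.g. a 2M heap alignment
      // Largest value for which align_up cannot wrap: subtract one alignment,
      // then round down to an alignment boundary.
      const std::size_t aligned_max = (max - alignment) & ~(alignment - 1);
      // aligned_max is itself aligned, so rounding it up leaves it unchanged.
      std::printf("%d\n", align_up_sketch(aligned_max, alignment) == aligned_max);  // 1
      return 0;
    }
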
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_JVMFLAGCONSTRAINTSGC_HPP
+#define SHARE_GC_SHARED_JVMFLAGCONSTRAINTSGC_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#include "gc/g1/jvmFlagConstraintsG1.hpp"
+#include "gc/parallel/jvmFlagConstraintsParallel.hpp"
+#endif
+
+/*
+ * Here we have the GC argument constraint functions, which are called automatically
+ * whenever a flag's value changes. If the constraint fails, the function should return
+ * an appropriate error value.
+ */
+
+JVMFlag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose);
+JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose);
+JVMFlag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose);
+JVMFlag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose);
+
+JVMFlag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error NewSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error TLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose);
+
+// Internal
+JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose);
+
+#endif // SHARE_GC_SHARED_JVMFLAGCONSTRAINTSGC_HPP
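
The CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB guard in the new .cpp file above rejects combinations where (MaxHeapSize / M) * SoftRefLRUPolicyMSPerMB would overflow, i.e. where the desired SoftReference lifetime cannot be represented. A standalone sketch of the same overflow test (plain C++, not part of the changeset; the function name is hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // True if heap_mb * ms_per_mb would not fit in a size_t; mirrors the
    // (maxHeap / M) > (max_uintx / softRef) test without performing the
    // multiplication that could overflow.
    static bool softref_lifetime_overflows(std::size_t max_heap_bytes, long long ms_per_mb) {
      const std::size_t M = 1024 * 1024;
      const std::size_t heap_mb = max_heap_bytes / M;
      return ms_per_mb > 0 &&
             heap_mb > SIZE_MAX / static_cast<std::size_t>(ms_per_mb);
    }

    int main() {
      const std::size_t GB = 1024ull * 1024 * 1024;
      std::printf("%d\n", softref_lifetime_overflows(32 * GB, 1000));        // 0: representable
      std::printf("%d\n", softref_lifetime_overflows(SIZE_MAX, 1000000000)); // 1: rejected
      return 0;
    }
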
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -37,6 +37,7 @@
 #include "jvmci/jvmciCompilerToVM.hpp"
 #include "jvmci/jvmciCodeInstaller.hpp"
 #include "jvmci/jvmciRuntime.hpp"
+#include "runtime/flags/jvmFlag.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
@@ -125,7 +126,7 @@
   }
   ResourceMark rm;
   const char* cstring = java_lang_String::as_utf8_string(name());
-  Flag* flag = Flag::find_flag(cstring, strlen(cstring), /* allow_locked */ true, /* return_flag */ true);
+  JVMFlag* flag = JVMFlag::find_flag(cstring, strlen(cstring), /* allow_locked */ true, /* return_flag */ true);
   if (flag == NULL) {
     return c2vm;
   }
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -29,6 +29,7 @@
 #include "jvmci/jvmciRuntime.hpp"
 #include "jvmci/jvmciCompilerToVM.hpp"
 #include "jvmci/vmStructs_jvmci.hpp"
+#include "runtime/flags/jvmFlag.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/resourceHash.hpp"
@@ -378,9 +379,9 @@
 #define COUNT_FLAG(ignore) +1
 #ifdef ASSERT
 #define CHECK_FLAG(type, name) { \
-  Flag* flag = Flag::find_flag(#name, strlen(#name), /*allow_locked*/ true, /* return_flag */ true); \
+  JVMFlag* flag = JVMFlag::find_flag(#name, strlen(#name), /*allow_locked*/ true, /* return_flag */ true); \
   assert(flag != NULL, "No such flag named " #name); \
-  assert(flag->is_##type(), "Flag " #name " is not of type " #type); \
+  assert(flag->is_##type(), "JVMFlag " #name " is not of type " #type); \
 }
 #else
 #define CHECK_FLAG(type, name)
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -415,6 +415,34 @@
   }
 JRT_END
 
+// Object.notify() fast path, caller does slow path
+JRT_LEAF(jboolean, JVMCIRuntime::object_notify(JavaThread *thread, oopDesc* obj))
+
+  // Very few notify/notifyAll operations find any threads on the waitset, so
+  // the dominant fast-path is to simply return.
+  // Relatedly, it's critical that notify/notifyAll be fast in order to
+  // reduce lock hold times.
+  if (!SafepointSynchronize::is_synchronizing()) {
+    if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
+      return true;
+    }
+  }
+  return false; // caller must perform slow path
+
+JRT_END
+
+// Object.notifyAll() fast path, caller does slow path
+JRT_LEAF(jboolean, JVMCIRuntime::object_notifyAll(JavaThread *thread, oopDesc* obj))
+
+  if (!SafepointSynchronize::is_synchronizing()) {
+    if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
+      return true;
+    }
+  }
+  return false; // caller must perform slow path
+
+JRT_END
+
 JRT_ENTRY(void, JVMCIRuntime::throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message))
   TempNewSymbol symbol = SymbolTable::new_symbol(exception, CHECK);
   SharedRuntime::throw_and_post_jvmti_exception(thread, symbol, message);
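
The comment above explains why object_notify()/object_notifyAll() are JRT_LEAF fast paths: most notifications find no waiters, so the compiled caller tries the cheap leaf call first and only falls back to the blocking slow path when it returns false. A standalone sketch of that calling pattern (plain C++, not JVMCI code; all names below are hypothetical stand-ins):

    #include <cstdio>

    static bool safepoint_pending = false;  // stand-in for SafepointSynchronize::is_synchronizing()

    // Stand-in for ObjectSynchronizer::quick_notify(): succeeds whenever the
    // notification can be completed without blocking (e.g. nobody is waiting).
    static bool quick_notify(int /*obj*/, bool /*notify_all*/) {
      return true;
    }

    // Shape of the leaf fast path: return true if handled, false if the caller
    // must take the ordinary (blocking) slow path.
    static bool object_notify_fast(int obj) {
      if (!safepoint_pending && quick_notify(obj, /*notify_all=*/false)) {
        return true;
      }
      return false;
    }

    static void notify_slow(int obj) {
      std::printf("slow-path notify for %d\n", obj);
    }

    int main() {
      int obj = 42;
      if (!object_notify_fast(obj)) {
        notify_slow(obj);  // only reached when the leaf call declined
      }
      return 0;
    }
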
--- a/src/hotspot/share/jvmci/jvmciRuntime.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -139,6 +139,8 @@
   static address exception_handler_for_pc(JavaThread* thread);
   static void monitorenter(JavaThread* thread, oopDesc* obj, BasicLock* lock);
   static void monitorexit (JavaThread* thread, oopDesc* obj, BasicLock* lock);
+  static jboolean object_notify(JavaThread* thread, oopDesc* obj);
+  static jboolean object_notifyAll(JavaThread* thread, oopDesc* obj);
   static void vm_error(JavaThread* thread, jlong where, jlong format, jlong value);
   static oopDesc* load_and_clear_exception(JavaThread* thread);
   static void log_printf(JavaThread* thread, oopDesc* format, jlong v1, jlong v2, jlong v3);
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -35,6 +35,7 @@
 #include "oops/oop.hpp"
 #include "oops/oopHandle.hpp"
 #include "oops/objArrayKlass.hpp"
+#include "runtime/flags/jvmFlag.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
@@ -146,16 +147,16 @@
   nonstatic_field(Deoptimization::UnrollBlock, _initial_info,                          intptr_t)                                     \
   nonstatic_field(Deoptimization::UnrollBlock, _unpack_kind,                           int)                                          \
                                                                                                                                      \
-  nonstatic_field(ExceptionTableElement,       start_pc,                                       u2)                                   \
-  nonstatic_field(ExceptionTableElement,       end_pc,                                         u2)                                   \
-  nonstatic_field(ExceptionTableElement,       handler_pc,                                     u2)                                   \
-  nonstatic_field(ExceptionTableElement,       catch_type_index,                               u2)                                   \
+  nonstatic_field(ExceptionTableElement,       start_pc,                                      u2)                                    \
+  nonstatic_field(ExceptionTableElement,       end_pc,                                        u2)                                    \
+  nonstatic_field(ExceptionTableElement,       handler_pc,                                    u2)                                    \
+  nonstatic_field(ExceptionTableElement,       catch_type_index,                              u2)                                    \
                                                                                                                                      \
-  nonstatic_field(Flag,                        _type,                                          const char*)                          \
-  nonstatic_field(Flag,                        _name,                                          const char*)                          \
-  unchecked_nonstatic_field(Flag,              _addr,                                          sizeof(void*))                        \
-  nonstatic_field(Flag,                        _flags,                                         Flag::Flags)                          \
-  static_field(Flag,                           flags,                                          Flag*)                                \
+  nonstatic_field(JVMFlag,                     _type,                                         const char*)                           \
+  nonstatic_field(JVMFlag,                     _name,                                         const char*)                           \
+  unchecked_nonstatic_field(JVMFlag,           _addr,                                         sizeof(void*))                         \
+  nonstatic_field(JVMFlag,                     _flags,                                        JVMFlag::Flags)                        \
+  static_field(JVMFlag,                        flags,                                         JVMFlag*)                              \
                                                                                                                                      \
   nonstatic_field(InstanceKlass,               _fields,                                       Array<u2>*)                            \
   nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                         \
@@ -345,8 +346,8 @@
   declare_toplevel_type(BasicLock)                                        \
   declare_toplevel_type(CompilerToVM)                                     \
   declare_toplevel_type(ExceptionTableElement)                            \
-  declare_toplevel_type(Flag)                                             \
-  declare_toplevel_type(Flag*)                                            \
+  declare_toplevel_type(JVMFlag)                                          \
+  declare_toplevel_type(JVMFlag*)                                         \
   declare_toplevel_type(InvocationCounter)                                \
   declare_toplevel_type(JVMCIEnv)                                         \
   declare_toplevel_type(LocalVariableTableElement)                        \
@@ -625,6 +626,8 @@
   declare_function(JVMCIRuntime::exception_handler_for_pc) \
   declare_function(JVMCIRuntime::monitorenter) \
   declare_function(JVMCIRuntime::monitorexit) \
+  declare_function(JVMCIRuntime::object_notify) \
+  declare_function(JVMCIRuntime::object_notifyAll) \
   declare_function(JVMCIRuntime::throw_and_post_jvmti_exception) \
   declare_function(JVMCIRuntime::throw_klass_external_name_exception) \
   declare_function(JVMCIRuntime::throw_class_cast_exception) \
--- a/src/hotspot/share/memory/filemap.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/memory/filemap.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -75,6 +75,7 @@
 class FileMapInfo : public CHeapObj<mtInternal> {
 private:
   friend class ManifestStream;
+  friend class VMStructs;
   enum {
     _invalid_version = -1,
     _current_version = 3
--- a/src/hotspot/share/memory/metaspace.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/memory/metaspace.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -3378,6 +3378,7 @@
 }
 
 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
+  assert_lock_strong(_lock);
   assert(vs_list()->current_virtual_space() != NULL,
          "Should have been set");
   assert(current_chunk() == NULL ||
@@ -3406,7 +3407,16 @@
   // and do an allocation from it.
   if (next != NULL) {
     // Add to this manager's list of chunks in use.
-    add_chunk(next, false);
+    // If the new chunk is humongous, it was created to serve a single large allocation. In that
+    // case it usually makes no sense to make it the current chunk, since the next allocation would
+    // need to allocate a new chunk anyway, while we would now prematurely retire a perfectly
+    // good chunk which could be used for more normal allocations.
+    bool make_current = true;
+    if (next->get_chunk_type() == HumongousIndex &&
+        current_chunk() != NULL) {
+      make_current = false;
+    }
+    add_chunk(next, make_current);
     mem = next->allocate(word_size);
   }
 
@@ -3553,6 +3563,7 @@
 // Adds a chunk to the list of chunks in use.
 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) {
 
+  assert_lock_strong(_lock);
   assert(new_chunk != NULL, "Should not be NULL");
   assert(new_chunk->next() == NULL, "Should not be on a list");
 
@@ -3562,28 +3573,16 @@
   // chunk for that list.
   ChunkIndex index = chunk_manager()->list_index(new_chunk->word_size());
 
-  if (index != HumongousIndex) {
+  if (make_current) {
+    // If we are to make the chunk current, retire the old current chunk and replace
+    // it with the new chunk.
     retire_current_chunk();
     set_current_chunk(new_chunk);
-    new_chunk->set_next(chunks_in_use(index));
-    set_chunks_in_use(index, new_chunk);
-  } else {
-    // For null class loader data and DumpSharedSpaces, the first chunk isn't
-    // small, so small will be null.  Link this first chunk as the current
-    // chunk.
-    if (make_current) {
-      // Set as the current chunk but otherwise treat as a humongous chunk.
-      set_current_chunk(new_chunk);
-    }
-    // Link at head.  The _current_chunk only points to a humongous chunk for
-    // the null class loader metaspace (class and data virtual space managers)
-    // any humongous chunks so will not point to the tail
-    // of the humongous chunks list.
-    new_chunk->set_next(chunks_in_use(HumongousIndex));
-    set_chunks_in_use(HumongousIndex, new_chunk);
-
-    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
-  }
+  }
+
+  // Add the new chunk at the head of its respective chunk list.
+  new_chunk->set_next(chunks_in_use(index));
+  set_chunks_in_use(index, new_chunk);
 
   // Add to the running sum of capacity
   inc_size_metrics(new_chunk->word_size());
@@ -4800,7 +4799,7 @@
 void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
   Metachunk* chunk = get_initialization_chunk(type, mdtype);
   if (chunk != NULL) {
-    // Add to this manager's list of chunks in use and current_chunk().
+    // Add to this manager's list of chunks in use and make it the current_chunk().
     get_space_manager(mdtype)->add_chunk(chunk, true);
   }
 }
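
The grow_and_allocate() change above adds a policy detail: a humongous chunk fetched for one oversized allocation is still linked into the in-use lists, but it only becomes the current chunk when there is no usable current chunk, so ordinary allocations keep filling the existing one. A standalone sketch of that decision (plain C++, not HotSpot code; the types and names are hypothetical):

    #include <cstdio>

    enum ChunkType { SmallChunk, MediumChunk, HumongousChunk };

    struct Chunk {
      ChunkType type;
    };

    // Keep the existing current chunk when the new one is humongous: the next
    // ordinary allocation would need a fresh chunk anyway, so retiring the
    // current chunk early would only waste its remaining free space.
    static bool should_make_current(const Chunk* new_chunk, const Chunk* current_chunk) {
      return !(new_chunk->type == HumongousChunk && current_chunk != nullptr);
    }

    int main() {
      Chunk medium = { MediumChunk };
      Chunk huge   = { HumongousChunk };
      std::printf("%d\n", should_make_current(&huge, &medium));    // 0: keep the current chunk
      std::printf("%d\n", should_make_current(&huge, nullptr));    // 1: no current chunk yet
      std::printf("%d\n", should_make_current(&medium, &medium));  // 1: normal chunk becomes current
      return 0;
    }
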
--- a/src/hotspot/share/memory/universe.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/memory/universe.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -63,7 +63,8 @@
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
+#include "runtime/flags/flagSetting.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
@@ -701,7 +702,7 @@
   AOTLoader::universe_init();
 
   // Checks 'AfterMemoryInit' constraints.
-  if (!CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterMemoryInit)) {
+  if (!JVMFlagConstraintList::check_constraints(JVMFlagConstraint::AfterMemoryInit)) {
     return JNI_EINVAL;
   }
 
@@ -786,6 +787,7 @@
       // Did reserve heap below 32Gb. Can use base == 0;
       Universe::set_narrow_oop_base(0);
     }
+    AOTLoader::set_narrow_oop_shift();
 
     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
 
--- a/src/hotspot/share/oops/klassVtable.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/oops/klassVtable.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -38,6 +38,7 @@
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepointVerifiers.hpp"
 #include "utilities/copy.hpp"
--- a/src/hotspot/share/oops/oop.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/oops/oop.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -122,10 +122,9 @@
 
 // used only for asserts and guarantees
 bool oopDesc::is_oop(oop obj, bool ignore_mark_word) {
-  if (!check_obj_alignment(obj)) return false;
-  if (!Universe::heap()->is_in_reserved(obj)) return false;
-  // obj is aligned and accessible in heap
-  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;
+  if (!Universe::heap()->is_oop(obj)) {
+    return false;
+  }
 
   // Header verification: the mark is typically non-NULL. If we're
   // at a safepoint, it must not be null.
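
The is_oop() change above replaces the hand-rolled alignment and is_in_reserved() checks with a single call to the heap's own is_oop() predicate, which is expected to cover at least the same ground. A standalone sketch of what those two basic checks amount to (plain C++, not HotSpot code; FakeHeap and is_oop_sketch are hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct FakeHeap {
      std::uintptr_t start;
      std::uintptr_t end;
      bool is_in_reserved(const void* p) const {
        std::uintptr_t a = reinterpret_cast<std::uintptr_t>(p);
        return a >= start && a < end;
      }
    };

    // Alignment plus heap containment -- the two checks the old code did inline.
    static bool is_oop_sketch(const FakeHeap& heap, const void* obj, std::size_t min_alignment) {
      if (reinterpret_cast<std::uintptr_t>(obj) % min_alignment != 0) {
        return false;  // not aligned like an object
      }
      return heap.is_in_reserved(obj);  // must lie inside the reserved heap
    }

    int main() {
      FakeHeap heap = { 0x100000, 0x200000 };
      std::printf("%d\n", is_oop_sketch(heap, reinterpret_cast<void*>(0x100040), 8));  // 1
      std::printf("%d\n", is_oop_sketch(heap, reinterpret_cast<void*>(0x100041), 8));  // 0: misaligned
      return 0;
    }
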
--- a/src/hotspot/share/opto/cfgnode.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/cfgnode.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -298,6 +298,7 @@
   void reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, PhaseIterGVN* igvn);
   ProjNode* uncommon_trap_proj(CallStaticJavaNode*& call) const;
   bool fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn);
+  static bool is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc);
 
 protected:
   ProjNode* range_check_trap_proj(int& flip, Node*& l, Node*& r);
@@ -484,8 +485,13 @@
 // Indirect branch.  Uses PCTable above to implement a switch statement.
 // It emits as a table load and local branch.
 class JumpNode : public PCTableNode {
+  virtual uint size_of() const { return sizeof(*this); }
 public:
-  JumpNode( Node* control, Node* switch_val, uint size) : PCTableNode(control, switch_val, size) {
+  float* _probs; // probability of each projection
+  float _fcnt;   // total number of times this Jump was executed
+  JumpNode( Node* control, Node* switch_val, uint size, float* probs, float cnt)
+    : PCTableNode(control, switch_val, size),
+      _probs(probs), _fcnt(cnt) {
     init_class_id(Class_Jump);
   }
   virtual int   Opcode() const;
--- a/src/hotspot/share/opto/gcm.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/gcm.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -1867,8 +1867,7 @@
   }
 
   case Op_Jump:
-    // Divide the frequency between all successors evenly
-    return 1.0f/_num_succs;
+    return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];
 
   case Op_Catch: {
     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
--- a/src/hotspot/share/opto/ifnode.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/ifnode.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -775,6 +775,38 @@
   return success != NULL && fail != NULL;
 }
 
+bool IfNode::is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc) {
+  // Different methods and methods containing jsrs are not supported.
+  ciMethod* method = unc->jvms()->method();
+  ciMethod* dom_method = dom_unc->jvms()->method();
+  if (method != dom_method || method->has_jsrs()) {
+    return false;
+  }
+  // Check that both traps are in the same activation of the method (instead
+  // of two activations being inlined through different call sites) by verifying
+  // that the call stacks are equal for both JVMStates.
+  JVMState* dom_caller = dom_unc->jvms()->caller();
+  JVMState* caller = unc->jvms()->caller();
+  if ((dom_caller == NULL) != (caller == NULL)) {
+    // The current method must either be inlined into both dom_caller and
+    // caller or must not be inlined at all (top method). Bail out otherwise.
+    return false;
+  } else if (dom_caller != NULL && !dom_caller->same_calls_as(caller)) {
+    return false;
+  }
+  // Check that the bci of the dominating uncommon trap dominates the bci
+  // of the dominated uncommon trap. Otherwise we may not re-execute
+  // the dominated check after deoptimization from the merged uncommon trap.
+  ciTypeFlow* flow = dom_method->get_flow_analysis();
+  int bci = unc->jvms()->bci();
+  int dom_bci = dom_unc->jvms()->bci();
+  if (!flow->is_dominated_by(bci, dom_bci)) {
+    return false;
+  }
+
+  return true;
+}
+
 // Return projection that leads to an uncommon trap if any
 ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call) const {
   for (int i = 0; i < 2; i++) {
@@ -811,31 +843,7 @@
         return false;
       }
 
-      // Different methods and methods containing jsrs are not supported.
-      ciMethod* method = unc->jvms()->method();
-      ciMethod* dom_method = dom_unc->jvms()->method();
-      if (method != dom_method || method->has_jsrs()) {
-        return false;
-      }
-      // Check that both traps are in the same activation of the method (instead
-      // of two activations being inlined through different call sites) by verifying
-      // that the call stacks are equal for both JVMStates.
-      JVMState* dom_caller = dom_unc->jvms()->caller();
-      JVMState* caller = unc->jvms()->caller();
-      if ((dom_caller == NULL) != (caller == NULL)) {
-        // The current method must either be inlined into both dom_caller and
-        // caller or must not be inlined at all (top method). Bail out otherwise.
-        return false;
-      } else if (dom_caller != NULL && !dom_caller->same_calls_as(caller)) {
-        return false;
-      }
-      // Check that the bci of the dominating uncommon trap dominates the bci
-      // of the dominated uncommon trap. Otherwise we may not re-execute
-      // the dominated check after deoptimization from the merged uncommon trap.
-      ciTypeFlow* flow = dom_method->get_flow_analysis();
-      int bci = unc->jvms()->bci();
-      int dom_bci = dom_unc->jvms()->bci();
-      if (!flow->is_dominated_by(bci, dom_bci)) {
+      if (!is_dominator_unc(dom_unc, unc)) {
         return false;
       }
 
@@ -843,6 +851,8 @@
       // will be changed and the state of the dominating If will be
       // used. Checked that we didn't apply this transformation in a
       // previous compilation and it didn't cause too many traps
+      ciMethod* dom_method = dom_unc->jvms()->method();
+      int dom_bci = dom_unc->jvms()->bci();
       if (!igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_unstable_fused_if) &&
           !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check)) {
         success = unc_proj;
@@ -1220,6 +1230,10 @@
         return false;
       }
 
+      if (!is_dominator_unc(dom_unc, unc)) {
+        return false;
+      }
+
       return true;
     }
   }
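
Factoring the checks into IfNode::is_dominator_unc() makes the precondition for merging two uncommon traps reusable at the second call site above: same (jsr-free) method, same inlining call stack, and the dominating trap's bci must dominate the other's. A much-simplified standalone sketch of that predicate (plain C++, not C2 code; the structs and the dominance stand-in are hypothetical):

    #include <cstdio>

    // Hypothetical stand-ins for the JVMState/ciMethod data the real check uses.
    struct TrapState {
      int  method_id;        // which method the trap's JVMState belongs to
      bool has_jsrs;         // methods containing jsrs are not supported
      int  caller_chain_id;  // identifies the inlining call stack (0 = top method)
      int  bci;              // bytecode index of the trap
    };

    // Simplified stand-in for ciTypeFlow::is_dominated_by(); real dominance is
    // computed on the method's control-flow graph, not by comparing bcis.
    static bool bci_dominated_by(int bci, int dom_bci) {
      return dom_bci <= bci;
    }

    static bool is_dominator_unc_sketch(const TrapState& dom, const TrapState& unc) {
      if (dom.method_id != unc.method_id || unc.has_jsrs) {
        return false;  // different methods, or jsrs present
      }
      if (dom.caller_chain_id != unc.caller_chain_id) {
        return false;  // traps belong to different activations / call stacks
      }
      return bci_dominated_by(unc.bci, dom.bci);  // dominating trap must dominate in the bytecode
    }

    int main() {
      TrapState dom = { 1, false, 7, 10 };
      TrapState unc = { 1, false, 7, 42 };
      std::printf("%d\n", is_dominator_unc_sketch(dom, unc));  // 1
      return 0;
    }
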
--- a/src/hotspot/share/opto/machnode.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/machnode.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -750,6 +750,16 @@
 #endif
 };
 
+//------------------------------MachJumpNode-----------------------------------
+// Machine-specific versions of JumpNodes
+class MachJumpNode : public MachConstantNode {
+public:
+  float* _probs;
+  MachJumpNode() : MachConstantNode() {
+    init_class_id(Class_MachJump);
+  }
+};
+
 //------------------------------MachGotoNode-----------------------------------
 // Machine-specific versions of GotoNodes
 class MachGotoNode : public MachBranchNode {
--- a/src/hotspot/share/opto/node.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/node.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -94,6 +94,7 @@
 class MachConstantNode;
 class MachGotoNode;
 class MachIfNode;
+class MachJumpNode;
 class MachNode;
 class MachNullCheckNode;
 class MachProjNode;
@@ -651,6 +652,7 @@
       DEFINE_CLASS_ID(MachTemp,         Mach, 3)
       DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
       DEFINE_CLASS_ID(MachConstant,     Mach, 5)
+        DEFINE_CLASS_ID(MachJump,       MachConstant, 0)
       DEFINE_CLASS_ID(MachMerge,        Mach, 6)
 
     DEFINE_CLASS_ID(Type,  Node, 2)
@@ -831,6 +833,7 @@
   DEFINE_CLASS_QUERY(MachConstant)
   DEFINE_CLASS_QUERY(MachGoto)
   DEFINE_CLASS_QUERY(MachIf)
+  DEFINE_CLASS_QUERY(MachJump)
   DEFINE_CLASS_QUERY(MachNullCheck)
   DEFINE_CLASS_QUERY(MachProj)
   DEFINE_CLASS_QUERY(MachReturn)
--- a/src/hotspot/share/opto/parse.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/parse.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -552,17 +552,18 @@
   void    sharpen_type_after_if(BoolTest::mask btest,
                                 Node* con, const Type* tcon,
                                 Node* val, const Type* tval);
-  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
+  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
   Node*   jump_if_join(Node* iffalse, Node* iftrue);
-  void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
-  void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index);
-  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index);
+  void    jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index, bool unc);
+  void    jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index, bool unc);
+  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index, bool unc);
 
   friend class SwitchRange;
   void    do_tableswitch();
   void    do_lookupswitch();
   void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
   bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
+  void    linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);
 
   void decrement_age();
   // helper functions for methodData style profiling
--- a/src/hotspot/share/opto/parse2.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/parse2.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -186,10 +186,10 @@
 
 
 // returns IfNode
-IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) {
-  Node   *cmp = _gvn.transform( new CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
-  Node   *tst = _gvn.transform( new BoolNode( cmp, mask));
-  IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN );
+IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
+  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
+  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
+  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
   return iff;
 }
 
@@ -205,15 +205,27 @@
   return region;
 }
 
+// sentinel value for the target bci to mark never taken branches
+// (according to profiling)
+static const int never_reached = INT_MAX;
 
 //------------------------------helper for tableswitch-------------------------
-void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
+void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
   // True branch, use existing map info
   { PreserveJVMState pjvms(this);
     Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
     set_control( iftrue );
-    profile_switch_case(prof_table_index);
-    merge_new_path(dest_bci_if_true);
+    if (unc) {
+      repush_if_args();
+      uncommon_trap(Deoptimization::Reason_unstable_if,
+                    Deoptimization::Action_reinterpret,
+                    NULL,
+                    "taken always");
+    } else {
+      assert(dest_bci_if_true != never_reached, "inconsistent dest");
+      profile_switch_case(prof_table_index);
+      merge_new_path(dest_bci_if_true);
+    }
   }
 
   // False branch
@@ -221,13 +233,22 @@
   set_control( iffalse );
 }
 
-void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) {
+void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) {
   // True branch, use existing map info
   { PreserveJVMState pjvms(this);
     Node *iffalse  = _gvn.transform( new IfFalseNode (iff) );
     set_control( iffalse );
-    profile_switch_case(prof_table_index);
-    merge_new_path(dest_bci_if_true);
+    if (unc) {
+      repush_if_args();
+      uncommon_trap(Deoptimization::Reason_unstable_if,
+                    Deoptimization::Action_reinterpret,
+                    NULL,
+                    "taken never");
+    } else {
+      assert(dest_bci_if_true != never_reached, "inconsistent dest");
+      profile_switch_case(prof_table_index);
+      merge_new_path(dest_bci_if_true);
+    }
   }
 
   // False branch
@@ -235,10 +256,19 @@
   set_control( iftrue );
 }
 
-void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) {
+void Parse::jump_if_always_fork(int dest_bci, int prof_table_index, bool unc) {
   // False branch, use existing map and control()
-  profile_switch_case(prof_table_index);
-  merge_new_path(dest_bci);
+  if (unc) {
+    repush_if_args();
+    uncommon_trap(Deoptimization::Reason_unstable_if,
+                  Deoptimization::Action_reinterpret,
+                  NULL,
+                  "taken never");
+  } else {
+    assert(dest_bci != never_reached, "inconsistent dest");
+    profile_switch_case(prof_table_index);
+    merge_new_path(dest_bci);
+  }
 }
 
 
@@ -261,6 +291,7 @@
   jint _hi;                     // inclusive upper limit
   int _dest;
   int _table_index;             // index into method data table
+  float _cnt;                   // how many times this range was hit according to profiling
 
 public:
   jint lo() const              { return _lo;   }
@@ -268,44 +299,111 @@
   int  dest() const            { return _dest; }
   int  table_index() const     { return _table_index; }
   bool is_singleton() const    { return _lo == _hi; }
+  float cnt() const            { return _cnt; }
 
-  void setRange(jint lo, jint hi, int dest, int table_index) {
+  void setRange(jint lo, jint hi, int dest, int table_index, float cnt) {
     assert(lo <= hi, "must be a non-empty range");
-    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index;
+    _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; _cnt = cnt;
+    assert(_cnt >= 0, "");
   }
-  bool adjoinRange(jint lo, jint hi, int dest, int table_index) {
+  bool adjoinRange(jint lo, jint hi, int dest, int table_index, float cnt, bool trim_ranges) {
     assert(lo <= hi, "must be a non-empty range");
-    if (lo == _hi+1 && dest == _dest && table_index == _table_index) {
+    if (lo == _hi+1 && table_index == _table_index) {
+      // see merge_ranges() comment below
+      if (trim_ranges) {
+        if (cnt == 0) {
+          if (_cnt != 0) {
+            return false;
+          }
+          if (dest != _dest) {
+            _dest = never_reached;
+          }
+        } else {
+          if (_cnt == 0) {
+            return false;
+          }
+          if (dest != _dest) {
+            return false;
+          }
+        }
+      } else {
+        if (dest != _dest) {
+          return false;
+        }
+      }
       _hi = hi;
+      _cnt += cnt;
       return true;
     }
     return false;
   }
 
-  void set (jint value, int dest, int table_index) {
-    setRange(value, value, dest, table_index);
+  void set (jint value, int dest, int table_index, float cnt) {
+    setRange(value, value, dest, table_index, cnt);
   }
-  bool adjoin(jint value, int dest, int table_index) {
-    return adjoinRange(value, value, dest, table_index);
+  bool adjoin(jint value, int dest, int table_index, float cnt, bool trim_ranges) {
+    return adjoinRange(value, value, dest, table_index, cnt, trim_ranges);
+  }
+  bool adjoin(SwitchRange& other) {
+    return adjoinRange(other._lo, other._hi, other._dest, other._table_index, other._cnt, false);
   }
 
   void print() {
     if (is_singleton())
-      tty->print(" {%d}=>%d", lo(), dest());
+      tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
     else if (lo() == min_jint)
-      tty->print(" {..%d}=>%d", hi(), dest());
+      tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
     else if (hi() == max_jint)
-      tty->print(" {%d..}=>%d", lo(), dest());
+      tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
     else
-      tty->print(" {%d..%d}=>%d", lo(), hi(), dest());
+      tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
   }
 };
 
+// We try to minimize the number of ranges and the size of the taken
+// ones using profiling data. When ranges are created,
+// SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
+// if both were never hit or both were hit to build longer unreached
+// ranges. Here, we now merge adjoining ranges with the same
+// destination and finally set destination of unreached ranges to the
+// special value never_reached because it can help minimize the number
+// of tests that are necessary.
+//
+// For instance:
+// [0, 1] to target1 sometimes taken
+// [1, 2] to target1 never taken
+// [2, 3] to target2 never taken
+// would lead to:
+// [0, 1] to target1 sometimes taken
+// [1, 3] never taken
+//
+// (first 2 ranges to target1 are not merged)
+static void merge_ranges(SwitchRange* ranges, int& rp) {
+  if (rp == 0) {
+    return;
+  }
+  int shift = 0;
+  for (int j = 0; j < rp; j++) {
+    SwitchRange& r1 = ranges[j-shift];
+    SwitchRange& r2 = ranges[j+1];
+    if (r1.adjoin(r2)) {
+      shift++;
+    } else if (shift > 0) {
+      ranges[j+1-shift] = r2;
+    }
+  }
+  rp -= shift;
+  for (int j = 0; j <= rp; j++) {
+    SwitchRange& r = ranges[j];
+    if (r.cnt() == 0 && r.dest() != never_reached) {
+      r.setRange(r.lo(), r.hi(), never_reached, r.table_index(), r.cnt());
+    }
+  }
+}
 
 //-------------------------------do_tableswitch--------------------------------
 void Parse::do_tableswitch() {
   Node* lookup = pop();
-
   // Get information about tableswitch
   int default_dest = iter().get_dest_table(0);
   int lo_index     = iter().get_int_table(1);
@@ -319,31 +417,58 @@
     return;
   }
 
+  ciMethodData* methodData = method()->method_data();
+  ciMultiBranchData* profile = NULL;
+  if (methodData->is_mature() && UseSwitchProfiling) {
+    ciProfileData* data = methodData->bci_to_data(bci());
+    if (data != NULL && data->is_MultiBranchData()) {
+      profile = (ciMultiBranchData*)data;
+    }
+  }
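+  // Ranges that profiling says are never taken may be trimmed (turned into
+  // uncommon traps) only if we are no longer updating profile data and this
+  // bci has not already trapped too often.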
+  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
+
   // generate decision tree, using trichotomy when possible
   int rnum = len+2;
   bool makes_backward_branch = false;
   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
   int rp = -1;
   if (lo_index != min_jint) {
-    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex);
+    uint cnt = 1;
+    if (profile != NULL) {
+      cnt = profile->default_count() / (hi_index != max_jint ? 2 : 1);
+    }
+    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex, cnt);
   }
   for (int j = 0; j < len; j++) {
     jint match_int = lo_index+j;
     int  dest      = iter().get_dest_table(j+3);
     makes_backward_branch |= (dest <= bci());
     int  table_index = method_data_update() ? j : NullTableIndex;
-    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) {
-      ranges[++rp].set(match_int, dest, table_index);
+    uint cnt = 1;
+    if (profile != NULL) {
+      cnt = profile->count_at(j);
+    }
+    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
+      ranges[++rp].set(match_int, dest, table_index, cnt);
     }
   }
   jint highest = lo_index+(len-1);
   assert(ranges[rp].hi() == highest, "");
-  if (highest != max_jint
-      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) {
-    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
+  if (highest != max_jint) {
+    uint cnt = 1;
+    if (profile != NULL) {
+      cnt = profile->default_count() / (lo_index != min_jint ? 2 : 1);
+    }
+    if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, cnt, trim_ranges)) {
+      ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, cnt);
+    }
   }
   assert(rp < len+2, "not too many ranges");
 
+  if (trim_ranges) {
+    merge_ranges(ranges, rp);
+  }
+
   // Safepoint in case backward branch observed
   if( makes_backward_branch && UseLoopSafepoints )
     add_safepoint();
@@ -365,48 +490,263 @@
     return;
   }
 
+  ciMethodData* methodData = method()->method_data();
+  ciMultiBranchData* profile = NULL;
+  if (methodData->is_mature() && UseSwitchProfiling) {
+    ciProfileData* data = methodData->bci_to_data(bci());
+    if (data != NULL && data->is_MultiBranchData()) {
+      profile = (ciMultiBranchData*)data;
+    }
+  }
+  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
+
   // generate decision tree, using trichotomy when possible
-  jint* table = NEW_RESOURCE_ARRAY(jint, len*2);
+  jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
   {
-    for( int j = 0; j < len; j++ ) {
-      table[j+j+0] = iter().get_int_table(2+j+j);
-      table[j+j+1] = iter().get_dest_table(2+j+j+1);
+    for (int j = 0; j < len; j++) {
+      table[3*j+0] = iter().get_int_table(2+2*j);
+      table[3*j+1] = iter().get_dest_table(2+2*j+1);
+      table[3*j+2] = profile == NULL ? 1 : profile->count_at(j);
     }
-    qsort( table, len, 2*sizeof(table[0]), jint_cmp );
+    qsort(table, len, 3*sizeof(table[0]), jint_cmp);
+  }
+
+  float defaults = 0;
+  jint prev = min_jint;
+  for (int j = 0; j < len; j++) {
+    jint match_int = table[3*j+0];
+    if (match_int != prev) {
+      defaults += (float)match_int - prev;
+    }
+    prev = match_int+1;
+  }
+  if (prev-1 != max_jint) {
+    defaults += (float)max_jint - prev + 1;
+  }
+  float default_cnt = 1;
+  if (profile != NULL) {
+    default_cnt = profile->default_count()/defaults;
   }
 
   int rnum = len*2+1;
   bool makes_backward_branch = false;
   SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
   int rp = -1;
-  for( int j = 0; j < len; j++ ) {
-    jint match_int   = table[j+j+0];
-    int  dest        = table[j+j+1];
+  for (int j = 0; j < len; j++) {
+    jint match_int   = table[3*j+0];
+    int  dest        = table[3*j+1];
+    int  cnt         = table[3*j+2];
     int  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
     int  table_index = method_data_update() ? j : NullTableIndex;
     makes_backward_branch |= (dest <= bci());
-    if( match_int != next_lo ) {
-      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex);
+    float c = default_cnt * ((float)match_int - next_lo);
+    if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, NullTableIndex, c, trim_ranges))) {
+      assert(default_dest != never_reached, "sentinel value for dead destinations");
+      ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex, c);
     }
-    if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) {
-      ranges[++rp].set(match_int, dest, table_index);
+    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) {
+      assert(dest != never_reached, "sentinel value for dead destinations");
+      ranges[++rp].set(match_int, dest, table_index, cnt);
     }
   }
-  jint highest = table[2*(len-1)];
+  jint highest = table[3*(len-1)];
   assert(ranges[rp].hi() == highest, "");
-  if( highest != max_jint
-      && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) {
-    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex);
+  if (highest != max_jint &&
+      !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest), trim_ranges)) {
+    ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest));
   }
   assert(rp < rnum, "not too many ranges");
 
+  if (trim_ranges) {
+    merge_ranges(ranges, rp);
+  }
+
   // Safepoint in case backward branch observed
-  if( makes_backward_branch && UseLoopSafepoints )
+  if (makes_backward_branch && UseLoopSafepoints)
     add_safepoint();
 
   jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
 }
 
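+// Branch probability from profile counts: taken_cnt/total_cnt clamped to
+// [PROB_MIN, PROB_MAX], or PROB_FAIR when there is no profile data.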
+static float if_prob(float taken_cnt, float total_cnt) {
+  assert(taken_cnt <= total_cnt, "");
+  if (total_cnt == 0) {
+    return PROB_FAIR;
+  }
+  float p = taken_cnt / total_cnt;
+  return MIN2(MAX2(p, PROB_MIN), PROB_MAX);
+}
+
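+// Branch count for an If node: the profiled count, or COUNT_UNKNOWN when
+// nothing was recorded.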
+static float if_cnt(float cnt) {
+  if (cnt == 0) {
+    return COUNT_UNKNOWN;
+  }
+  return cnt;
+}
+
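+// Sum of the profiled hit counts of the ranges in [lo, hi].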
+static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
+  float total_cnt = 0;
+  for (SwitchRange* sr = lo; sr <= hi; sr++) {
+    total_cnt += sr->cnt();
+  }
+  return total_cnt;
+}
+
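+// State for the iterative traversal in compute_tree_cost(): a sub-range of
+// SwitchRanges with its pivot, accumulated cost and traversal progress.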
+class SwitchRanges : public ResourceObj {
+public:
+  SwitchRange* _lo;
+  SwitchRange* _hi;
+  SwitchRange* _mid;
+  float _cost;
+
+  enum {
+    Start,
+    LeftDone,
+    RightDone,
+    Done
+  } _state;
+
+  SwitchRanges(SwitchRange *lo, SwitchRange *hi)
+    : _lo(lo), _hi(hi), _mid(NULL),
+      _cost(0), _state(Start) {
+  }
+
+  SwitchRanges()
+    : _lo(NULL), _hi(NULL), _mid(NULL),
+      _cost(0), _state(Start) {}
+};
+
+// Estimate cost of performing a binary search on lo..hi
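+// The cost of a sub-range is its relative frequency plus the cost of its two
+// halves, so the result is the frequency-weighted depth of the search tree
+// (roughly the expected number of comparisons per execution of the switch).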
+static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
+  GrowableArray<SwitchRanges> tree;
+  SwitchRanges root(lo, hi);
+  tree.push(root);
+
+  float cost = 0;
+  do {
+    SwitchRanges& r = *tree.adr_at(tree.length()-1);
+    if (r._hi != r._lo) {
+      if (r._mid == NULL) {
+        float r_cnt = sum_of_cnts(r._lo, r._hi);
+
+        if (r_cnt == 0) {
+          tree.pop();
+          cost = 0;
+          continue;
+        }
+
+        SwitchRange* mid = NULL;
+        mid = r._lo;
+        for (float cnt = 0; ; ) {
+          assert(mid <= r._hi, "out of bounds");
+          cnt += mid->cnt();
+          if (cnt > r_cnt / 2) {
+            break;
+          }
+          mid++;
+        }
+        assert(mid <= r._hi, "out of bounds");
+        r._mid = mid;
+        r._cost = r_cnt / total_cnt;
+      }
+      r._cost += cost;
+      if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
+        cost = 0;
+        r._state = SwitchRanges::LeftDone;
+        tree.push(SwitchRanges(r._lo, r._mid-1));
+      } else if (r._state < SwitchRanges::RightDone) {
+        cost = 0;
+        r._state = SwitchRanges::RightDone;
+        tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
+      } else {
+        tree.pop();
+        cost = r._cost;
+      }
+    } else {
+      tree.pop();
+      cost = r._cost;
+    }
+  } while (tree.length() > 0);
+
+
+  return cost;
+}
+
+// It sometimes pays off to test the most common ranges before the binary search
+void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
+  uint nr = hi - lo + 1;
+  float total_cnt = sum_of_cnts(lo, hi);
+
+  float min = compute_tree_cost(lo, hi, total_cnt);
+  float extra = 1;
+  float sub = 0;
+
+  SwitchRange* array1 = lo;
+  SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);
+
+  SwitchRange* ranges = NULL;
+
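+  // Greedily peel off the most frequently hit range and emit an explicit test
+  // for it, as long as the estimated cost (linear tests so far plus a binary
+  // search over the remaining ranges) keeps decreasing.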
+  while (nr >= 2) {
+    assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
+    ranges = (lo == array1) ? array2 : array1;
+
+    // Find highest frequency range
+    SwitchRange* candidate = lo;
+    for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
+      if (sr->cnt() > candidate->cnt()) {
+        candidate = sr;
+      }
+    }
+    SwitchRange most_freq = *candidate;
+    if (most_freq.cnt() == 0) {
+      break;
+    }
+
+    // Copy remaining ranges into another array
+    int shift = 0;
+    for (uint i = 0; i < nr; i++) {
+      SwitchRange* sr = &lo[i];
+      if (sr != candidate) {
+        ranges[i-shift] = *sr;
+      } else {
+        shift++;
+        if (i > 0 && i < nr-1) {
+          SwitchRange prev = lo[i-1];
+          prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.table_index(), prev.cnt());
+          if (prev.adjoin(lo[i+1])) {
+            shift++;
+            i++;
+          }
+          ranges[i-shift] = prev;
+        }
+      }
+    }
+    nr -= shift;
+
+    // Evaluate cost of testing the most common range and performing a
+    // binary search on the other ranges
+    float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
+    if (cost >= min) {
+      break;
+    }
+    // swap arrays
+    lo = &ranges[0];
+    hi = &ranges[nr-1];
+
+    // It pays off: emit the test for the most common range
+    assert(most_freq.cnt() > 0, "must be taken");
+    Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
+    Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo())));
+    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
+    IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
+    jump_if_true_fork(iff, most_freq.dest(), most_freq.table_index(), false);
+
+    sub += most_freq.cnt() / total_cnt;
+    extra += 1 - sub;
+    min = cost;
+  }
+}
+
 //----------------------------create_jump_tables-------------------------------
 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
   // Are jumptables enabled
@@ -418,6 +758,8 @@
   // Don't make jump table if profiling
   if (method_data_update())  return false;
 
+  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
+
   // Decide if a guard is needed to lop off big ranges at either (or
   // both) end(s) of the input set. We'll call this the default target
   // even though we can't be sure that it is the true "default".
@@ -439,12 +781,22 @@
     default_dest = hi->dest();
   }
 
+  float total = sum_of_cnts(lo, hi);
+  float cost = compute_tree_cost(lo, hi, total);
+
   // If a guard test will eliminate very sparse end ranges, then
   // it is worth the cost of an extra jump.
+  float trimmed_cnt = 0;
   if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
     needs_guard = true;
-    if (default_dest == lo->dest()) lo++;
-    if (default_dest == hi->dest()) hi--;
+    if (default_dest == lo->dest()) {
+      trimmed_cnt += lo->cnt();
+      lo++;
+    }
+    if (default_dest == hi->dest()) {
+      trimmed_cnt += hi->cnt();
+      hi--;
+    }
   }
 
   // Find the total number of cases and ranges
@@ -452,8 +804,23 @@
   int num_range = hi - lo + 1;
 
   // Don't create table if: too large, too small, or too sparse.
-  if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize)
+  if (num_cases > MaxJumpTableSize)
     return false;
+  if (UseSwitchProfiling) {
+    // MinJumpTableSize is set so that, with a well-balanced binary tree,
+    // when the number of ranges is MinJumpTableSize, it's cheaper to
+    // go through a JumpNode than a tree of IfNodes. The average cost of a
+    // tree of IfNodes with MinJumpTableSize ranges is
+    // log2f(MinJumpTableSize) comparisons. So if the cost computed
+    // from profile data is less than log2f(MinJumpTableSize) then
+    // going with the binary search is cheaper.
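+    // For example, with MinJumpTableSize == 10 the threshold is
+    // log2f(10) ~= 3.3 comparisons.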
+    if (cost < log2f(MinJumpTableSize)) {
+      return false;
+    }
+  } else {
+    if (num_cases < MinJumpTableSize)
+      return false;
+  }
   if (num_cases > (MaxJumpTableSparseness * num_range))
     return false;
 
@@ -465,10 +832,12 @@
   // in the switch domain.
   if (needs_guard) {
     Node*   size = _gvn.intcon(num_cases);
-    Node*   cmp = _gvn.transform( new CmpUNode(key_val, size) );
-    Node*   tst = _gvn.transform( new BoolNode(cmp, BoolTest::ge) );
-    IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN);
-    jump_if_true_fork(iff, default_dest, NullTableIndex);
+    Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
+    Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
+    IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
+    jump_if_true_fork(iff, default_dest, NullTableIndex, trim_ranges && trimmed_cnt == 0);
+
+    total -= trimmed_cnt;
   }
 
   // Create an ideal node JumpTable that has projections
@@ -489,17 +858,44 @@
   key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
 
   // Create the JumpNode
-  Node* jtn = _gvn.transform( new JumpNode(control(), key_val, num_cases) );
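+  // Assign each jump table entry a probability: uniform when no profile data
+  // was recorded, otherwise the range's relative frequency spread evenly over
+  // the values it covers.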
+  Arena* arena = C->comp_arena();
+  float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
+  int i = 0;
+  if (total == 0) {
+    for (SwitchRange* r = lo; r <= hi; r++) {
+      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
+        probs[i] = 1.0F / num_cases;
+      }
+    }
+  } else {
+    for (SwitchRange* r = lo; r <= hi; r++) {
+      float prob = r->cnt()/total;
+      for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
+        probs[i] = prob / (r->hi() - r->lo() + 1);
+      }
+    }
+  }
+
+  ciMethodData* methodData = method()->method_data();
+  ciMultiBranchData* profile = NULL;
+  if (methodData->is_mature()) {
+    ciProfileData* data = methodData->bci_to_data(bci());
+    if (data != NULL && data->is_MultiBranchData()) {
+      profile = (ciMultiBranchData*)data;
+    }
+  }
+
+  Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total));
 
   // These are the switch destinations hanging off the jumpnode
-  int i = 0;
+  i = 0;
   for (SwitchRange* r = lo; r <= hi; r++) {
     for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
       Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
       {
         PreserveJVMState pjvms(this);
         set_control(input);
-        jump_if_always_fork(r->dest(), r->table_index());
+        jump_if_always_fork(r->dest(), r->table_index(), trim_ranges && r->cnt() == 0);
       }
     }
   }
@@ -511,6 +907,7 @@
 //----------------------------jump_switch_ranges-------------------------------
 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
   Block* switch_block = block();
+  bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
 
   if (switch_depth == 0) {
     // Do special processing for the top-level call.
@@ -519,21 +916,23 @@
 
     // Decrement pred-numbers for the unique set of nodes.
 #ifdef ASSERT
-    // Ensure that the block's successors are a (duplicate-free) set.
-    int successors_counted = 0;  // block occurrences in [hi..lo]
-    int unique_successors = switch_block->num_successors();
-    for (int i = 0; i < unique_successors; i++) {
-      Block* target = switch_block->successor_at(i);
+    if (!trim_ranges) {
+      // Ensure that the block's successors are a (duplicate-free) set.
+      int successors_counted = 0;  // block occurrences in [hi..lo]
+      int unique_successors = switch_block->num_successors();
+      for (int i = 0; i < unique_successors; i++) {
+        Block* target = switch_block->successor_at(i);
 
-      // Check that the set of successors is the same in both places.
-      int successors_found = 0;
-      for (SwitchRange* p = lo; p <= hi; p++) {
-        if (p->dest() == target->start())  successors_found++;
+        // Check that the set of successors is the same in both places.
+        int successors_found = 0;
+        for (SwitchRange* p = lo; p <= hi; p++) {
+          if (p->dest() == target->start())  successors_found++;
+        }
+        assert(successors_found > 0, "successor must be known");
+        successors_counted += successors_found;
       }
-      assert(successors_found > 0, "successor must be known");
-      successors_counted += successors_found;
+      assert(successors_counted == (hi-lo)+1, "no unexpected successors");
     }
-    assert(successors_counted == (hi-lo)+1, "no unexpected successors");
 #endif
 
     // Maybe prune the inputs, based on the type of key_val.
@@ -545,10 +944,20 @@
       max_val = ti->_hi;
       assert(min_val <= max_val, "invalid int type");
     }
-    while (lo->hi() < min_val)  lo++;
-    if (lo->lo() < min_val)  lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index());
-    while (hi->lo() > max_val)  hi--;
-    if (hi->hi() > max_val)  hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index());
+    while (lo->hi() < min_val) {
+      lo++;
+    }
+    if (lo->lo() < min_val)  {
+      lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index(), lo->cnt());
+    }
+    while (hi->lo() > max_val) {
+      hi--;
+    }
+    if (hi->hi() > max_val) {
+      hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index(), hi->cnt());
+    }
+
+    linear_search_switch_ranges(key_val, lo, hi);
   }
 
 #ifndef PRODUCT
@@ -560,42 +969,57 @@
 
   assert(lo <= hi, "must be a non-empty set of ranges");
   if (lo == hi) {
-    jump_if_always_fork(lo->dest(), lo->table_index());
+    jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
   } else {
     assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
     assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
 
     if (create_jump_tables(key_val, lo, hi)) return;
 
+    SwitchRange* mid = NULL;
+    float total_cnt = sum_of_cnts(lo, hi);
+
     int nr = hi - lo + 1;
+    if (UseSwitchProfiling) {
+      // Don't keep the binary search tree balanced: pick the mid point
+      // that splits the frequencies in half.
+      float cnt = 0;
+      for (SwitchRange* sr = lo; sr <= hi; sr++) {
+        cnt += sr->cnt();
+        if (cnt >= total_cnt / 2) {
+          mid = sr;
+          break;
+        }
+      }
+    } else {
+      mid = lo + nr/2;
 
-    SwitchRange* mid = lo + nr/2;
-    // if there is an easy choice, pivot at a singleton:
-    if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;
+      // if there is an easy choice, pivot at a singleton:
+      if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;
 
-    assert(lo < mid && mid <= hi, "good pivot choice");
-    assert(nr != 2 || mid == hi,   "should pick higher of 2");
-    assert(nr != 3 || mid == hi-1, "should pick middle of 3");
+      assert(lo < mid && mid <= hi, "good pivot choice");
+      assert(nr != 2 || mid == hi,   "should pick higher of 2");
+      assert(nr != 3 || mid == hi-1, "should pick middle of 3");
+    }
 
-    Node *test_val = _gvn.intcon(mid->lo());
+
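+    // If the frequency-based pivot is the lowest range, test against its upper
+    // bound (and use gt below) so the left side of the split is exactly that range.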
+    Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());
 
     if (mid->is_singleton()) {
-      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne);
-      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index());
+      IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
+      jump_if_false_fork(iff_ne, mid->dest(), mid->table_index(), trim_ranges && mid->cnt() == 0);
 
       // Special Case:  If there are exactly three ranges, and the high
       // and low range each go to the same place, omit the "gt" test,
       // since it will not discriminate anything.
-      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest());
-      if (eq_test_only) {
-        assert(mid == hi-1, "");
-      }
+      bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;
 
       // if there is a higher range, test for it and process it:
       if (mid < hi && !eq_test_only) {
         // two comparisons of same values--should enable 1 test for 2 branches
         // Use BoolTest::le instead of BoolTest::gt
-        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le);
+        float cnt = sum_of_cnts(lo, mid-1);
+        IfNode *iff_le  = jump_if_fork_int(key_val, test_val, BoolTest::le, if_prob(cnt, total_cnt), if_cnt(cnt));
         Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_le) );
         Node   *iffalse = _gvn.transform( new IfFalseNode(iff_le) );
         { PreserveJVMState pjvms(this);
@@ -607,24 +1031,33 @@
 
     } else {
       // mid is a range, not a singleton, so treat mid..hi as a unit
-      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge);
+      float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
+      IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));
 
       // if there is a higher range, test for it and process it:
       if (mid == hi) {
-        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index());
+        jump_if_true_fork(iff_ge, mid->dest(), mid->table_index(), trim_ranges && cnt == 0);
       } else {
         Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
         Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
         { PreserveJVMState pjvms(this);
           set_control(iftrue);
-          jump_switch_ranges(key_val, mid, hi, switch_depth+1);
+          jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
         }
         set_control(iffalse);
       }
     }
 
     // in any case, process the lower range
-    jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
+    if (mid == lo) {
+      if (mid->is_singleton()) {
+        jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
+      } else {
+        jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0);
+      }
+    } else {
+      jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
+    }
   }
 
   // Decrease pred_count for each successor after all is done.
@@ -724,7 +1157,7 @@
         Node *mask = _gvn.intcon((divisor - 1));
         // Sigh, must handle negative dividends
         Node *zero = _gvn.intcon(0);
-        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt);
+        IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt, PROB_FAIR, COUNT_UNKNOWN);
         Node *iff = _gvn.transform( new IfFalseNode(ifff) );
         Node *ift = _gvn.transform( new IfTrueNode (ifff) );
         Node *reg = jump_if_join(ift, iff);
--- a/src/hotspot/share/opto/superword.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/opto/superword.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -1943,9 +1943,14 @@
         for (uint k = 0; k < use->req(); k++) {
           Node* n = use->in(k);
           if (def == n) {
-            // reductions can be loop carried dependences
-            if (def->is_reduction() && use->is_Phi())
+            // reductions should only have a Phi use at the loop
+            // head and out-of-loop uses
+            if (def->is_reduction() &&
+                ((use->is_Phi() && use->in(0) == _lpt->_head) ||
+                 !_lpt->is_member(_phase->get_loop(_phase->ctrl_or_self(use))))) {
+              assert(i == p->size()-1, "must be last element of the pack");
               continue;
+            }
             if (!is_vector_use(use, k)) {
               return false;
             }
@@ -2139,8 +2144,21 @@
     // we use the memory state of the last load. However, if any load could
     // not be moved down due to the dependence constraint, we use the memory
     // state of the first load.
-    Node* last_mem  = executed_last(pk)->in(MemNode::Memory);
-    Node* first_mem = executed_first(pk)->in(MemNode::Memory);
+    Node* first_mem = pk->at(0)->in(MemNode::Memory);
+    Node* last_mem = first_mem;
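+    // Track the earliest and latest memory states defined inside the loop body
+    // among the loads of the pack.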
+    for (uint i = 1; i < pk->size(); i++) {
+      Node* ld = pk->at(i);
+      Node* mem = ld->in(MemNode::Memory);
+      assert(in_bb(first_mem) || in_bb(mem) || mem == first_mem, "2 different memory states from outside the loop?");
+      if (in_bb(mem)) {
+        if (in_bb(first_mem) && bb_idx(mem) < bb_idx(first_mem)) {
+          first_mem = mem;
+        }
+        if (!in_bb(last_mem) || bb_idx(mem) > bb_idx(last_mem)) {
+          last_mem = mem;
+        }
+      }
+    }
     bool schedule_last = true;
     for (uint i = 0; i < pk->size(); i++) {
       Node* ld = pk->at(i);
--- a/src/hotspot/share/precompiled/precompiled.hpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/precompiled/precompiled.hpp	Thu Apr 26 22:11:15 2018 +0200
@@ -101,6 +101,7 @@
 # include "gc/shared/genCollectedHeap.hpp"
 # include "gc/shared/generation.hpp"
 # include "gc/shared/generationCounters.hpp"
+# include "gc/shared/jvmFlagConstraintsGC.hpp"
 # include "gc/shared/modRefBarrierSet.hpp"
 # include "gc/shared/referencePolicy.hpp"
 # include "gc/shared/referenceProcessor.hpp"
@@ -163,6 +164,13 @@
 # include "runtime/extendedPC.hpp"
 # include "runtime/fieldDescriptor.hpp"
 # include "runtime/fieldType.hpp"
+# include "runtime/flags/flagSetting.hpp"
+# include "runtime/flags/jvmFlag.hpp"
+# include "runtime/flags/jvmFlagConstraintList.hpp"
+# include "runtime/flags/jvmFlagConstraintsCompiler.hpp"
+# include "runtime/flags/jvmFlagConstraintsRuntime.hpp"
+# include "runtime/flags/jvmFlagRangeList.hpp"
+# include "runtime/flags/jvmFlagWriteableList.hpp"
 # include "runtime/frame.hpp"
 # include "runtime/frame.inline.hpp"
 # include "runtime/globals.hpp"
@@ -292,6 +300,7 @@
 # include "gc/cms/concurrentMarkSweepGeneration.hpp"
 # include "gc/cms/freeChunk.hpp"
 # include "gc/cms/gSpaceCounters.hpp"
+# include "gc/cms/jvmFlagConstraintsCMS.hpp"
 # include "gc/cms/parOopClosures.hpp"
 # include "gc/cms/promotionInfo.hpp"
 # include "gc/cms/yieldingWorkgroup.hpp"
@@ -299,10 +308,12 @@
 # include "gc/g1/g1BlockOffsetTable.hpp"
 # include "gc/g1/g1OopClosures.hpp"
 # include "gc/g1/g1_globals.hpp"
+# include "gc/g1/jvmFlagConstraintsG1.hpp"
 # include "gc/g1/ptrQueue.hpp"
 # include "gc/g1/satbMarkQueue.hpp"
 # include "gc/parallel/gcAdaptivePolicyCounters.hpp"
 # include "gc/parallel/immutableSpace.hpp"
+# include "gc/parallel/jvmFlagConstraintsParallel.hpp"
 # include "gc/parallel/mutableSpace.hpp"
 # include "gc/parallel/objectStartArray.hpp"
 # include "gc/parallel/parMarkBitMap.hpp"
--- a/src/hotspot/share/prims/jvm.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/prims/jvm.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -2768,15 +2768,14 @@
   return len;
 }
 
-
 // HotSpot specific jio method
-void jio_print(const char* s) {
+void jio_print(const char* s, size_t len) {
   // Try to make this function as atomic as possible.
   if (Arguments::vfprintf_hook() != NULL) {
-    jio_fprintf(defaultStream::output_stream(), "%s", s);
+    jio_fprintf(defaultStream::output_stream(), "%.*s", (int)len, s);
   } else {
     // Make an unused local variable to avoid warning from gcc 4.x compiler.
-    size_t count = ::write(defaultStream::output_fd(), s, (int)strlen(s));
+    size_t count = ::write(defaultStream::output_fd(), s, (int)len);
   }
 }
 
--- a/src/hotspot/share/prims/whitebox.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/prims/whitebox.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -53,6 +53,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
+#include "runtime/flags/jvmFlag.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handshake.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
@@ -971,29 +972,29 @@
 WB_END
 
 template <typename T>
-static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, Flag::Error (*TAt)(const char*, T*, bool, bool)) {
+static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, JVMFlag::Error (*TAt)(const char*, T*, bool, bool)) {
   if (name == NULL) {
     return false;
   }
   ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
   const char* flag_name = env->GetStringUTFChars(name, NULL);
   CHECK_JNI_EXCEPTION_(env, false);
-  Flag::Error result = (*TAt)(flag_name, value, true, true);
+  JVMFlag::Error result = (*TAt)(flag_name, value, true, true);
   env->ReleaseStringUTFChars(name, flag_name);
-  return (result == Flag::SUCCESS);
+  return (result == JVMFlag::SUCCESS);
 }
 
 template <typename T>
-static bool SetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, Flag::Error (*TAtPut)(const char*, T*, Flag::Flags)) {
+static bool SetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, JVMFlag::Error (*TAtPut)(const char*, T*, JVMFlag::Flags)) {
   if (name == NULL) {
     return false;
   }
   ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
   const char* flag_name = env->GetStringUTFChars(name, NULL);
   CHECK_JNI_EXCEPTION_(env, false);
-  Flag::Error result = (*TAtPut)(flag_name, value, Flag::INTERNAL);
+  JVMFlag::Error result = (*TAtPut)(flag_name, value, JVMFlag::INTERNAL);
   env->ReleaseStringUTFChars(name, flag_name);
-  return (result == Flag::SUCCESS);
+  return (result == JVMFlag::SUCCESS);
 }
 
 template <typename T>
@@ -1026,28 +1027,28 @@
   return box(thread, env, vmSymbols::java_lang_Double(), vmSymbols::Double_valueOf_signature(), value);
 }
 
-static Flag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
+static JVMFlag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
   ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
   const char* flag_name = env->GetStringUTFChars(name, NULL);
   CHECK_JNI_EXCEPTION_(env, NULL);
-  Flag* result = Flag::find_flag(flag_name, strlen(flag_name), true, true);
+  JVMFlag* result = JVMFlag::find_flag(flag_name, strlen(flag_name), true, true);
   env->ReleaseStringUTFChars(name, flag_name);
   return result;
 }
 
 WB_ENTRY(jboolean, WB_IsConstantVMFlag(JNIEnv* env, jobject o, jstring name))
-  Flag* flag = getVMFlag(thread, env, name);
+  JVMFlag* flag = getVMFlag(thread, env, name);
   return (flag != NULL) && flag->is_constant_in_binary();
 WB_END
 
 WB_ENTRY(jboolean, WB_IsLockedVMFlag(JNIEnv* env, jobject o, jstring name))
-  Flag* flag = getVMFlag(thread, env, name);
+  JVMFlag* flag = getVMFlag(thread, env, name);
   return (flag != NULL) && !(flag->is_unlocked() || flag->is_unlocker());
 WB_END
 
 WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
   bool result;
-  if (GetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAt)) {
+  if (GetVMFlag <bool> (thread, env, name, &result, &JVMFlag::boolAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return booleanBox(thread, env, result);
   }
@@ -1056,7 +1057,7 @@
 
 WB_ENTRY(jobject, WB_GetIntVMFlag(JNIEnv* env, jobject o, jstring name))
   int result;
-  if (GetVMFlag <int> (thread, env, name, &result, &CommandLineFlags::intAt)) {
+  if (GetVMFlag <int> (thread, env, name, &result, &JVMFlag::intAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return longBox(thread, env, result);
   }
@@ -1065,7 +1066,7 @@
 
 WB_ENTRY(jobject, WB_GetUintVMFlag(JNIEnv* env, jobject o, jstring name))
   uint result;
-  if (GetVMFlag <uint> (thread, env, name, &result, &CommandLineFlags::uintAt)) {
+  if (GetVMFlag <uint> (thread, env, name, &result, &JVMFlag::uintAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return longBox(thread, env, result);
   }
@@ -1074,7 +1075,7 @@
 
 WB_ENTRY(jobject, WB_GetIntxVMFlag(JNIEnv* env, jobject o, jstring name))
   intx result;
-  if (GetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAt)) {
+  if (GetVMFlag <intx> (thread, env, name, &result, &JVMFlag::intxAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return longBox(thread, env, result);
   }
@@ -1083,7 +1084,7 @@
 
 WB_ENTRY(jobject, WB_GetUintxVMFlag(JNIEnv* env, jobject o, jstring name))
   uintx result;
-  if (GetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAt)) {
+  if (GetVMFlag <uintx> (thread, env, name, &result, &JVMFlag::uintxAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return longBox(thread, env, result);
   }
@@ -1092,7 +1093,7 @@
 
 WB_ENTRY(jobject, WB_GetUint64VMFlag(JNIEnv* env, jobject o, jstring name))
   uint64_t result;
-  if (GetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAt)) {
+  if (GetVMFlag <uint64_t> (thread, env, name, &result, &JVMFlag::uint64_tAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return longBox(thread, env, result);
   }
@@ -1101,7 +1102,7 @@
 
 WB_ENTRY(jobject, WB_GetSizeTVMFlag(JNIEnv* env, jobject o, jstring name))
   uintx result;
-  if (GetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAt)) {
+  if (GetVMFlag <size_t> (thread, env, name, &result, &JVMFlag::size_tAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return longBox(thread, env, result);
   }
@@ -1110,7 +1111,7 @@
 
 WB_ENTRY(jobject, WB_GetDoubleVMFlag(JNIEnv* env, jobject o, jstring name))
   double result;
-  if (GetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAt)) {
+  if (GetVMFlag <double> (thread, env, name, &result, &JVMFlag::doubleAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     return doubleBox(thread, env, result);
   }
@@ -1119,7 +1120,7 @@
 
 WB_ENTRY(jstring, WB_GetStringVMFlag(JNIEnv* env, jobject o, jstring name))
   ccstr ccstrResult;
-  if (GetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAt)) {
+  if (GetVMFlag <ccstr> (thread, env, name, &ccstrResult, &JVMFlag::ccstrAt)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
     jstring result = env->NewStringUTF(ccstrResult);
     CHECK_JNI_EXCEPTION_(env, NULL);
@@ -1130,42 +1131,42 @@
 
 WB_ENTRY(void, WB_SetBooleanVMFlag(JNIEnv* env, jobject o, jstring name, jboolean value))
   bool result = value == JNI_TRUE ? true : false;
-  SetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAtPut);
+  SetVMFlag <bool> (thread, env, name, &result, &JVMFlag::boolAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetIntVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
   int result = value;
-  SetVMFlag <int> (thread, env, name, &result, &CommandLineFlags::intAtPut);
+  SetVMFlag <int> (thread, env, name, &result, &JVMFlag::intAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetUintVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
   uint result = value;
-  SetVMFlag <uint> (thread, env, name, &result, &CommandLineFlags::uintAtPut);
+  SetVMFlag <uint> (thread, env, name, &result, &JVMFlag::uintAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetIntxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
   intx result = value;
-  SetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAtPut);
+  SetVMFlag <intx> (thread, env, name, &result, &JVMFlag::intxAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetUintxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
   uintx result = value;
-  SetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAtPut);
+  SetVMFlag <uintx> (thread, env, name, &result, &JVMFlag::uintxAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetUint64VMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
   uint64_t result = value;
-  SetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAtPut);
+  SetVMFlag <uint64_t> (thread, env, name, &result, &JVMFlag::uint64_tAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetSizeTVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
   size_t result = value;
-  SetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAtPut);
+  SetVMFlag <size_t> (thread, env, name, &result, &JVMFlag::size_tAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetDoubleVMFlag(JNIEnv* env, jobject o, jstring name, jdouble value))
   double result = value;
-  SetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAtPut);
+  SetVMFlag <double> (thread, env, name, &result, &JVMFlag::doubleAtPut);
 WB_END
 
 WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring value))
@@ -1182,7 +1183,7 @@
   bool needFree;
   {
     ThreadInVMfromNative ttvfn(thread); // back to VM
-    needFree = SetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAtPut);
+    needFree = SetVMFlag <ccstr> (thread, env, name, &ccstrResult, &JVMFlag::ccstrAtPut);
   }
   if (value != NULL) {
     env->ReleaseStringUTFChars(value, ccstrValue);
@@ -1359,7 +1360,9 @@
   {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     blob = (BufferBlob*) CodeCache::allocate(full_size, blob_type);
-    ::new (blob) BufferBlob("WB::DummyBlob", full_size);
+    if (blob != NULL) {
+      ::new (blob) BufferBlob("WB::DummyBlob", full_size);
+    }
   }
   // Track memory usage statistic after releasing CodeCache_lock
   MemoryService::track_code_cache_memory_usage();
--- a/src/hotspot/share/runtime/arguments.cpp	Sat Apr 21 16:33:20 2018 -0700
+++ b/src/hotspot/share/runtime/arguments.cpp	Thu Apr 26 22:11:15 2018 +0200
@@ -42,10 +42,10 @@
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/arguments_ext.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
-#include "runtime/commandLineFlagWriteableList.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
+#include "runtime/flags/jvmFlagWriteableList.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.inline.hpp"
@@ -739,7 +739,7 @@
 
       // if flag has become obsolete it should not have a "globals" flag defined anymore.
       if (!version_less_than(JDK_Version::current(), flag.obsolete_in)) {
-        if (Flag::find_flag(flag.name) != NULL) {
+        if (JVMFlag::find_flag(flag.name) != NULL) {
           // Temporarily disable the warning: 8196739
           // warning("Global variable for obsolete special flag entry \"%s\" should be removed", flag.name);
         }
@@ -749,7 +749,7 @@
     if (!flag.expired_in.is_undefined()) {
       // if flag has become expired it should not have a "globals" flag defined anymore.
       if (!version_less_than(JDK_Version::current(), flag.expired_in)) {
-        if (Flag::find_flag(flag.name) != NULL) {
+        if (JVMFlag::find_flag(flag.name) != NULL) {
           // Temporarily disable the warning: 8196739
           // warning("Global variable for expired flag entry \"%s\" should be removed", flag.name);
         }
@@ -833,15 +833,15 @@
   }
 }
 
-static bool set_bool_flag(const char* name, bool value, Flag::Flags origin) {
-  if (CommandLineFlags::boolAtPut(name, &value, origin) == Flag::SUCCESS) {
+static bool set_bool_flag(const char* name, bool value, JVMFlag::Flags origin) {
+  if (JVMFlag::boolAtPut(name, &value, origin) == JVMFlag::SUCCESS) {
     return true;
   } else {
     return false;
   }
 }
 
-static bool set_fp_numeric_flag(const char* name, char* value, Flag::Flags origin) {
+static bool set_fp_numeric_flag(const char* name, char* value, JVMFlag::Flags origin) {
   char* end;
   errno = 0;
   double v = strtod(value, &end);
@@ -849,18 +849,18 @@
     return false;
   }
 
-  if (CommandLineFlags::doubleAtPut(name, &v, origin) == Flag::SUCCESS) {
+  if (JVMFlag::doubleAtPut(name, &v, origin) == JVMFlag::SUCCESS) {
     return true;
   }
   return false;
 }
 
-static bool set_numeric_flag(const char* name, char* value, Flag::Flags origin) {
+static bool set_numeric_flag(const char* name, char* value, JVMFlag::Flags origin) {
   julong v;
   int int_v;
   intx intx_v;
   bool is_neg = false;
-  Flag* result = Flag::find_flag(name, strlen(name));
+  JVMFlag* result = JVMFlag::find_flag(name, strlen(name));
 
   if (result == NULL) {
     return false;
@@ -882,43 +882,43 @@
     if (is_neg) {
       int_v = -int_v;
     }
-    return CommandLineFlags::intAtPut(result, &int_v, origin) == Flag::SUCCESS;
+    return JVMFlag::intAtPut(result, &int_v, origin) == JVMFlag::SUCCESS;
   } else if (result->is_uint()) {
     uint uint_v = (uint) v;
-    return CommandLineFlags::uintAtPut(result, &uint_v, origin) == Flag::SUCCESS;
+    return JVMFlag::uintAtPut(result, &uint_v, origin) == JVMFlag::SUCCESS;
   } else if (result->is_intx()) {
     intx_v = (intx) v;
     if (is_neg) {
       intx_v = -intx_v;
     }
-    return CommandLineFlags::intxAtPut(result, &intx_v, origin) == Flag::SUCCESS;
+    return JVMFlag::intxAtPut(result, &intx_v, origin) == JVMFlag::SUCCESS;
   } else if (result->is_uintx()) {
     uintx uintx_v = (uintx) v;
-    return CommandLineFlags::uintxAtPut(result, &uintx_v, origin) == Flag::SUCCESS;
+    return JVMFlag::uintxAtPut(result, &uintx_v, origin) == JVMFlag::SUCCESS;
   } else if (result->is_uint64_t()) {
     uint64_t uint64_t_v = (uint64_t) v;
-    return CommandLineFlags::uint64_tAtPut(result, &uint64_t_v, origin) == Flag::SUCCESS;
+    return JVMFlag::uint64_tAtPut(result, &uint64_t_v, origin) == JVMFlag::SUCCESS;
   } else if (result->is_size_t()) {
     size_t size_t_v = (size_t) v;
-    return CommandLineFlags::size_tAtPut(result, &size_t_v, origin) == Flag::SUCCESS;
+    return JVMFlag::size_tAtPut(result, &size_t_v, origin) == JVMFlag::SUCCESS;
   } else if (result->is_double()) {
     double double_v = (double) v;
-    return CommandLineFlags::doubleAtPut(result, &double_v, origin) == Flag::SUCCESS;
+    return JVMFlag::doubleAtPut(result, &double_v, origin) == JVMFlag::SUCCESS;
   } else {
     return false;
   }
 }
 
-static bool set_string_flag(const char* name, const char* value, Flag::Flags origin) {
-  if (CommandLineFlags::ccstrAtPut(name, &value, origin) != Flag::SUCCESS) return false;
-  // Contract:  CommandLineFlags always returns a pointer that needs freeing.
+static bool set_string_flag(const char* name, const char* value, JVMFlag::Flags origin) {
+  if (JVMFlag::ccstrAtPut(name, &value, origin) != JVMFlag::SUCCESS) return false;
+  // Contract:  JVMFlag always returns a pointer that needs freeing.
   FREE_C_HEAP_ARRAY(char, value);
   return true;
 }
 
-static bool append_to_string_flag(const char* name, const char* new_value, Flag::Flags origin) {
+static bool append_to_string_flag(const char* name, const char* new_value, JVMFlag::Flags origin) {
   const char* old_value = "";
-  if (CommandLineFlags::ccstrAt(name, &old_value) != Flag::SUCCESS) return false;
+  if (JVMFlag::ccstrAt(name, &old_value) != JVMFlag::SUCCESS) return false;
   size_t old_len = old_value != NULL ? strlen(old_value) : 0;
   size_t new_len = strlen(new_value);
   const char* value;
@@ -935,11 +935,11 @@
     value = buf;
     free_this_too = buf;
   }
-  (void) CommandLineFlags::ccstrAtPut(name, &value, origin);
-  // CommandLineFlags always returns a pointer that needs freeing.
+  (void) JVMFlag::ccstrAtPut(name, &value, origin);
+  // JVMFlag always returns a pointer that needs freeing.
   FREE_C_HEAP_ARRAY(char, value);
   if (free_this_too != NULL) {
-    // CommandLineFlags made its own copy, so I must delete my own temp. buffer.
+    // JVMFlag made its own copy, so I must delete my own temp. buffer.
     FREE_C_HEAP_ARRAY(char, free_this_too);
   }
   return true;
@@ -1010,7 +1010,7 @@
   return a;
 }
 
-bool Arguments::parse_argument(const char* arg, Flag::Flags origin) {
+bool Arguments::parse_argument(const char* arg, JVMFlag::Flags origin) {
 
   // range of acceptable characters spelled out for portability reasons
 #define NAME_RANGE  "[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_]"
@@ -1048,7 +1048,7 @@
   char punct;
   if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "%c", name, &punct) == 2 && punct == '=') {
     const char* value = strchr(arg, '=') + 1;
-    Flag* flag;
+    JVMFlag* flag;
 
     // this scanf pattern matches both strings (handled here) and numbers (handled later))
     AliasedLoggingFlag alf = catch_logging_aliases(name, true);
@@ -1060,7 +1060,7 @@
     if (real_name == NULL) {
       return false;
     }
-    flag = Flag::find_flag(real_name);
+    flag = JVMFlag::find_flag(real_name);
     if (flag != NULL && flag->is_ccstr()) {
       if (flag->ccstr_accumulates()) {
         return append_to_string_flag(real_name, value, origin);
@@ -1221,7 +1221,7 @@
 
 bool Arguments::process_argument(const char* arg,
                                  jboolean ignore_unrecognized,
-                                 Flag::Flags origin) {
+                                 JVMFlag::Flags origin) {
   JDK_Version since = JDK_Version();
 
   if (parse_argument(arg, origin)) {
@@ -1266,10 +1266,10 @@
 
   // For locked flags, report a custom error message if available.
   // Otherwise, report the standard unrecognized VM option.
-  Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
+  JVMFlag* found_flag = JVMFlag::find_flag((const char*)argname, arg_len, true, true);
   if (found_flag != NULL) {
     char locked_message_buf[BUFLEN];
-    Flag::MsgType msg_type = found_flag->get_locked_message(locked_message_buf, BUFLEN);
+    JVMFlag::MsgType msg_type = found_flag->get_locked_message(locked_message_buf, BUFLEN);
     if (strlen(locked_message_buf) == 0) {
       if (found_flag->is_bool() && !has_plus_minus) {
         jio_fprintf(defaultStream::error_stream(),
@@ -1283,8 +1283,8 @@
       }
     } else {
 #ifdef PRODUCT
-      bool mismatched = ((msg_type == Flag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD) ||
-                         (msg_type == Flag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD));
+      bool mismatched = ((msg_type == JVMFlag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD) ||
+                         (msg_type == JVMFlag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD));
       if (ignore_unrecognized && mismatched) {
         return true;
       }
@@ -1297,7 +1297,7 @@