changeset 60096:9ecc33cc755c stats-before-this-super

Merging recent default branch changes into stats-before-this-super.
author jlahoda
date Wed, 04 Mar 2020 12:01:01 +0100
parents 2568bcf17169 87651cb03ebc
children e2821df4e6aa
files make/CopyInterimCLDRConverter.gmk src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script-dir/jszip-utils/dist/jszip-utils-ie.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script-dir/jszip-utils/dist/jszip-utils-ie.min.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script-dir/jszip-utils/dist/jszip-utils.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script-dir/jszip-utils/dist/jszip-utils.min.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script-dir/jszip/dist/jszip.js src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/script-dir/jszip/dist/jszip.min.js src/jdk.javadoc/share/legal/jszip.md test/hotspot/jtreg/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java test/jdk/java/net/httpclient/ssltest/bad.keystore test/jdk/java/net/httpclient/ssltest/good.keystore test/jdk/java/net/httpclient/ssltest/loopback.keystore
diffstat 329 files changed, 7160 insertions(+), 15023 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Fri Feb 07 20:40:59 2020 +0000
+++ b/.hgtags	Wed Mar 04 12:01:01 2020 +0100
@@ -619,3 +619,4 @@
 4a87bb7ebfd7f6a25ec59a5982fe3607242777f8 jdk-14+35
 62b5bfef8d618e08e6f3a56cf1fb0e67e89e9cc2 jdk-15+9
 bc54620a3848c26cff9766e5e2a6e5ddab98ed18 jdk-14+36
+1bee69801aeea1a34261c93f35bc9de072a98704 jdk-15+10
--- a/bin/idea.sh	Fri Feb 07 20:40:59 2020 +0000
+++ b/bin/idea.sh	Wed Mar 04 12:01:01 2020 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 #
-# Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -113,6 +113,18 @@
   echo "FATAL: SPEC is empty" >&2; exit 1
 fi
 
+if [ -d "$TOPLEVEL_DIR/.hg" ] ; then
+    VCS_TYPE="hg4idea"
+fi
+
+if [ -d "$TOPLEVEL_DIR/.git" ] ; then
+    VCS_TYPE="Git"
+fi
+
+if [ "x$VCS_TYPE" = "x" ] ; then
+  echo "FATAL: VCS_TYPE is empty" >&2; exit 1
+fi
+
 ### Replace template variables
 
 NUM_REPLACEMENTS=0
@@ -137,6 +149,7 @@
 }
 
 add_replacement "###MODULE_NAMES###" "$MODULE_NAMES"
+add_replacement "###VCS_TYPE###" "$VCS_TYPE"
 SPEC_DIR=`dirname $SPEC`
 if [ "x$CYGPATH" = "x" ]; then
     add_replacement "###BUILD_DIR###" "$SPEC_DIR"
--- a/make/CompileToolsJdk.gmk	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/CompileToolsJdk.gmk	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,6 @@
 # Use += to be able to add to this from a custom extension
 BUILD_TOOLS_SRC_DIRS += \
     $(TOPDIR)/make/jdk/src/classes \
-    $(BUILDTOOLS_OUTPUTDIR)/interim_cldrconverter_classes \
     $(BUILDTOOLS_OUTPUTDIR)/interim_tzdb_classes \
     #
 
--- a/make/CopyInterimCLDRConverter.gmk	Fri Feb 07 20:40:59 2020 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-#
-# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.  Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-default: all
-
-include $(SPEC)
-include MakeBase.gmk
-
-##########################################################################################
-
-### CLDRConverter needs the JRE time zone names from the java.base source.
-
-define cldrconverter_copytznames
-	$(call MakeTargetDir)
-	$(RM) '$@'
-	$(SED) -e "s/package sun.util.resources/package build.tools.cldrconverter/" \
-        -e "s/extends TimeZoneNamesBundle//" \
-        -e "s/protected final/static final/" \
-        < $(<) > $@
-endef
-
-$(eval $(call SetupCopyFiles,COPY_INTERIM_CLDRCONVERTER, \
-    SRC := $(TOPDIR)/src/java.base/share/classes/sun/util/resources, \
-    DEST := $(BUILDTOOLS_OUTPUTDIR)/interim_cldrconverter_classes/build/tools/cldrconverter, \
-    FILES := TimeZoneNames.java, \
-    MACRO := cldrconverter_copytznames))
-
-##########################################################################################
-
-all: $(COPY_INTERIM_CLDRCONVERTER)
--- a/make/GenerateLinkOptData.gmk	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/GenerateLinkOptData.gmk	Wed Mar 04 12:01:01 2020 +0100
@@ -75,6 +75,7 @@
 	$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \
 	    -Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true \
 	    -Duser.language=en -Duser.country=US \
+	    --module-path $(SUPPORT_OUTPUTDIR)/classlist.jar \
 	    -cp $(SUPPORT_OUTPUTDIR)/classlist.jar \
 	    build.tools.classlist.HelloClasslist \
 	    2> $(LINK_OPT_DIR)/stderr > $(JLI_TRACE_FILE) \
--- a/make/Main.gmk	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/Main.gmk	Wed Mar 04 12:01:01 2020 +0100
@@ -75,9 +75,6 @@
 interim-rmic:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f CompileInterimRmic.gmk)
 
-interim-cldrconverter:
-	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f CopyInterimCLDRConverter.gmk)
-
 interim-tzdb:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f CopyInterimTZDB.gmk)
 
@@ -92,7 +89,7 @@
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f CompileToolsHotspot.gmk)
 
 ALL_TARGETS += buildtools-langtools interim-langtools \
-    interim-rmic interim-cldrconverter interim-tzdb buildtools-jdk buildtools-modules \
+    interim-rmic interim-tzdb buildtools-jdk buildtools-modules \
     buildtools-hotspot
 
 ################################################################################
@@ -677,7 +674,7 @@
 
   interim-langtools: $(INTERIM_LANGTOOLS_GENSRC_TARGETS)
 
-  buildtools-jdk: interim-langtools interim-cldrconverter interim-tzdb
+  buildtools-jdk: interim-langtools interim-tzdb
 
   buildtools-hotspot: interim-langtools
 
--- a/make/conf/jib-profiles.js	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/conf/jib-profiles.js	Wed Mar 04 12:01:01 2020 +0100
@@ -974,7 +974,7 @@
         solaris_x64: "SS12u4-Solaris11u1+1.0",
         solaris_sparcv9: "SS12u6-Solaris11u3+1.0",
         windows_x64: "VS2017-15.9.16+1.0",
-        linux_aarch64: "gcc8.2.0-Fedora27+1.0",
+        linux_aarch64: "gcc8.3.0-OL7.6+1.0",
         linux_arm: "gcc8.2.0-Fedora27+1.0",
         linux_ppc64le: "gcc8.2.0-Fedora27+1.0",
         linux_s390x: "gcc8.2.0-Fedora27+1.0"
@@ -1004,9 +1004,17 @@
         ? input.get("gnumake", "install_path") + "/cygwin/bin"
         : input.get("gnumake", "install_path") + "/bin");
 
-    var dependencies = {
-
-        boot_jdk: {
+    if (input.build_cpu == 'aarch64') {
+	boot_jdk = {
+            organization: common.organization,
+            ext: "tar.gz",
+            module: "jdk-linux_aarch64",
+            revision: "13+1.0",
+            configure_args: "--with-boot-jdk=" + common.boot_jdk_home,
+            environment_path: common.boot_jdk_home + "/bin"
+	}
+    } else {
+	boot_jdk = {
             server: "jpg",
             product: "jdk",
             version: common.boot_jdk_version,
@@ -1015,7 +1023,11 @@
                 + boot_jdk_platform + "_bin" + boot_jdk_ext,
             configure_args: "--with-boot-jdk=" + common.boot_jdk_home,
             environment_path: common.boot_jdk_home + "/bin"
-        },
+	}
+    }
+
+    var dependencies = {
+        boot_jdk: boot_jdk,
 
         devkit: {
             organization: common.organization,
--- a/make/devkit/Tools.gmk	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/devkit/Tools.gmk	Wed Mar 04 12:01:01 2020 +0100
@@ -51,9 +51,17 @@
 
 $(info ARCH=$(ARCH))
 
+KERNEL_HEADERS_RPM := kernel-headers
+
 ifeq ($(BASE_OS), OL)
-  BASE_URL := http://yum.oracle.com/repo/OracleLinux/OL6/4/base/$(ARCH)/
-  LINUX_VERSION := OL6.4
+  ifeq ($(ARCH), aarch64)
+    BASE_URL := http://yum.oracle.com/repo/OracleLinux/OL7/6/base/$(ARCH)/
+    LINUX_VERSION := OL7.6
+    KERNEL_HEADERS_RPM := kernel-uek-headers
+  else
+    BASE_URL := http://yum.oracle.com/repo/OracleLinux/OL6/4/base/$(ARCH)/
+    LINUX_VERSION := OL6.4
+  endif
 else ifeq ($(BASE_OS), Fedora)
   DEFAULT_OS_VERSION := 27
   ifeq ($(BASE_OS_VERSION), )
@@ -118,7 +126,7 @@
 
 # RPMs used by all BASE_OS
 RPM_LIST := \
-    kernel-headers \
+    $(KERNEL_HEADERS_RPM) \
     glibc glibc-headers glibc-devel \
     cups-libs cups-devel \
     libX11 libX11-devel \
--- a/make/idea/template/vcs.xml	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/idea/template/vcs.xml	Wed Mar 04 12:01:01 2020 +0100
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="VcsDirectoryMappings">
-    <mapping directory="###ROOT_DIR###" vcs="hg4idea" />
+    <mapping directory="###ROOT_DIR###" vcs="###VCS_TYPE###" />
   </component>
 </project>
--- a/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java	Wed Mar 04 12:01:01 2020 +0100
@@ -31,6 +31,9 @@
  */
 package build.tools.classlist;
 
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
 import java.net.InetAddress;
 import java.nio.file.FileSystems;
 import java.time.LocalDateTime;
@@ -55,19 +58,20 @@
 
     private static final Logger LOGGER = Logger.getLogger("Hello");
 
-    public static void main(String ... args) {
+    public static void main(String ... args) throws Throwable {
 
         FileSystems.getDefault();
 
         List<String> strings = Arrays.asList("Hello", "World!", "From: ",
-              InetAddress.getLoopbackAddress().toString());
+                InetAddress.getLoopbackAddress().toString());
 
         String helloWorld = strings.parallelStream()
-              .map(s -> s.toLowerCase(Locale.ROOT))
-              .collect(joining(","));
+                .map(s -> s.toLowerCase(Locale.ROOT))
+                .collect(joining(","));
 
-        Stream.of(helloWorld.split(","))
-              .forEach(System.out::println);
+        Stream.of(helloWorld.split("([,x-z]{1,3})([\\s]*)"))
+                .map(String::toString)
+                .forEach(System.out::println);
 
         // Common concatenation patterns
         String SS     = String.valueOf(args.length) + String.valueOf(args.length);
@@ -83,6 +87,10 @@
         String SCSCS  = String.valueOf(args.length) + "string" + String.valueOf(args.length) + "string" + String.valueOf(args.length);
         String CI     = "string" + args.length;
         String IC     = args.length + "string";
+        String SI     = String.valueOf(args.length) + args.length;
+        String IS     = args.length + String.valueOf(args.length);
+        String CIS    = "string" + args.length + String.valueOf(args.length);
+        String CSCI   = "string" + String.valueOf(args.length) + "string" + args.length;
         String CIC    = "string" + args.length + "string";
         String CICI   = "string" + args.length + "string" + args.length;
         String CJ     = "string" + System.currentTimeMillis();
@@ -99,7 +107,31 @@
                 DateFormat.getDateInstance(DateFormat.DEFAULT, Locale.ROOT)
                         .format(new Date()));
 
+        // A selection of trivial and relatively common MH operations
+        invoke(MethodHandles.identity(double.class), 1.0);
+        invoke(MethodHandles.identity(int.class), 1);
+        invoke(MethodHandles.identity(String.class), "x");
+
+        invoke(handle("staticMethod_V", MethodType.methodType(void.class)));
+
         LOGGER.log(Level.FINE, "New Date: " + newDate + " - old: " + oldDate);
     }
 
+    public static void staticMethod_V() {}
+
+    private static MethodHandle handle(String name, MethodType type) throws Throwable {
+        return MethodHandles.lookup().findStatic(HelloClasslist.class, name, type);
+    }
+
+    private static Object invoke(MethodHandle mh, Object ... args) throws Throwable {
+        try {
+            for (Object o : args) {
+                mh = MethodHandles.insertArguments(mh, 0, o);
+            }
+            return mh.invoke();
+        } catch (Throwable t) {
+            LOGGER.warning("Failed to find, link and/or invoke " + mh.toString() + ": " + t.getMessage());
+            throw t;
+        }
+    }
 }
--- a/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Wed Mar 04 12:01:01 2020 +0100
@@ -294,7 +294,6 @@
         }
 
         // First, weed out any empty timezone or metazone names from myMap.
-        // Fill in any missing abbreviations if locale is "en".
         for (Iterator<String> it = myMap.keySet().iterator(); it.hasNext();) {
             String key = it.next();
             if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
@@ -307,10 +306,6 @@
                     it.remove();
                     continue;
                 }
-
-                if (id.equals("en")) {
-                    fillInJREs(key, nameMap);
-                }
             }
         }
         for (Iterator<String> it = myMap.keySet().iterator(); it.hasNext();) {
@@ -636,42 +631,6 @@
         return null;
     }
 
-    static List<Object[]> jreTimeZoneNames = Arrays.asList(TimeZoneNames.getContents());
-    private void fillInJREs(String key, Map<String, String> map) {
-        String tzid = null;
-
-        if (key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
-            // Look for tzid
-            String meta = key.substring(CLDRConverter.METAZONE_ID_PREFIX.length());
-            if (meta.equals("GMT")) {
-                tzid = meta;
-            } else {
-                for (String tz : CLDRConverter.handlerMetaZones.keySet()) {
-                    if (CLDRConverter.handlerMetaZones.get(tz).equals(meta)) {
-                        tzid = tz;
-                        break;
-                    }
-                }
-            }
-        } else {
-            tzid = key.substring(CLDRConverter.TIMEZONE_ID_PREFIX.length());
-        }
-
-        if (tzid != null) {
-            for (Object[] jreZone : jreTimeZoneNames) {
-                if (jreZone[0].equals(tzid)) {
-                    for (int i = 0; i < ZONE_NAME_KEYS.length; i++) {
-                        if (map.get(ZONE_NAME_KEYS[i]) == null) {
-                            String[] jreNames = (String[])jreZone[1];
-                            map.put(ZONE_NAME_KEYS[i], jreNames[i]);
-                        }
-                    }
-                    break;
-                }
-            }
-        }
-    }
-
     /**
      * Perform a generic conversion of CLDR date-time format pattern letter based
      * on the support given by the SimpleDateFormat and the j.t.f.DateTimeFormatter
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 
 package build.tools.cldrconverter;
 
-import static build.tools.cldrconverter.Bundle.jreTimeZoneNames;
 import build.tools.cldrconverter.BundleGenerator.BundleType;
 import java.io.File;
 import java.io.IOException;
@@ -89,7 +88,9 @@
     static final String ZONE_NAME_PREFIX = "timezone.displayname.";
     static final String METAZONE_ID_PREFIX = "metazone.id.";
     static final String PARENT_LOCALE_PREFIX = "parentLocale.";
+    static final String META_EMPTY_ZONE_NAME = "EMPTY_ZONE";
     static final String[] EMPTY_ZONE = {"", "", "", "", "", ""};
+    static final String META_ETCUTC_ZONE_NAME = "ETC_UTC";
 
     private static SupplementDataParseHandler handlerSuppl;
     private static LikelySubtagsParseHandler handlerLikelySubtags;
@@ -686,60 +687,6 @@
     private static Map<String, Object> extractZoneNames(Map<String, Object> map, String id) {
         Map<String, Object> names = new HashMap<>();
 
-        // Copy over missing time zone ids from JRE for English locale
-        if (id.equals("en")) {
-            Map<String[], String> jreMetaMap = new HashMap<>();
-            jreTimeZoneNames.stream().forEach(e -> {
-                String tzid = (String)e[0];
-                String[] data = (String[])e[1];
-
-                if (map.get(TIMEZONE_ID_PREFIX + tzid) == null &&
-                    handlerMetaZones.get(tzid) == null ||
-                    handlerMetaZones.get(tzid) != null &&
-                    map.get(METAZONE_ID_PREFIX + handlerMetaZones.get(tzid)) == null) {
-
-                    // First, check the alias
-                    String canonID = canonicalTZMap.get(tzid);
-                    if (canonID != null && !tzid.equals(canonID)) {
-                        Object value = map.get(TIMEZONE_ID_PREFIX + canonID);
-                        if (value != null) {
-                            names.put(tzid, value);
-                            return;
-                        } else {
-                            String meta = handlerMetaZones.get(canonID);
-                            if (meta != null) {
-                                value = map.get(METAZONE_ID_PREFIX + meta);
-                                if (value != null) {
-                                    names.put(tzid, meta);
-                                    return;
-                                }
-                            }
-                        }
-                    }
-
-                    // Check the CLDR meta key
-                    Optional<Map.Entry<String, String>> cldrMeta =
-                        handlerMetaZones.getData().entrySet().stream()
-                            .filter(me ->
-                                Arrays.deepEquals(data,
-                                    (String[])map.get(METAZONE_ID_PREFIX + me.getValue())))
-                            .findAny();
-                    cldrMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
-                        // Check the JRE meta key, add if there is not.
-                        Optional<Map.Entry<String[], String>> jreMeta =
-                            jreMetaMap.entrySet().stream()
-                                .filter(jm -> Arrays.deepEquals(data, jm.getKey()))
-                                .findAny();
-                        jreMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
-                                String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
-                                names.put(METAZONE_ID_PREFIX + metaName, data);
-                                names.put(tzid, metaName);
-                        });
-                    });
-                }
-            });
-        }
-
         getAvailableZoneIds().stream().forEach(tzid -> {
             // If the tzid is deprecated, get the data for the replacement id
             String tzKey = Optional.ofNullable((String)handlerSupplMeta.get(tzid))
@@ -747,7 +694,14 @@
             Object data = map.get(TIMEZONE_ID_PREFIX + tzKey);
 
             if (data instanceof String[]) {
-                names.put(tzid, data);
+                // Hack for UTC. UTC is an alias to Etc/UTC in CLDR
+                if (tzid.equals("Etc/UTC") && !map.containsKey(TIMEZONE_ID_PREFIX + "UTC")) {
+                    names.put(METAZONE_ID_PREFIX + META_ETCUTC_ZONE_NAME, data);
+                    names.put(tzid, META_ETCUTC_ZONE_NAME);
+                    names.put("UTC", META_ETCUTC_ZONE_NAME);
+                } else {
+                    names.put(tzid, data);
+                }
             } else {
                 String meta = handlerMetaZones.get(tzKey);
                 if (meta != null) {
@@ -764,23 +718,22 @@
 
         // exemplar cities.
         Map<String, Object> exCities = map.entrySet().stream()
-                .filter(e -> e.getKey().startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX))
-                .collect(Collectors
-                        .toMap(Map.Entry::getKey, Map.Entry::getValue));
+            .filter(e -> e.getKey().startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX))
+            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
         names.putAll(exCities);
 
-        if (!id.equals("en") &&
-            !names.isEmpty()) {
-            // CLDR does not have UTC entry, so add it here.
-            names.put("UTC", EMPTY_ZONE);
+        // If there's no UTC entry at this point, add an empty one
+        if (!names.isEmpty() && !names.containsKey("UTC")) {
+            names.putIfAbsent(METAZONE_ID_PREFIX + META_EMPTY_ZONE_NAME, EMPTY_ZONE);
+            names.put("UTC", META_EMPTY_ZONE_NAME);
+        }
 
-            // no metazone zones
-            Arrays.asList(handlerMetaZones.get(MetaZonesParseHandler.NO_METAZONE_KEY)
-                .split("\\s")).stream()
-                .forEach(tz -> {
-                    names.put(tz, EMPTY_ZONE);
-                });
-        }
+        // Finally some compatibility stuff
+        ZoneId.SHORT_IDS.entrySet().stream()
+            .filter(e -> !names.containsKey(e.getKey()) && names.containsKey(e.getValue()))
+            .forEach(e -> {
+                names.put(e.getKey(), names.get(e.getValue()));
+            });
 
         return names;
     }
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -2257,8 +2257,7 @@
   Unimplemented();
 }
 
-// Advertise here if the CPU requires explicit rounding operations to
-// implement the UseStrictFP mode.
+// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
 // Are floats converted to double when stored to stack during
@@ -17313,7 +17312,7 @@
 instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8);
-  match(Set dst (LShiftVB src shift));
+  match(Set dst (LShiftVB src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (8B)" %}
   ins_encode %{
@@ -17332,7 +17331,7 @@
 
 instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 16);
-  match(Set dst (LShiftVB src shift));
+  match(Set dst (LShiftVB src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (16B)" %}
   ins_encode %{
@@ -17352,7 +17351,7 @@
 instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8);
-  match(Set dst (RShiftVB src shift));
+  match(Set dst (RShiftVB src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (8B)" %}
   ins_encode %{
@@ -17366,7 +17365,7 @@
 
 instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 16);
-  match(Set dst (RShiftVB src shift));
+  match(Set dst (RShiftVB src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (16B)" %}
   ins_encode %{
@@ -17381,7 +17380,7 @@
 instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 4 ||
             n->as_Vector()->length() == 8);
-  match(Set dst (URShiftVB src shift));
+  match(Set dst (URShiftVB src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (8B)" %}
   ins_encode %{
@@ -17400,7 +17399,7 @@
 
 instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 16);
-  match(Set dst (URShiftVB src shift));
+  match(Set dst (URShiftVB src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (16B)" %}
   ins_encode %{
@@ -17517,7 +17516,7 @@
 instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2 ||
             n->as_Vector()->length() == 4);
-  match(Set dst (LShiftVS src shift));
+  match(Set dst (LShiftVS src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (4H)" %}
   ins_encode %{
@@ -17536,7 +17535,7 @@
 
 instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 8);
-  match(Set dst (LShiftVS src shift));
+  match(Set dst (LShiftVS src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (8H)" %}
   ins_encode %{
@@ -17556,7 +17555,7 @@
 instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2 ||
             n->as_Vector()->length() == 4);
-  match(Set dst (RShiftVS src shift));
+  match(Set dst (RShiftVS src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (4H)" %}
   ins_encode %{
@@ -17570,7 +17569,7 @@
 
 instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 8);
-  match(Set dst (RShiftVS src shift));
+  match(Set dst (RShiftVS src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (8H)" %}
   ins_encode %{
@@ -17585,7 +17584,7 @@
 instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2 ||
             n->as_Vector()->length() == 4);
-  match(Set dst (URShiftVS src shift));
+  match(Set dst (URShiftVS src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (4H)" %}
   ins_encode %{
@@ -17604,7 +17603,7 @@
 
 instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 8);
-  match(Set dst (URShiftVS src shift));
+  match(Set dst (URShiftVS src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (8H)" %}
   ins_encode %{
@@ -17717,7 +17716,7 @@
 
 instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (LShiftVI src shift));
+  match(Set dst (LShiftVI src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (2S)" %}
   ins_encode %{
@@ -17730,7 +17729,7 @@
 
 instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 4);
-  match(Set dst (LShiftVI src shift));
+  match(Set dst (LShiftVI src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (4S)" %}
   ins_encode %{
@@ -17743,7 +17742,7 @@
 
 instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (RShiftVI src shift));
+  match(Set dst (RShiftVI src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (2S)" %}
   ins_encode %{
@@ -17756,7 +17755,7 @@
 
 instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 4);
-  match(Set dst (RShiftVI src shift));
+  match(Set dst (RShiftVI src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (4S)" %}
   ins_encode %{
@@ -17769,7 +17768,7 @@
 
 instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (URShiftVI src shift));
+  match(Set dst (URShiftVI src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (2S)" %}
   ins_encode %{
@@ -17782,7 +17781,7 @@
 
 instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 4);
-  match(Set dst (URShiftVI src shift));
+  match(Set dst (URShiftVI src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (4S)" %}
   ins_encode %{
@@ -17842,7 +17841,7 @@
 
 instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (LShiftVL src shift));
+  match(Set dst (LShiftVL src (LShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "shl    $dst, $src, $shift\t# vector (2D)" %}
   ins_encode %{
@@ -17855,7 +17854,7 @@
 
 instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (RShiftVL src shift));
+  match(Set dst (RShiftVL src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "sshr    $dst, $src, $shift\t# vector (2D)" %}
   ins_encode %{
@@ -17868,7 +17867,7 @@
 
 instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (URShiftVL src shift));
+  match(Set dst (URShiftVL src (RShiftCntV shift)));
   ins_cost(INSN_COST);
   format %{ "ushr    $dst, $src, $shift\t# vector (2D)" %}
   ins_encode %{
--- a/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -61,7 +61,6 @@
 define_pd_global(bool, CICompileOSR,                 true );
 #endif // !TIERED
 define_pd_global(bool, UseTypeProfile,               false);
-define_pd_global(bool, RoundFPResults,               true );
 
 define_pd_global(bool, LIRFillDelaySlots,            false);
 define_pd_global(bool, OptimizeSinglePrecision,      true );
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -59,16 +59,8 @@
   address   unextended_sp = (address)_unextended_sp;
 
   // consider stack guards when trying to determine "safe" stack pointers
-  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
-    (JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size()) : 0;
-  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
-
   // sp must be within the usable part of the stack (not in guards)
-  bool sp_safe = (sp < thread->stack_base()) &&
-                 (sp >= thread->stack_base() - usable_stack_size);
-
-
-  if (!sp_safe) {
+  if (!thread->is_in_usable_stack(sp)) {
     return false;
   }
 
@@ -566,7 +558,7 @@
 
   address locals =  (address) *interpreter_frame_locals_addr();
 
-  if (locals > thread->stack_base() || locals < (address) fp()) return false;
+  if (locals >= thread->stack_base() || locals < (address) fp()) return false;
 
   // We'd have to be pretty unlucky to be mislead at this point
   return true;
--- a/src/hotspot/cpu/arm/arm.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/arm/arm.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -1140,8 +1140,7 @@
 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
 }
 
-// Advertise here if the CPU requires explicit rounding operations
-// to implement the UseStrictFP mode.
+// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
 // Are floats converted to double when stored to stack during deoptimization?
@@ -10619,7 +10618,7 @@
 
 instruct vsl8B_immI(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 8);
-  match(Set dst (LShiftVB src shift));
+  match(Set dst (LShiftVB src (LShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10635,7 +10634,7 @@
 
 instruct vsl16B_immI(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 16);
-  match(Set dst (LShiftVB src shift));
+  match(Set dst (LShiftVB src (LShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10674,7 +10673,7 @@
 
 instruct vsl4S_immI(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 4);
-  match(Set dst (LShiftVS src shift));
+  match(Set dst (LShiftVS src (LShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10729,7 +10728,7 @@
 
 instruct vsl2I_immI(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
-  match(Set dst (LShiftVI src shift));
+  match(Set dst (LShiftVI src (LShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10745,7 +10744,7 @@
 
 instruct vsl4I_immI(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
-  match(Set dst (LShiftVI src shift));
+  match(Set dst (LShiftVI src (LShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10773,7 +10772,7 @@
 
 instruct vsl2L_immI(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (LShiftVL src shift));
+  match(Set dst (LShiftVL src (LShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10796,7 +10795,7 @@
 // Chars vector logical right shift
 instruct vsrl4S_immI(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 4);
-  match(Set dst (URShiftVS src shift));
+  match(Set dst (URShiftVS src (RShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10812,7 +10811,7 @@
 
 instruct vsrl8S_immI(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 8);
-  match(Set dst (URShiftVS src shift));
+  match(Set dst (URShiftVS src (RShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10829,7 +10828,7 @@
 // Integers vector logical right shift
 instruct vsrl2I_immI(vecD dst, vecD src, immI shift) %{
   predicate(n->as_Vector()->length() == 2 && VM_Version::has_simd());
-  match(Set dst (URShiftVI src shift));
+  match(Set dst (URShiftVI src (RShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10845,7 +10844,7 @@
 
 instruct vsrl4I_immI(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 4 && VM_Version::has_simd());
-  match(Set dst (URShiftVI src shift));
+  match(Set dst (URShiftVI src (RShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
@@ -10862,7 +10861,7 @@
 // Longs vector logical right shift
 instruct vsrl2L_immI(vecX dst, vecX src, immI shift) %{
   predicate(n->as_Vector()->length() == 2);
-  match(Set dst (URShiftVL src shift));
+  match(Set dst (URShiftVL src (RShiftCntV shift)));
   size(4);
   ins_cost(DEFAULT_COST); // FIXME
   format %{
--- a/src/hotspot/cpu/arm/c1_globals_arm.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/arm/c1_globals_arm.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -62,8 +62,6 @@
 define_pd_global(bool, CICompileOSR,                 true );
 #endif // COMPILER2
 define_pd_global(bool, UseTypeProfile,               false);
-define_pd_global(bool, RoundFPResults,               false);
-
 
 define_pd_global(bool, LIRFillDelaySlots,            false);
 define_pd_global(bool, OptimizeSinglePrecision,      true);
--- a/src/hotspot/cpu/arm/frame_arm.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/arm/frame_arm.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,21 +57,14 @@
   address   fp = (address)_fp;
   address   unextended_sp = (address)_unextended_sp;
 
-  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
-    (JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size()) : 0;
-  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
-
+  // consider stack guards when trying to determine "safe" stack pointers
   // sp must be within the usable part of the stack (not in guards)
-  bool sp_safe = (sp != NULL &&
-                 (sp <= thread->stack_base()) &&
-                 (sp >= thread->stack_base() - usable_stack_size));
-
-  if (!sp_safe) {
+  if (!thread->is_in_usable_stack(sp)) {
     return false;
   }
 
   bool unextended_sp_safe = (unextended_sp != NULL &&
-                             (unextended_sp <= thread->stack_base()) &&
+                             (unextended_sp < thread->stack_base()) &&
                              (unextended_sp >= sp));
   if (!unextended_sp_safe) {
     return false;
@@ -80,7 +73,7 @@
   // We know sp/unextended_sp are safe. Only fp is questionable here.
 
   bool fp_safe = (fp != NULL &&
-                  (fp <= thread->stack_base()) &&
+                  (fp < thread->stack_base()) &&
                   fp >= sp);
 
   if (_cb != NULL ) {
@@ -148,7 +141,7 @@
       // is really a frame pointer.
 
       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset + link_offset);
-      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
+      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
 
       if (!saved_fp_safe) {
         return false;
@@ -178,7 +171,7 @@
     // Could be the call_stub
     if (StubRoutines::returns_to_call_stub(sender_pc)) {
       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset + link_offset);
-      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp >= sender_sp);
+      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
 
       if (!saved_fp_safe) {
         return false;
@@ -191,7 +184,7 @@
       // Validate the JavaCallWrapper an entry frame must have
       address jcw = (address)sender.entry_frame_call_wrapper();
 
-      bool jcw_safe = (jcw <= thread->stack_base()) && (jcw > (address)sender.fp());
+      bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)sender.fp());
 
       return jcw_safe;
     }
@@ -501,7 +494,7 @@
 
   address locals =  (address) *interpreter_frame_locals_addr();
 
-  if (locals > thread->stack_base() || locals < (address) fp()) return false;
+  if (locals >= thread->stack_base() || locals < (address) fp()) return false;
 
   // We'd have to be pretty unlucky to be mislead at this point
 
--- a/src/hotspot/cpu/ppc/c1_globals_ppc.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/ppc/c1_globals_ppc.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -62,7 +62,6 @@
 #endif // !TIERED
 
 define_pd_global(bool,     UseTypeProfile,               false);
-define_pd_global(bool,     RoundFPResults,               false);
 
 define_pd_global(bool,     LIRFillDelaySlots,            false);
 define_pd_global(bool,     OptimizeSinglePrecision,      false);
--- a/src/hotspot/cpu/ppc/frame_ppc.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/ppc/frame_ppc.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -55,17 +55,9 @@
   address fp = (address)_fp;
   address unextended_sp = (address)_unextended_sp;
 
-  // Consider stack guards when trying to determine "safe" stack pointers
-  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
-    JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_reserved_zone_size() : 0;
-  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
-
+  // consider stack guards when trying to determine "safe" stack pointers
   // sp must be within the usable part of the stack (not in guards)
-  bool sp_safe = (sp < thread->stack_base()) &&
-                 (sp >= thread->stack_base() - usable_stack_size);
-
-
-  if (!sp_safe) {
+  if (!thread->is_in_usable_stack(sp)) {
     return false;
   }
 
@@ -77,10 +69,10 @@
   }
 
   // An fp must be within the stack and above (but not equal) sp.
-  bool fp_safe = (fp <= thread->stack_base()) &&  (fp > sp);
+  bool fp_safe = (fp < thread->stack_base()) && (fp > sp);
   // An interpreter fp must be within the stack and above (but not equal) sp.
   // Moreover, it must be at least the size of the ijava_state structure.
-  bool fp_interp_safe = (fp <= thread->stack_base()) && (fp > sp) &&
+  bool fp_interp_safe = (fp < thread->stack_base()) && (fp > sp) &&
     ((fp - sp) >= ijava_state_size);
 
   // We know sp/unextended_sp are safe, only fp is questionable here
@@ -140,7 +132,7 @@
 
     // sender_fp must be within the stack and above (but not
     // equal) current frame's fp.
-    if (sender_fp > thread->stack_base() || sender_fp <= fp) {
+    if (sender_fp >= thread->stack_base() || sender_fp <= fp) {
         return false;
     }
 
--- a/src/hotspot/cpu/ppc/ppc.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/ppc/ppc.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -2501,8 +2501,7 @@
  Unimplemented();
 }
 
-// Advertise here if the CPU requires explicit rounding operations
-// to implement the UseStrictFP mode.
+// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
 // Do floats take an entire double register or just half?
--- a/src/hotspot/cpu/s390/c1_globals_s390.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/s390/c1_globals_s390.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -63,7 +63,6 @@
 #endif // !TIERED
 
 define_pd_global(bool,     UseTypeProfile,               false);
-define_pd_global(bool,     RoundFPResults,               false);
 
 define_pd_global(bool,     LIRFillDelaySlots,            false);
 define_pd_global(bool,     OptimizeSinglePrecision,      false);
--- a/src/hotspot/cpu/s390/frame_s390.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/s390/frame_s390.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -59,17 +59,9 @@
   address fp = (address)_fp;
   address unextended_sp = (address)_unextended_sp;
 
-  // Consider stack guards when trying to determine "safe" stack pointers
-  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
-    JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_reserved_zone_size() : 0;
-  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
-
+  // consider stack guards when trying to determine "safe" stack pointers
   // sp must be within the usable part of the stack (not in guards)
-  bool sp_safe = (sp < thread->stack_base()) &&
-                 (sp >= thread->stack_base() - usable_stack_size);
-
-
-  if (!sp_safe) {
+  if (!thread->is_in_usable_stack(sp)) {
     return false;
   }
 
@@ -81,10 +73,10 @@
   }
 
   // An fp must be within the stack and above (but not equal) sp.
-  bool fp_safe = (fp <= thread->stack_base()) &&  (fp > sp);
+  bool fp_safe = (fp < thread->stack_base()) && (fp > sp);
   // An interpreter fp must be within the stack and above (but not equal) sp.
   // Moreover, it must be at least the size of the z_ijava_state structure.
-  bool fp_interp_safe = (fp <= thread->stack_base()) && (fp > sp) &&
+  bool fp_interp_safe = (fp < thread->stack_base()) && (fp > sp) &&
     ((fp - sp) >= z_ijava_state_size);
 
   // We know sp/unextended_sp are safe, only fp is questionable here
@@ -144,7 +136,7 @@
 
     // sender_fp must be within the stack and above (but not
     // equal) current frame's fp.
-    if (sender_fp > thread->stack_base() || sender_fp <= fp) {
+    if (sender_fp >= thread->stack_base() || sender_fp <= fp) {
         return false;
     }
 
--- a/src/hotspot/cpu/s390/s390.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/s390/s390.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -1710,8 +1710,7 @@
 // Java calling convention forces doubles to be aligned.
 const bool Matcher::misaligned_doubles_ok = true;
 
-// Advertise here if the CPU requires explicit rounding operations
-// to implement the UseStrictFP mode.
+// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
 // Do floats take an entire double register or just half?
--- a/src/hotspot/cpu/sparc/c1_globals_sparc.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/sparc/c1_globals_sparc.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -61,7 +61,6 @@
 #endif // !TIERED
 
 define_pd_global(bool, UseTypeProfile,               false);
-define_pd_global(bool, RoundFPResults,               false);
 
 define_pd_global(bool, LIRFillDelaySlots,            true );
 define_pd_global(bool, OptimizeSinglePrecision,      false);
--- a/src/hotspot/cpu/sparc/frame_sparc.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/sparc/frame_sparc.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -177,22 +177,21 @@
   address _SP = (address) sp();
   address _FP = (address) fp();
   address _UNEXTENDED_SP = (address) unextended_sp();
-  // sp must be within the stack
-  bool sp_safe = (_SP <= thread->stack_base()) &&
-                 (_SP >= thread->stack_base() - thread->stack_size());
 
-  if (!sp_safe) {
+  // consider stack guards when trying to determine "safe" stack pointers
+  // sp must be within the usable part of the stack (not in guards)
+  if (!thread->is_in_usable_stack(_SP)) {
     return false;
   }
 
   // unextended sp must be within the stack and above or equal sp
-  bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
+  bool unextended_sp_safe = (_UNEXTENDED_SP < thread->stack_base()) &&
                             (_UNEXTENDED_SP >= _SP);
 
   if (!unextended_sp_safe) return false;
 
   // an fp must be within the stack and above (but not equal) sp
-  bool fp_safe = (_FP <= thread->stack_base()) &&
+  bool fp_safe = (_FP < thread->stack_base()) &&
                  (_FP > _SP);
 
   // We know sp/unextended_sp are safe only fp is questionable here
@@ -252,7 +251,7 @@
 
     // an fp must be within the stack and above (but not equal) current frame's _FP
 
-    bool sender_fp_safe = (sender_fp <= thread->stack_base()) &&
+    bool sender_fp_safe = (sender_fp < thread->stack_base()) &&
                    (sender_fp > _FP);
 
     if (!sender_fp_safe) {
@@ -280,7 +279,7 @@
 
       address jcw = (address)sender.entry_frame_call_wrapper();
 
-      bool jcw_safe = (jcw <= thread->stack_base()) && (jcw > sender_fp);
+      bool jcw_safe = (jcw < thread->stack_base()) && (jcw > sender_fp);
 
       return jcw_safe;
     }
@@ -672,7 +671,7 @@
 
   address locals =  (address) *interpreter_frame_locals_addr();
 
-  if (locals > thread->stack_base() || locals < (address) fp()) return false;
+  if (locals >= thread->stack_base() || locals < (address) fp()) return false;
 
   // We'd have to be pretty unlucky to be mislead at this point
   return true;
--- a/src/hotspot/cpu/sparc/sparc.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/sparc/sparc.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -1873,8 +1873,7 @@
 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
 }
 
-// Advertise here if the CPU requires explicit rounding operations
-// to implement the UseStrictFP mode.
+// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
 // Are floats converted to double when stored to stack during deoptimization?
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -7159,7 +7159,7 @@
 // scalar single/double precision replicate
 
 // duplicate single precision data from src into programmed locations in dest : requires AVX512VL
-void Assembler::vpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
+void Assembler::vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -7167,7 +7167,7 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
-void Assembler::vpbroadcastss(XMMRegister dst, Address src, int vector_len) {
+void Assembler::vbroadcastss(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
@@ -7180,7 +7180,7 @@
 }
 
 // duplicate double precision data from src into programmed locations in dest : requires AVX512VL
-void Assembler::vpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
+void Assembler::vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
@@ -7189,7 +7189,7 @@
   emit_int8((unsigned char)(0xC0 | encode));
 }
 
-void Assembler::vpbroadcastsd(XMMRegister dst, Address src, int vector_len) {
+void Assembler::vbroadcastsd(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -2217,10 +2217,10 @@
   void evbroadcasti64x2(XMMRegister dst, Address src, int vector_len);
 
   // scalar single/double precision replicate
-  void vpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
-  void vpbroadcastss(XMMRegister dst, Address src, int vector_len);
-  void vpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
-  void vpbroadcastsd(XMMRegister dst, Address src, int vector_len);
+  void vbroadcastss(XMMRegister dst, XMMRegister src, int vector_len);
+  void vbroadcastss(XMMRegister dst, Address src, int vector_len);
+  void vbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len);
+  void vbroadcastsd(XMMRegister dst, Address src, int vector_len);
 
   // gpr sourced byte/word/dword/qword replicate
   void evpbroadcastb(XMMRegister dst, Register src, int vector_len);
--- a/src/hotspot/cpu/x86/c1_Defs_x86.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/c1_Defs_x86.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -33,7 +33,7 @@
 
 // explicit rounding operations are required to implement the strictFP mode
 enum {
-  pd_strict_fp_requires_explicit_rounding = true
+  pd_strict_fp_requires_explicit_rounding = LP64_ONLY( false ) NOT_LP64 ( true )
 };
 
 
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -908,11 +908,6 @@
   } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
     __ fstp_d(Address(rsp, 0));
     __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
-
-  // move between fpu-registers (no instruction necessary because of fpu-stack)
-  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
-    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
-    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
 #endif // !_LP64
 
     // move between xmm-registers
@@ -923,6 +918,13 @@
     assert(src->is_double_xmm(), "must match");
     __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
 
+#ifndef _LP64
+    // move between fpu-registers (no instruction necessary because of fpu-stack)
+  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
+    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
+    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
+#endif // !_LP64
+
   } else {
     ShouldNotReachHere();
   }
@@ -1595,6 +1597,7 @@
       __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
       __ fild_d(Address(rsp, 0));
       // float result is rounded later through spilling
+      break;
 
     case Bytecodes::_f2i:
     case Bytecodes::_d2i:
--- a/src/hotspot/cpu/x86/c1_globals_x86.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/c1_globals_x86.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -60,7 +60,6 @@
 define_pd_global(bool,   CICompileOSR,                 true );
 #endif // !TIERED
 define_pd_global(bool, UseTypeProfile,                 false);
-define_pd_global(bool, RoundFPResults,                 true );
 
 define_pd_global(bool, LIRFillDelaySlots,              false);
 define_pd_global(bool, OptimizeSinglePrecision,        true );
--- a/src/hotspot/cpu/x86/frame_x86.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/frame_x86.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,16 +57,8 @@
   address   unextended_sp = (address)_unextended_sp;
 
   // consider stack guards when trying to determine "safe" stack pointers
-  static size_t stack_guard_size = os::uses_stack_guard_pages() ?
-    JavaThread::stack_red_zone_size() + JavaThread::stack_yellow_zone_size() : 0;
-  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
-
   // sp must be within the usable part of the stack (not in guards)
-  bool sp_safe = (sp < thread->stack_base()) &&
-                 (sp >= thread->stack_base() - usable_stack_size);
-
-
-  if (!sp_safe) {
+  if (!thread->is_in_usable_stack(sp)) {
     return false;
   }
 
@@ -553,7 +545,7 @@
 
   address locals =  (address) *interpreter_frame_locals_addr();
 
-  if (locals > thread->stack_base() || locals < (address) fp()) return false;
+  if (locals >= thread->stack_base() || locals < (address) fp()) return false;
 
   // We'd have to be pretty unlucky to be mislead at this point
   return true;
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -824,7 +824,7 @@
 
   Register obj = stub->obj()->as_register();
   Register res = stub->result()->as_register();
-  Register addr = stub->addr()->as_register();
+  Register addr = stub->addr()->as_pointer_register();
   Register tmp1 = stub->tmp1()->as_register();
   Register tmp2 = stub->tmp2()->as_register();
   assert_different_registers(obj, res, addr, tmp1, tmp2);
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -672,11 +672,14 @@
     }
   }
   if (FLAG_IS_DEFAULT(UseAVX)) {
-    FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
-    if (is_intel_family_core() && _model == CPU_MODEL_SKYLAKE && _stepping < 5) {
-      FLAG_SET_DEFAULT(UseAVX, 2);  //Set UseAVX=2 for Skylake
+    // Don't use AVX-512 on older Skylakes unless explicitly requested.
+    if (use_avx_limit > 2 && is_intel_skylake() && _stepping < 5) {
+      FLAG_SET_DEFAULT(UseAVX, 2);
+    } else {
+      FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
     }
-  } else if (UseAVX > use_avx_limit) {
+  }
+  if (UseAVX > use_avx_limit) {
     warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
     FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
   } else if (UseAVX < 0) {
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -868,6 +868,9 @@
   static bool is_intel_family_core() { return is_intel() &&
                                        extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }
 
+  static bool is_intel_skylake() { return is_intel_family_core() &&
+                                          extended_cpu_model() == CPU_MODEL_SKYLAKE; }
+
   static bool is_intel_tsc_synched_at_init()  {
     if (is_intel_family_core()) {
       uint32_t ext_model = extended_cpu_model();
--- a/src/hotspot/cpu/x86/x86.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/x86.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -3377,20 +3377,18 @@
 // ====================ReplicateI=======================================
 
 instruct ReplI_reg(vec dst, rRegI src) %{
-  predicate((n->as_Vector()->length() <= 8) ||
-            (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl()));
   match(Set dst (ReplicateI src));
   format %{ "replicateI $dst,$src" %}
   ins_encode %{
     uint vlen = vector_length(this);
-    if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    if (vlen == 16 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vlen_enc = vector_length_encoding(this);
       __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vlen_enc);
     } else {
       __ movdl($dst$$XMMRegister, $src$$Register);
       __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
       if (vlen >= 8) {
-        assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplI_reg_leg
+        assert(vlen == 8, "sanity");
         __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
       }
     }
@@ -3398,33 +3396,19 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplI_reg_leg(legVec dst, rRegI src) %{
-  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateI src));
-  format %{ "replicateI  $dst,$src" %}
-  ins_encode %{
-    __ movdl($dst$$XMMRegister, $src$$Register);
-    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
-    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct ReplI_mem(vec dst, memory mem) %{
-  predicate((n->as_Vector()->length() <= 8  && VM_Version::supports_avx()) ||
-            (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl()));
+  predicate(VM_Version::supports_avx()); // use VEX-encoded pshufd to relax 16-byte alignment restriction on the source
   match(Set dst (ReplicateI (LoadI mem)));
   format %{ "replicateI $dst,$mem" %}
   ins_encode %{
     uint vlen = vector_length(this);
     if (vlen <= 4) {
       __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 16 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vector_len = vector_length_encoding(this);
       __ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len);
     } else {
-      assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplI_mem_leg
+      assert(vlen == 8, "sanity");
       __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
       __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
     }
@@ -3432,21 +3416,7 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplI_mem_leg(legVec dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateI (LoadI mem)));
-  format %{ "replicateI $dst,$mem" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
-    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct ReplI_imm(vec dst, immI con) %{
-  predicate((n->as_Vector()->length() <= 8) ||
-            (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl()));
   match(Set dst (ReplicateI con));
   format %{ "replicateI $dst,$con" %}
   ins_encode %{
@@ -3454,7 +3424,7 @@
     InternalAddress constaddr = $constantaddress(replicate8_imm($con$$constant, 4));
     if (vlen == 2) {
       __ movq($dst$$XMMRegister, constaddr);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 16 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vector_len = vector_length_encoding(this);
       __ movq($dst$$XMMRegister, constaddr);
       __ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len);
@@ -3470,19 +3440,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplI_imm_leg(legVec dst, immI con) %{
-  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateI con));
-  format %{ "replicateI $dst,$con" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate integer (4 byte) scalar zero to be vector
 instruct ReplI_zero(vec dst, immI0 zero) %{
   match(Set dst (ReplicateI zero));
@@ -3504,8 +3461,6 @@
 #ifdef _LP64
 // Replicate long (8 byte) scalar to be vector
 instruct ReplL_reg(vec dst, rRegL src) %{
-  predicate((n->as_Vector()->length() <= 4) ||
-            (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl()));
   match(Set dst (ReplicateL src));
   format %{ "replicateL $dst,$src" %}
   ins_encode %{
@@ -3513,11 +3468,11 @@
     if (vlen == 2) {
       __ movdq($dst$$XMMRegister, $src$$Register);
       __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 8 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vlen_enc = vector_length_encoding(this);
       __ evpbroadcastq($dst$$XMMRegister, $src$$Register, vlen_enc);
     } else {
-      assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplL_reg_leg
+      assert(vlen == 4, "sanity");
       __ movdq($dst$$XMMRegister, $src$$Register);
       __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
       __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
@@ -3525,19 +3480,6 @@
   %}
   ins_pipe( pipe_slow );
 %}
-
-instruct ReplL_reg_leg(legVec dst, rRegL src) %{
-  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateL src));
-  format %{ "replicateL $dst,$src" %}
-  ins_encode %{
-    __ movdq($dst$$XMMRegister, $src$$Register);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
 #else // _LP64
 // Replicate long (8 byte) scalar to be vector
 instruct ReplL_reg(vec dst, eRegL src, vec tmp) %{
@@ -3595,8 +3537,6 @@
 #endif // _LP64
 
 instruct ReplL_mem(vec dst, memory mem) %{
-  predicate((n->as_Vector()->length() <= 4) ||
-            (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl()));
   match(Set dst (ReplicateL (LoadL mem)));
   format %{ "replicateL $dst,$mem" %}
   ins_encode %{
@@ -3604,11 +3544,11 @@
     if (vlen == 2) {
       __ movq($dst$$XMMRegister, $mem$$Address);
       __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 8 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vlen_enc = vector_length_encoding(this);
       __ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vlen_enc);
     } else {
-      assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplL_mem_leg
+      assert(vlen == 4, "sanity");
       __ movq($dst$$XMMRegister, $mem$$Address);
       __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
       __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
@@ -3617,23 +3557,8 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplL_mem_leg(legVec dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateL (LoadL mem)));
-  format %{ "replicateL $dst,$mem" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $mem$$Address);
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Replicate long (8 byte) scalar immediate to be vector by loading from const table.
 instruct ReplL_imm(vec dst, immL con) %{
-  predicate((n->as_Vector()->length() <= 4) ||
-            (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl()));
   match(Set dst (ReplicateL con));
   format %{ "replicateL $dst,$con" %}
   ins_encode %{
@@ -3642,12 +3567,12 @@
     if (vlen == 2) {
       __ movq($dst$$XMMRegister, const_addr);
       __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 8 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vlen_enc = vector_length_encoding(this);
       __ movq($dst$$XMMRegister, const_addr);
       __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
     } else {
-      assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplL_imm_leg
+      assert(vlen == 4, "sanity");
       __ movq($dst$$XMMRegister, const_addr);
       __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
       __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
@@ -3656,19 +3581,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplL_imm_leg(legVec dst, immL con) %{
-  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateL con));
-  format %{ "replicateL $dst,$con" %}
-  ins_encode %{
-    __ movq($dst$$XMMRegister, $constantaddress($con));
-    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct ReplL_zero(vec dst, immL0 zero) %{
   match(Set dst (ReplicateL zero));
   format %{ "replicateL $dst,$zero" %}
@@ -3687,19 +3599,17 @@
 // ====================ReplicateF=======================================
 
 instruct ReplF_reg(vec dst, vlRegF src) %{
-  predicate((n->as_Vector()->length() <= 8) ||
-            (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl()));
   match(Set dst (ReplicateF src));
   format %{ "replicateF $dst,$src" %}
   ins_encode %{
     uint vlen = vector_length(this);
     if (vlen <= 4) {
       __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 16 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vector_len = vector_length_encoding(this);
-      __ vpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+      __ vbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len);
     } else {
-      assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplF_reg_leg
+      assert(vlen == 8, "sanity");
       __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
       __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
     }
@@ -3707,32 +3617,19 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplF_reg_leg(legVec dst, vlRegF src) %{
-  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateF src));
-  format %{ "replicateF $dst,$src" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
-    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct ReplF_mem(vec dst, memory mem) %{
-  predicate((n->as_Vector()->length() <= 8  && VM_Version::supports_avx()) ||
-            (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl()));
+  predicate(VM_Version::supports_avx()); // use VEX-encoded pshufd to relax 16-byte alignment restriction on the source
   match(Set dst (ReplicateF (LoadF mem)));
   format %{ "replicateF $dst,$mem" %}
   ins_encode %{
     uint vlen = vector_length(this);
     if (vlen <= 4) {
       __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 16 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vector_len = vector_length_encoding(this);
-      __ vpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len);
+      __ vbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len);
     } else {
-      assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplF_mem_leg
+      assert(vlen == 8, "sanity");
       __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
       __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
     }
@@ -3740,18 +3637,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplF_mem_leg(legVec dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateF (LoadF mem)));
-  format %{ "replicateF $dst,$mem" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00);
-    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct ReplF_zero(vec dst, immF0 zero) %{
   match(Set dst (ReplicateF zero));
   format %{ "replicateF $dst,$zero" %}
@@ -3771,19 +3656,17 @@
 
 // Replicate double (8 bytes) scalar to be vector
 instruct ReplD_reg(vec dst, vlRegD src) %{
-  predicate((n->as_Vector()->length() <= 4) ||
-            (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl()));
   match(Set dst (ReplicateD src));
   format %{ "replicateD $dst,$src" %}
   ins_encode %{
     uint vlen = vector_length(this);
     if (vlen == 2) {
       __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 8 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vector_len = vector_length_encoding(this);
-      __ vpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
+      __ vbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len);
     } else {
-      assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplD_reg_leg
+      assert(vlen == 4, "sanity");
       __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
       __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
     }
@@ -3791,32 +3674,19 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplD_reg_leg(legVec dst, vlRegD src) %{
-  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateD src));
-  format %{ "replicateD $dst,$src" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
-    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct ReplD_mem(vec dst, memory mem) %{
-  predicate((n->as_Vector()->length() <= 4 && VM_Version::supports_avx()) ||
-            (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl()));
+  predicate(VM_Version::supports_avx()); // use VEX-encoded pshufd to relax 16-byte alignment restriction on the source
   match(Set dst (ReplicateD (LoadD mem)));
   format %{ "replicateD $dst,$mem" %}
   ins_encode %{
     uint vlen = vector_length(this);
     if (vlen == 2) {
       __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44);
-    } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
+    } else if (vlen == 8 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
       int vector_len = vector_length_encoding(this);
-      __ vpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len);
+      __ vbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len);
     } else {
-      assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplD_mem_leg
+      assert(vlen == 4, "sanity");
       __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44);
       __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
     }
@@ -3824,18 +3694,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct ReplD_mem_leg(legVec dst, memory mem) %{
-  predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl());
-  match(Set dst (ReplicateD (LoadD mem)));
-  format %{ "replicateD $dst,$mem" %}
-  ins_encode %{
-    __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44);
-    __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
-    __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 instruct ReplD_zero(vec dst, immD0 zero) %{
   match(Set dst (ReplicateD zero));
   format %{ "replicateD $dst,$zero" %}
@@ -5396,18 +5254,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct vshiftcntimm(vec dst, immI8 cnt, rRegI tmp) %{
-  match(Set dst cnt);
-  effect(TEMP tmp);
-  format %{ "movl    $tmp,$cnt\t"
-            "movdl   $dst,$tmp\t! load shift count" %}
-  ins_encode %{
-    __ movl($tmp$$Register, $cnt$$constant);
-    __ movdl($dst$$XMMRegister, $tmp$$Register);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // Byte vector shift
 instruct vshiftB(vec dst, vec src, vec shift, vec tmp, rRegI scratch) %{
   predicate(n->as_Vector()->length() <= 8);
--- a/src/hotspot/cpu/x86/x86_32.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/x86_32.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -1516,8 +1516,7 @@
   node->_opnds[opcnt] = new_memory;
 }
 
-// Advertise here if the CPU requires explicit rounding operations
-// to implement the UseStrictFP mode.
+// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
 const bool Matcher::strict_fp_requires_explicit_rounding = true;
 
 // Are floats conerted to double when stored to stack during deoptimization?
--- a/src/hotspot/cpu/x86/x86_64.ad	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/cpu/x86/x86_64.ad	Wed Mar 04 12:01:01 2020 +0100
@@ -1700,9 +1700,8 @@
 // No-op on amd64
 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
 
-// Advertise here if the CPU requires explicit rounding operations to
-// implement the UseStrictFP mode.
-const bool Matcher::strict_fp_requires_explicit_rounding = true;
+// Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
+const bool Matcher::strict_fp_requires_explicit_rounding = false;
 
 // Are floats conerted to double when stored to stack during deoptimization?
 // On x64 it is stored without convertion so we can use normal access.
@@ -10521,24 +10520,6 @@
 
 //----------Arithmetic Conversion Instructions---------------------------------
 
-instruct roundFloat_nop(regF dst)
-%{
-  match(Set dst (RoundFloat dst));
-
-  ins_cost(0);
-  ins_encode();
-  ins_pipe(empty);
-%}
-
-instruct roundDouble_nop(regD dst)
-%{
-  match(Set dst (RoundDouble dst));
-
-  ins_cost(0);
-  ins_encode();
-  ins_pipe(empty);
-%}
-
 instruct convF2D_reg_reg(regD dst, regF src)
 %{
   match(Set dst (ConvF2D src));
--- a/src/hotspot/os/aix/os_aix.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os/aix/os_aix.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3720,10 +3720,18 @@
     errno = ENAMETOOLONG;
     return -1;
   }
-  int fd;
-
-  fd = ::open64(path, oflag, mode);
-  if (fd == -1) return -1;
+  // AIX 7.X now supports O_CLOEXEC too, like modern Linux; but we have to be careful, see
+  // IV90804: OPENING A FILE IN AFS WITH O_CLOEXEC FAILS WITH AN EINVAL ERROR APPLIES TO AIX 7100-04 17/04/14 PTF PECHANGE
+  int oflag_with_o_cloexec = oflag | O_CLOEXEC;
+
+  int fd = ::open64(path, oflag_with_o_cloexec, mode);
+  if (fd == -1) {
+    // we might fail in the open call when O_CLOEXEC is set, so try again without (see IV90804)
+    fd = ::open64(path, oflag, mode);
+    if (fd == -1) {
+      return -1;
+    }
+  }
 
   // If the open succeeded, the file might still be a directory.
   {
@@ -3755,21 +3763,25 @@
   //
   // - might cause an fopen in the subprocess to fail on a system
   //   suffering from bug 1085341.
-  //
-  // (Yes, the default setting of the close-on-exec flag is a Unix
-  // design flaw.)
-  //
-  // See:
-  // 1085341: 32-bit stdio routines should support file descriptors >255
-  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
-  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
-#ifdef FD_CLOEXEC
-  {
+
+  // Validate that the use of the O_CLOEXEC flag on open above worked.
+  static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
+  if (O_CLOEXEC_is_known_to_work == 0) {
     int flags = ::fcntl(fd, F_GETFD);
-    if (flags != -1)
+    if (flags != -1) {
+      if ((flags & FD_CLOEXEC) != 0) {
+        O_CLOEXEC_is_known_to_work = 1;
+      } else { // it does not work
+        ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+        O_CLOEXEC_is_known_to_work = -1;
+      }
+    }
+  } else if (O_CLOEXEC_is_known_to_work == -1) {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1) {
       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+    }
   }
-#endif
 
   return fd;
 }
--- a/src/hotspot/os/bsd/osThread_bsd.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os/bsd/osThread_bsd.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,11 +36,12 @@
 #else
   _thread_id        = NULL;
 #endif
+  _unique_thread_id = 0;
   _pthread_id       = NULL;
-  _siginfo = NULL;
-  _ucontext = NULL;
-  _expanding_stack = 0;
-  _alt_sig_stack = NULL;
+  _siginfo          = NULL;
+  _ucontext         = NULL;
+  _expanding_stack  = 0;
+  _alt_sig_stack    = NULL;
 
   sigemptyset(&_caller_sigmask);
 
@@ -49,6 +50,22 @@
   assert(_startThread_lock !=NULL, "check");
 }
 
+// Additional thread_id used to correlate threads in SA
+void OSThread::set_unique_thread_id() {
+#ifdef __APPLE__
+  thread_identifier_info_data_t m_ident_info;
+  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
+
+  mach_port_t mach_thread_port = mach_thread_self();
+  guarantee(mach_thread_port != 0, "just checking");
+  thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
+              (thread_info_t) &m_ident_info, &count);
+  mach_port_deallocate(mach_task_self(), mach_thread_port);
+
+  _unique_thread_id = m_ident_info.thread_id;
+#endif
+}
+
 void OSThread::pd_destroy() {
   delete _startThread_lock;
 }
--- a/src/hotspot/os/bsd/osThread_bsd.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os/bsd/osThread_bsd.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,9 +82,7 @@
     _pthread_id = tid;
   }
 
-  void set_unique_thread_id(uint64_t id) {
-    _unique_thread_id = id;
-  }
+  void set_unique_thread_id();
 
   // ***************************************************************
   // suspension support.
--- a/src/hotspot/os/bsd/os_bsd.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -634,19 +634,6 @@
 objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NULL;
 #endif
 
-#ifdef __APPLE__
-static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) {
-  // Additional thread_id used to correlate threads in SA
-  thread_identifier_info_data_t     m_ident_info;
-  mach_msg_type_number_t            count = THREAD_IDENTIFIER_INFO_COUNT;
-
-  thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO,
-              (thread_info_t) &m_ident_info, &count);
-
-  return m_ident_info.thread_id;
-}
-#endif
-
 // Thread start routine for all newly created threads
 static void *thread_native_entry(Thread *thread) {
 
@@ -672,10 +659,10 @@
     os::current_thread_id(), (uintx) pthread_self());
 
 #ifdef __APPLE__
-  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
-  guarantee(unique_thread_id != 0, "unique thread id was not found");
-  osthread->set_unique_thread_id(unique_thread_id);
+  // Store unique OS X thread id used by SA
+  osthread->set_unique_thread_id();
 #endif
+
   // initialize signal mask for this thread
   os::Bsd::hotspot_sigmask(thread);
 
@@ -823,12 +810,12 @@
 
   osthread->set_thread_id(os::Bsd::gettid());
 
+#ifdef __APPLE__
+  // Store unique OS X thread id used by SA
+  osthread->set_unique_thread_id();
+#endif
+
   // Store pthread info into the OSThread
-#ifdef __APPLE__
-  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
-  guarantee(unique_thread_id != 0, "just checking");
-  osthread->set_unique_thread_id(unique_thread_id);
-#endif
   osthread->set_pthread_id(::pthread_self());
 
   // initialize floating point control register
@@ -1100,12 +1087,11 @@
 pid_t os::Bsd::gettid() {
   int retval = -1;
 
-#ifdef __APPLE__ //XNU kernel
-  // despite the fact mach port is actually not a thread id use it
-  // instead of syscall(SYS_thread_selfid) as it certainly fits to u4
-  retval = ::pthread_mach_thread_np(::pthread_self());
-  guarantee(retval != 0, "just checking");
-  return retval;
+#ifdef __APPLE__ // XNU kernel
+  mach_port_t port = mach_thread_self();
+  guarantee(MACH_PORT_VALID(port), "just checking");
+  mach_port_deallocate(mach_task_self(), port);
+  return (pid_t)port;
 
 #else
   #ifdef __FreeBSD__
@@ -1128,7 +1114,7 @@
 
 intx os::current_thread_id() {
 #ifdef __APPLE__
-  return (intx)::pthread_mach_thread_np(::pthread_self());
+  return (intx)os::Bsd::gettid();
 #else
   return (intx)::pthread_self();
 #endif
--- a/src/hotspot/os/linux/os_linux.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -719,7 +719,7 @@
   assert(t->osthread()->expanding_stack(), "expand should be set");
   assert(t->stack_base() != NULL, "stack_base was not initialized");
 
-  if (addr <  t->stack_base() && addr >= t->stack_reserved_zone_base()) {
+  if (t->is_in_usable_stack(addr)) {
     sigset_t mask_all, old_sigset;
     sigfillset(&mask_all);
     pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
--- a/src/hotspot/os/windows/os_windows.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os/windows/os_windows.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2542,7 +2542,7 @@
           //
           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
           address addr = (address) exceptionRecord->ExceptionInformation[1];
-          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
+          if (thread->is_in_usable_stack(addr)) {
             addr = (address)((uintptr_t)addr &
                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
             os::commit_memory((char *)addr, thread->stack_base() - addr,
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -336,8 +336,7 @@
         return 1;
       }
       // check if fault address is within thread stack
-      if (addr < thread->stack_base() &&
-          addr >= thread->stack_base() - thread->stack_size()) {
+      if (thread->on_local_stack(addr)) {
         // stack overflow
         if (thread->in_stack_yellow_reserved_zone(addr)) {
           thread->disable_stack_yellow_reserved_zone();
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -63,7 +63,7 @@
 
     if (ret_frame.is_interpreted_frame()) {
       frame::z_ijava_state* istate = ret_frame.ijava_state_unchecked();
-      if (stack_base() >= (address)istate && (address)istate > stack_end()) {
+      if (on_local_stack((address)istate)) {
         return false;
       }
       const Method *m = (const Method*)(istate->method);
--- a/src/hotspot/share/adlc/dfa.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/adlc/dfa.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -471,7 +471,7 @@
 
 
 class dfa_shared_preds {
-  enum { count = 4 };
+  enum { count = 3 IA32_ONLY( + 1 ) };
 
   static bool        _found[count];
   static const char* _type [count];
@@ -582,15 +582,10 @@
   }
 };
 // shared predicates, _var and _pred entry should be the same length
-bool         dfa_shared_preds::_found[dfa_shared_preds::count]
-  = { false, false, false, false };
-const char*  dfa_shared_preds::_type[dfa_shared_preds::count]
-  = { "int", "jlong", "intptr_t", "bool" };
-const char*  dfa_shared_preds::_var [dfa_shared_preds::count]
-  = { "_n_get_int__", "_n_get_long__", "_n_get_intptr_t__", "Compile__current____select_24_bit_instr__" };
-const char*  dfa_shared_preds::_pred[dfa_shared_preds::count]
-  = { "n->get_int()", "n->get_long()", "n->get_intptr_t()", "Compile::current()->select_24_bit_instr()" };
-
+bool         dfa_shared_preds::_found[dfa_shared_preds::count] = { false,          false,           false               IA32_ONLY(COMMA false)  };
+const char*  dfa_shared_preds::_type [dfa_shared_preds::count] = { "int",          "jlong",         "intptr_t"          IA32_ONLY(COMMA "bool") };
+const char*  dfa_shared_preds::_var  [dfa_shared_preds::count] = { "_n_get_int__", "_n_get_long__", "_n_get_intptr_t__" IA32_ONLY(COMMA "Compile__current____select_24_bit_instr__") };
+const char*  dfa_shared_preds::_pred [dfa_shared_preds::count] = { "n->get_int()", "n->get_long()", "n->get_intptr_t()" IA32_ONLY(COMMA "Compile::current()->select_24_bit_instr()") };
 
 void ArchDesc::gen_dfa_state_body(FILE* fp, Dict &minimize, ProductionState &status, Dict &operands_chained_from, int i) {
   // Start the body of each Op_XXX sub-dfa with a clean state.
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -607,9 +607,15 @@
       return load;
     }
 
-    if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
-      // can't skip load since value might get rounded as a side effect
-      return load;
+    if (strict_fp_requires_explicit_rounding && load->type()->is_float_kind()) {
+#ifdef IA32
+      if (UseSSE < 2) {
+        // can't skip load since value might get rounded as a side effect
+        return load;
+      }
+#else
+      Unimplemented();
+#endif // IA32
     }
 
     ciField* field = load->field();
@@ -2272,17 +2278,23 @@
 
 
 Value GraphBuilder::round_fp(Value fp_value) {
-  // no rounding needed if SSE2 is used
-  if (RoundFPResults && UseSSE < 2) {
-    // Must currently insert rounding node for doubleword values that
-    // are results of expressions (i.e., not loads from memory or
-    // constants)
-    if (fp_value->type()->tag() == doubleTag &&
-        fp_value->as_Constant() == NULL &&
-        fp_value->as_Local() == NULL &&       // method parameters need no rounding
-        fp_value->as_RoundFP() == NULL) {
-      return append(new RoundFP(fp_value));
+  if (strict_fp_requires_explicit_rounding) {
+#ifdef IA32
+    // no rounding needed if SSE2 is used
+    if (UseSSE < 2) {
+      // Must currently insert rounding node for doubleword values that
+      // are results of expressions (i.e., not loads from memory or
+      // constants)
+      if (fp_value->type()->tag() == doubleTag &&
+          fp_value->as_Constant() == NULL &&
+          fp_value->as_Local() == NULL &&       // method parameters need no rounding
+          fp_value->as_RoundFP() == NULL) {
+        return append(new RoundFP(fp_value));
+      }
     }
+#else
+    Unimplemented();
+#endif // IA32
   }
   return fp_value;
 }
@@ -3766,11 +3778,17 @@
   // Proper inlining of methods with jsrs requires a little more work.
   if (callee->has_jsrs()                 ) INLINE_BAILOUT("jsrs not handled properly by inliner yet");
 
-  // When SSE2 is used on intel, then no special handling is needed
-  // for strictfp because the enum-constant is fixed at compile time,
-  // the check for UseSSE2 is needed here
-  if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) {
-    INLINE_BAILOUT("caller and callee have different strict fp requirements");
+  if (strict_fp_requires_explicit_rounding &&
+      method()->is_strict() != callee->is_strict()) {
+#ifdef IA32
+    // If explicit rounding is required, do not inline strict code into non-strict code (or the reverse).
+    // When SSE2 is present, no special handling is needed.
+    if (UseSSE < 2) {
+      INLINE_BAILOUT("caller and callee have different strict fp requirements");
+    }
+#else
+    Unimplemented();
+#endif // IA32
   }
 
   if (is_profiling() && !callee->ensure_method_data()) {
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -778,6 +778,7 @@
 
 
 void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
+  assert(strict_fp_requires_explicit_rounding, "not required");
   assert((src->is_single_fpu() && dest->is_single_stack()) ||
          (src->is_double_fpu() && dest->is_double_stack()),
          "round_fp: rounds register -> stack location");
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -899,13 +899,19 @@
 LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
   assert(opr->is_register(), "why spill if item is not register?");
 
-  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
-    LIR_Opr result = new_register(T_FLOAT);
-    set_vreg_flag(result, must_start_in_memory);
-    assert(opr->is_register(), "only a register can be spilled");
-    assert(opr->value_type()->is_float(), "rounding only for floats available");
-    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
-    return result;
+  if (strict_fp_requires_explicit_rounding) {
+#ifdef IA32
+    if (UseSSE < 1 && opr->is_single_fpu()) {
+      LIR_Opr result = new_register(T_FLOAT);
+      set_vreg_flag(result, must_start_in_memory);
+      assert(opr->is_register(), "only a register can be spilled");
+      assert(opr->value_type()->is_float(), "rounding only for floats available");
+      __ roundfp(opr, LIR_OprFact::illegalOpr, result);
+      return result;
+    }
+#else
+    Unimplemented();
+#endif // IA32
   }
   return opr;
 }
@@ -1951,6 +1957,8 @@
 
 
 void LIRGenerator::do_RoundFP(RoundFP* x) {
+  assert(strict_fp_requires_explicit_rounding, "not required");
+
   LIRItem input(x->input(), this);
   input.load_item();
   LIR_Opr input_opr = input.result();
--- a/src/hotspot/share/c1/c1_RangeCheckElimination.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/c1/c1_RangeCheckElimination.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -30,6 +30,9 @@
 #include "c1/c1_ValueMap.hpp"
 #include "ci/ciMethodData.hpp"
 #include "runtime/deoptimization.hpp"
+#ifdef ASSERT
+#include "utilities/bitMap.inline.hpp"
+#endif
 
 // Macros for the Trace and the Assertion flag
 #ifdef ASSERT
@@ -1050,6 +1053,7 @@
 }
 #endif
 
+#ifdef ASSERT
 // Verification or the IR
 RangeCheckEliminator::Verification::Verification(IR *ir) : _used(BlockBegin::number_of_blocks(), BlockBegin::number_of_blocks(), false) {
   this->_ir = ir;
@@ -1099,21 +1103,16 @@
     BlockList *all_blocks = _ir->linear_scan_order();
     assert(block->number_of_preds() >= 1, "Block must have at least one predecessor");
     assert(!block->is_set(BlockBegin::exception_entry_flag), "Loop header must not be exception handler!");
-    // Sometimes, the backbranch comes from an exception handler. In
-    // this case, loop indexes/loop depths may not appear correct.
+
     bool loop_through_xhandler = false;
-    for (int i = 0; i < block->number_of_exception_handlers(); i++) {
-      BlockBegin *xhandler = block->exception_handler_at(i);
-      for (int j = 0; j < block->number_of_preds(); j++) {
-        if (dominates(xhandler, block->pred_at(j)) || xhandler == block->pred_at(j)) {
-          loop_through_xhandler = true;
+    for (int i=0; i<block->number_of_sux(); i++) {
+      BlockBegin *sux = block->sux_at(i);
+      if (!loop_through_xhandler) {
+        if (sux->loop_depth() == block->loop_depth() && sux->loop_index() != block->loop_index()) {
+          loop_through_xhandler = is_backbranch_from_xhandler(block);
+          assert(loop_through_xhandler, "Loop indices have to be the same if same depths but no backbranch from xhandler");
         }
       }
-    }
-
-    for (int i=0; i<block->number_of_sux(); i++) {
-      BlockBegin *sux = block->sux_at(i);
-      assert(sux->loop_depth() != block->loop_depth() || sux->loop_index() == block->loop_index() || loop_through_xhandler, "Loop index has to be same");
       assert(sux->loop_depth() == block->loop_depth() || sux->loop_index() != block->loop_index(), "Loop index has to be different");
     }
 
@@ -1132,6 +1131,54 @@
   }
 }
 
+// Called when a successor of a block has the same loop depth but a different loop index. This can happen if a backbranch comes from
+// an exception handler of a loop head block, for example, when a loop is only executed once on the non-exceptional path but is
+// repeated in case of an exception. In this case, the edge block->sux is not critical and was not split before.
+// Check if there is such a backbranch from an xhandler of 'block'.
+bool RangeCheckEliminator::Verification::is_backbranch_from_xhandler(BlockBegin* block) {
+  for (int i = 0; i < block->number_of_exception_handlers(); i++) {
+    BlockBegin *xhandler = block->exception_handler_at(i);
+    for (int j = 0; j < block->number_of_preds(); j++) {
+      if (dominates(xhandler, block->pred_at(j)) || xhandler == block->pred_at(j)) {
+        return true;
+      }
+    }
+  }
+
+  // In case of nested xhandlers, we need to walk through the loop (and all blocks belonging to exception handlers)
+  // to find an xhandler of 'block'.
+  if (block->number_of_exception_handlers() > 0) {
+    for (int i = 0; i < block->number_of_preds(); i++) {
+      BlockBegin* pred = block->pred_at(i);
+      if (pred->loop_index() == block->loop_index()) {
+        // Only check blocks that belong to the loop
+        // Do a BFS to find an xhandler block of 'block' starting from 'pred'
+        ResourceMark rm;
+        ResourceBitMap visited(BlockBegin::number_of_blocks());
+        BlockBeginList list;
+        list.push(pred);
+        while (!list.is_empty()) {
+          BlockBegin* next = list.pop();
+          if (!visited.at(next->block_id())) {
+            visited.set_bit(next->block_id());
+            for (int j = 0; j < block->number_of_exception_handlers(); j++) {
+               if (next == block->exception_handler_at(j)) {
+                 return true;
+               }
+            }
+            for (int j = 0; j < next->number_of_preds(); j++) {
+               if (next->pred_at(j) != block) {
+                 list.push(next->pred_at(j));
+               }
+            }
+          }
+        }
+      }
+    }
+  }
+  return false;
+}
+
 // Loop header must dominate all loop blocks
 bool RangeCheckEliminator::Verification::dominates(BlockBegin *dominator, BlockBegin *block) {
   BlockBegin *cur = block->dominator();
@@ -1195,6 +1242,7 @@
 
   return false;
 }
+#endif // ASSERT
 
 // Bound
 RangeCheckEliminator::Bound::~Bound() {
--- a/src/hotspot/share/c1/c1_RangeCheckElimination.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/c1/c1_RangeCheckElimination.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -43,6 +43,7 @@
   typedef GrowableArray<BlockBegin*> BlockBeginList;
   typedef GrowableArray<int> IntegerStack;
 
+#ifdef ASSERT
   class Verification : public BlockClosure {
   // RangeCheckEliminator::Verification should never get instatiated on the heap.
   private:
@@ -51,6 +52,10 @@
     void operator delete(void* p) { ShouldNotReachHere(); }
     void operator delete[](void* p) { ShouldNotReachHere(); }
 
+    bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
+    bool dominates(BlockBegin *dominator, BlockBegin *block);
+    bool is_backbranch_from_xhandler(BlockBegin* block);
+
     IR *_ir;
     boolArray _used;
     BlockBeginList _current;
@@ -59,9 +64,8 @@
   public:
     Verification(IR *ir);
     virtual void block_do(BlockBegin *block);
-    bool can_reach(BlockBegin *start, BlockBegin *end, BlockBegin *dont_use = NULL);
-    bool dominates(BlockBegin *dominator, BlockBegin *block);
   };
+#endif
 
 public:
   // Bounds for an instruction in the form x + c which c integer
--- a/src/hotspot/share/c1/c1_globals.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/c1/c1_globals.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -170,9 +170,6 @@
   develop(bool, UseTableRanges, true,                                       \
           "Faster versions of lookup table using ranges")                   \
                                                                             \
-  develop_pd(bool, RoundFPResults,                                          \
-          "Indicates whether rounding is needed for floating point results")\
-                                                                            \
   develop(intx, NestedInliningSizeRatio, 90,                                \
           "Percentage of prev. allowed inline size in recursive inlining")  \
           range(0, 100)                                                     \
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -213,14 +213,19 @@
   }
 
   ciInstanceKlass* self = this;
-  for (;;) {
-    assert(self->is_loaded(), "must be loaded to have size");
-    ciInstanceKlass* super = self->super();
-    if (super == NULL || super->nof_nonstatic_fields() == 0 ||
-        !super->contains_field_offset(offset)) {
-      return self;
-    } else {
-      self = super;  // return super->get_canonical_holder(offset)
+  assert(self->is_loaded(), "must be loaded to access field info");
+  ciField* field = self->get_field_by_offset(offset, false);
+  if (field != NULL) {
+    return field->holder();
+  } else {
+    for (;;) {
+      assert(self->is_loaded(), "must be loaded to have size");
+      ciInstanceKlass* super = self->super();
+      if (super == NULL || super->nof_nonstatic_fields() == 0) {
+        return self;
+      } else {
+        self = super;  // return super->get_canonical_holder(offset)
+      }
     }
   }
 }
@@ -392,6 +397,13 @@
 }
 
 // ------------------------------------------------------------------
+// ciInstanceKlass::contains_field_offset
+bool ciInstanceKlass::contains_field_offset(int offset) {
+  VM_ENTRY_MARK;
+  return get_instanceKlass()->contains_field_offset(offset);
+}
+
+// ------------------------------------------------------------------
 // ciInstanceKlass::get_field_by_offset
 ciField* ciInstanceKlass::get_field_by_offset(int field_offset, bool is_static) {
   if (!is_static) {
@@ -457,15 +469,9 @@
   ciInstanceKlass* super = this->super();
   GrowableArray<ciField*>* super_fields = NULL;
   if (super != NULL && super->has_nonstatic_fields()) {
-    int super_fsize  = super->nonstatic_field_size() * heapOopSize;
     int super_flen   = super->nof_nonstatic_fields();
     super_fields = super->_nonstatic_fields;
     assert(super_flen == 0 || super_fields != NULL, "first get nof_fields");
-    // See if I am no larger than my super; if so, I can use his fields.
-    if (fsize == super_fsize) {
-      _nonstatic_fields = super_fields;
-      return super_fields->length();
-    }
   }
 
   GrowableArray<ciField*>* fields = NULL;
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,9 +225,7 @@
   ciInstanceKlass* unique_concrete_subklass();
   bool has_finalizable_subclass();
 
-  bool contains_field_offset(int offset) {
-    return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
-  }
+  bool contains_field_offset(int offset);
 
   // Get the instance of java.lang.Class corresponding to
   // this klass.  This instance is used for locking of
--- a/src/hotspot/share/classfile/classFileParser.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -30,6 +30,7 @@
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/defaultMethods.hpp"
 #include "classfile/dictionary.hpp"
+#include "classfile/fieldLayoutBuilder.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
@@ -60,6 +61,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/os.hpp"
@@ -1686,8 +1688,12 @@
     field->set_allocation_type(atype);
 
     // After field is initialized with type, we can augment it with aux info
-    if (parsed_annotations.has_any_annotations())
+    if (parsed_annotations.has_any_annotations()) {
       parsed_annotations.apply_to(field);
+      if (field->is_contended()) {
+        _has_contended_fields = true;
+      }
+    }
   }
 
   int index = length;
@@ -3932,39 +3938,6 @@
   return super_klass;
 }
 
-static unsigned int compute_oop_map_count(const InstanceKlass* super,
-                                          unsigned int nonstatic_oop_map_count,
-                                          int first_nonstatic_oop_offset) {
-
-  unsigned int map_count =
-    NULL == super ? 0 : super->nonstatic_oop_map_count();
-  if (nonstatic_oop_map_count > 0) {
-    // We have oops to add to map
-    if (map_count == 0) {
-      map_count = nonstatic_oop_map_count;
-    }
-    else {
-      // Check whether we should add a new map block or whether the last one can
-      // be extended
-      const OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps();
-      const OopMapBlock* const last_map = first_map + map_count - 1;
-
-      const int next_offset = last_map->offset() + last_map->count() * heapOopSize;
-      if (next_offset == first_nonstatic_oop_offset) {
-        // There is no gap bettwen superklass's last oop field and first
-        // local oop field, merge maps.
-        nonstatic_oop_map_count -= 1;
-      }
-      else {
-        // Superklass didn't end with a oop field, add extra maps
-        assert(next_offset < first_nonstatic_oop_offset, "just checking");
-      }
-      map_count += nonstatic_oop_map_count;
-    }
-  }
-  return map_count;
-}
-
 #ifndef PRODUCT
 static void print_field_layout(const Symbol* name,
                                Array<u2>* fields,
@@ -4002,18 +3975,121 @@
 }
 #endif
 
-// Values needed for oopmap and InstanceKlass creation
-class ClassFileParser::FieldLayoutInfo : public ResourceObj {
- public:
-  int*          nonstatic_oop_offsets;
-  unsigned int* nonstatic_oop_counts;
-  unsigned int  nonstatic_oop_map_count;
-  unsigned int  total_oop_map_count;
-  int           instance_size;
-  int           nonstatic_field_size;
-  int           static_field_size;
-  bool          has_nonstatic_fields;
-};
+OopMapBlocksBuilder::OopMapBlocksBuilder(unsigned int max_blocks) {
+  _max_nonstatic_oop_maps = max_blocks;
+  _nonstatic_oop_map_count = 0;
+  if (max_blocks == 0) {
+    _nonstatic_oop_maps = NULL;
+  } else {
+    _nonstatic_oop_maps =
+        NEW_RESOURCE_ARRAY(OopMapBlock, _max_nonstatic_oop_maps);
+    memset(_nonstatic_oop_maps, 0, sizeof(OopMapBlock) * max_blocks);
+  }
+}
+
+OopMapBlock* OopMapBlocksBuilder::last_oop_map() const {
+  assert(_nonstatic_oop_map_count > 0, "Has no oop maps");
+  return _nonstatic_oop_maps + (_nonstatic_oop_map_count - 1);
+}
+
+// addition of super oop maps
+void OopMapBlocksBuilder::initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks) {
+  assert(nof_blocks && _nonstatic_oop_map_count == 0 &&
+         nof_blocks <= _max_nonstatic_oop_maps, "invariant");
+
+  memcpy(_nonstatic_oop_maps, blocks, sizeof(OopMapBlock) * nof_blocks);
+  _nonstatic_oop_map_count += nof_blocks;
+}
+
+// collection of oops
+void OopMapBlocksBuilder::add(int offset, int count) {
+  if (_nonstatic_oop_map_count == 0) {
+    _nonstatic_oop_map_count++;
+  }
+  OopMapBlock* nonstatic_oop_map = last_oop_map();
+  if (nonstatic_oop_map->count() == 0) {  // Unused map, set it up
+    nonstatic_oop_map->set_offset(offset);
+    nonstatic_oop_map->set_count(count);
+  } else if (nonstatic_oop_map->is_contiguous(offset)) { // contiguous, add
+    nonstatic_oop_map->increment_count(count);
+  } else { // Need a new one...
+    _nonstatic_oop_map_count++;
+    assert(_nonstatic_oop_map_count <= _max_nonstatic_oop_maps, "range check");
+    nonstatic_oop_map = last_oop_map();
+    nonstatic_oop_map->set_offset(offset);
+    nonstatic_oop_map->set_count(count);
+  }
+}
+
+// general purpose copy, e.g. into allocated instanceKlass
+void OopMapBlocksBuilder::copy(OopMapBlock* dst) {
+  if (_nonstatic_oop_map_count != 0) {
+    memcpy(dst, _nonstatic_oop_maps, sizeof(OopMapBlock) * _nonstatic_oop_map_count);
+  }
+}
+
+// Sort and compact adjacent blocks
+void OopMapBlocksBuilder::compact() {
+  if (_nonstatic_oop_map_count <= 1) {
+    return;
+  }
+  /*
+   * Since field layout sneeks in oops before values, we will be able to condense
+   * blocks. There is potential to compact between super, own refs and values
+   * containing refs.
+   *
+   * Currently compaction is slightly limited due to values being 8 byte aligned.
+   * This may well change: FixMe if it doesn't, the code below is fairly general purpose
+   * and maybe it doesn't need to be.
+   */
+  qsort(_nonstatic_oop_maps, _nonstatic_oop_map_count, sizeof(OopMapBlock),
+        (_sort_Fn)OopMapBlock::compare_offset);
+  if (_nonstatic_oop_map_count < 2) {
+    return;
+  }
+
+  // Make a temp copy, and iterate through and copy back into the original
+  ResourceMark rm;
+  OopMapBlock* oop_maps_copy =
+      NEW_RESOURCE_ARRAY(OopMapBlock, _nonstatic_oop_map_count);
+  OopMapBlock* oop_maps_copy_end = oop_maps_copy + _nonstatic_oop_map_count;
+  copy(oop_maps_copy);
+  OopMapBlock* nonstatic_oop_map = _nonstatic_oop_maps;
+  unsigned int new_count = 1;
+  oop_maps_copy++;
+  while(oop_maps_copy < oop_maps_copy_end) {
+    assert(nonstatic_oop_map->offset() < oop_maps_copy->offset(), "invariant");
+    if (nonstatic_oop_map->is_contiguous(oop_maps_copy->offset())) {
+      nonstatic_oop_map->increment_count(oop_maps_copy->count());
+    } else {
+      nonstatic_oop_map++;
+      new_count++;
+      nonstatic_oop_map->set_offset(oop_maps_copy->offset());
+      nonstatic_oop_map->set_count(oop_maps_copy->count());
+    }
+    oop_maps_copy++;
+  }
+  assert(new_count <= _nonstatic_oop_map_count, "end up with more maps after compact() ?");
+  _nonstatic_oop_map_count = new_count;
+}
+
+void OopMapBlocksBuilder::print_on(outputStream* st) const {
+  st->print_cr("  OopMapBlocks: %3d  /%3d", _nonstatic_oop_map_count, _max_nonstatic_oop_maps);
+  if (_nonstatic_oop_map_count > 0) {
+    OopMapBlock* map = _nonstatic_oop_maps;
+    OopMapBlock* last_map = last_oop_map();
+    assert(map <= last_map, "Last less than first");
+    while (map <= last_map) {
+      st->print_cr("    Offset: %3d  -%3d Count: %3d", map->offset(),
+                   map->offset() + map->offset_span() - heapOopSize, map->count());
+      map++;
+    }
+  }
+}
+
+void OopMapBlocksBuilder::print_value_on(outputStream* st) const {
+  print_on(st);
+}
 
 // Layout fields and fill in FieldLayoutInfo.  Could use more refactoring!
 void ClassFileParser::layout_fields(ConstantPool* cp,
@@ -4100,16 +4176,15 @@
   // count[i] oops following. Before we know how many regions are required,
   // we pessimistically allocate the maps to fit all the oops into the
   // distinct regions.
-  //
-  // TODO: We add +1 to always allocate non-zero resource arrays; we need
-  // to figure out if we still need to do this.
-  unsigned int nonstatic_oop_map_count = 0;
-  unsigned int max_nonstatic_oop_maps  = fac->count[NONSTATIC_OOP] + 1;
-
-  int* nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
-            THREAD, int, max_nonstatic_oop_maps);
-  unsigned int* const nonstatic_oop_counts  = NEW_RESOURCE_ARRAY_IN_THREAD(
-            THREAD, unsigned int, max_nonstatic_oop_maps);
+
+  int super_oop_map_count = (_super_klass == NULL) ? 0 :_super_klass->nonstatic_oop_map_count();
+  int max_oop_map_count = super_oop_map_count + fac->count[NONSTATIC_OOP];
+
+  OopMapBlocksBuilder* nonstatic_oop_maps = new OopMapBlocksBuilder(max_oop_map_count);
+  if (super_oop_map_count > 0) {
+    nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(),
+                                                    _super_klass->nonstatic_oop_map_count());
+  }
 
   int first_nonstatic_oop_offset = 0; // will be set for first oop field
 
@@ -4260,26 +4335,7 @@
           real_offset = next_nonstatic_oop_offset;
           next_nonstatic_oop_offset += heapOopSize;
         }
-
-        // Record this oop in the oop maps
-        if( nonstatic_oop_map_count > 0 &&
-            nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
-            real_offset -
-            int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
-            heapOopSize ) {
-          // This oop is adjacent to the previous one, add to current oop map
-          assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
-          nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
-        } else {
-          // This oop is not adjacent to the previous one, create new oop map
-          assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
-          nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
-          nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
-          nonstatic_oop_map_count += 1;
-          if( first_nonstatic_oop_offset == 0 ) { // Undefined
-            first_nonstatic_oop_offset = real_offset;
-          }
-        }
+        nonstatic_oop_maps->add(real_offset, 1);
         break;
       case NONSTATIC_BYTE:
         if( nonstatic_byte_space_count > 0 ) {
@@ -4392,26 +4448,7 @@
             next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, heapOopSize);
             real_offset = next_nonstatic_padded_offset;
             next_nonstatic_padded_offset += heapOopSize;
-
-            // Record this oop in the oop maps
-            if( nonstatic_oop_map_count > 0 &&
-                nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
-                real_offset -
-                int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
-                heapOopSize ) {
-              // This oop is adjacent to the previous one, add to current oop map
-              assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
-              nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
-            } else {
-              // This oop is not adjacent to the previous one, create new oop map
-              assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
-              nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
-              nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
-              nonstatic_oop_map_count += 1;
-              if( first_nonstatic_oop_offset == 0 ) { // Undefined
-                first_nonstatic_oop_offset = real_offset;
-              }
-            }
+            nonstatic_oop_maps->add(real_offset, 1);
             break;
 
           default:
@@ -4475,9 +4512,7 @@
          (nonstatic_fields_count > 0), "double-check nonstatic start/end");
 
   // Number of non-static oop map blocks allocated at end of klass.
-  const unsigned int total_oop_map_count =
-    compute_oop_map_count(_super_klass, nonstatic_oop_map_count,
-                          first_nonstatic_oop_offset);
+  nonstatic_oop_maps->compact();
 
 #ifndef PRODUCT
   if (PrintFieldLayout) {
@@ -4492,58 +4527,13 @@
 
 #endif
   // Pass back information needed for InstanceKlass creation
-  info->nonstatic_oop_offsets = nonstatic_oop_offsets;
-  info->nonstatic_oop_counts = nonstatic_oop_counts;
-  info->nonstatic_oop_map_count = nonstatic_oop_map_count;
-  info->total_oop_map_count = total_oop_map_count;
-  info->instance_size = instance_size;
-  info->static_field_size = static_field_size;
-  info->nonstatic_field_size = nonstatic_field_size;
-  info->has_nonstatic_fields = has_nonstatic_fields;
+  info->oop_map_blocks = nonstatic_oop_maps;
+  info->_instance_size = instance_size;
+  info->_static_field_size = static_field_size;
+  info->_nonstatic_field_size = nonstatic_field_size;
+  info->_has_nonstatic_fields = has_nonstatic_fields;
 }
 
-static void fill_oop_maps(const InstanceKlass* k,
-                          unsigned int nonstatic_oop_map_count,
-                          const int* nonstatic_oop_offsets,
-                          const unsigned int* nonstatic_oop_counts) {
-
-  assert(k != NULL, "invariant");
-
-  OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
-  const InstanceKlass* const super = k->superklass();
-  const unsigned int super_count = super ? super->nonstatic_oop_map_count() : 0;
-  if (super_count > 0) {
-    // Copy maps from superklass
-    OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
-    for (unsigned int i = 0; i < super_count; ++i) {
-      *this_oop_map++ = *super_oop_map++;
-    }
-  }
-
-  if (nonstatic_oop_map_count > 0) {
-    if (super_count + nonstatic_oop_map_count > k->nonstatic_oop_map_count()) {
-      // The counts differ because there is no gap between superklass's last oop
-      // field and the first local oop field.  Extend the last oop map copied
-      // from the superklass instead of creating new one.
-      nonstatic_oop_map_count--;
-      nonstatic_oop_offsets++;
-      this_oop_map--;
-      this_oop_map->set_count(this_oop_map->count() + *nonstatic_oop_counts++);
-      this_oop_map++;
-    }
-
-    // Add new map blocks, fill them
-    while (nonstatic_oop_map_count-- > 0) {
-      this_oop_map->set_offset(*nonstatic_oop_offsets++);
-      this_oop_map->set_count(*nonstatic_oop_counts++);
-      this_oop_map++;
-    }
-    assert(k->start_of_nonstatic_oop_maps() + k->nonstatic_oop_map_count() ==
-           this_oop_map, "sanity");
-  }
-}
-
-
 void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) {
   assert(ik != NULL, "invariant");
 
@@ -5498,17 +5488,17 @@
 
 int ClassFileParser::static_field_size() const {
   assert(_field_info != NULL, "invariant");
-  return _field_info->static_field_size;
+  return _field_info->_static_field_size;
 }
 
 int ClassFileParser::total_oop_map_count() const {
   assert(_field_info != NULL, "invariant");
-  return _field_info->total_oop_map_count;
+  return _field_info->oop_map_blocks->_nonstatic_oop_map_count;
 }
 
 jint ClassFileParser::layout_size() const {
   assert(_field_info != NULL, "invariant");
-  return _field_info->instance_size;
+  return _field_info->_instance_size;
 }
 
 static void check_methods_for_intrinsics(const InstanceKlass* ik,
@@ -5652,19 +5642,19 @@
   set_klass_to_deallocate(ik);
 
   assert(_field_info != NULL, "invariant");
-  assert(ik->static_field_size() == _field_info->static_field_size, "sanity");
-  assert(ik->nonstatic_oop_map_count() == _field_info->total_oop_map_count,
-    "sanity");
+  assert(ik->static_field_size() == _field_info->_static_field_size, "sanity");
+  assert(ik->nonstatic_oop_map_count() == _field_info->oop_map_blocks->_nonstatic_oop_map_count,
+         "sanity");
 
   assert(ik->is_instance_klass(), "sanity");
-  assert(ik->size_helper() == _field_info->instance_size, "sanity");
+  assert(ik->size_helper() == _field_info->_instance_size, "sanity");
 
   // Fill in information already parsed
   ik->set_should_verify_class(_need_verify);
 
   // Not yet: supers are done below to support the new subtype-checking fields
-  ik->set_nonstatic_field_size(_field_info->nonstatic_field_size);
-  ik->set_has_nonstatic_fields(_field_info->has_nonstatic_fields);
+  ik->set_nonstatic_field_size(_field_info->_nonstatic_field_size);
+  ik->set_has_nonstatic_fields(_field_info->_has_nonstatic_fields);
   assert(_fac != NULL, "invariant");
   ik->set_static_oop_field_count(_fac->count[STATIC_OOP]);
 
@@ -5755,10 +5745,15 @@
 
   // Compute transitive closure of interfaces this class implements
   // Do final class setup
-  fill_oop_maps(ik,
-                _field_info->nonstatic_oop_map_count,
-                _field_info->nonstatic_oop_offsets,
-                _field_info->nonstatic_oop_counts);
+  OopMapBlocksBuilder* oop_map_blocks = _field_info->oop_map_blocks;
+  if (oop_map_blocks->_nonstatic_oop_map_count > 0) {
+    oop_map_blocks->copy(ik->start_of_nonstatic_oop_maps());
+  }
+
+  if (_has_contended_fields || _parsed_annotations->is_contended() ||
+      ( _super_klass != NULL && _super_klass->has_contended_annotations())) {
+    ik->set_has_contended_annotations(true);
+  }
 
   // Fill in has_finalizer, has_vanilla_constructor, and layout_helper
   set_precomputed_flags(ik);
@@ -6001,6 +5996,7 @@
   _has_nonstatic_concrete_methods(false),
   _declares_nonstatic_concrete_methods(false),
   _has_final_method(false),
+  _has_contended_fields(false),
   _has_finalizer(false),
   _has_empty_finalizer(false),
   _has_vanilla_constructor(false),
@@ -6478,7 +6474,13 @@
   assert(_parsed_annotations != NULL, "invariant");
 
   _field_info = new FieldLayoutInfo();
-  layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK);
+  if (UseNewFieldLayout) {
+    FieldLayoutBuilder lb(class_name(), super_klass(), _cp, _fields,
+                          _parsed_annotations->is_contended(), _field_info);
+    lb.build_layout();
+  } else {
+    layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK);
+  }
 
   // Compute reference typ
   _rt = (NULL ==_super_klass) ? REF_NONE : _super_klass->reference_type();
--- a/src/hotspot/share/classfile/classFileParser.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/classfile/classFileParser.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "memory/referenceType.hpp"
 #include "oops/annotations.hpp"
 #include "oops/constantPool.hpp"
+#include "oops/instanceKlass.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "utilities/accessFlags.hpp"
 
@@ -45,17 +46,46 @@
 class RecordComponent;
 class Symbol;
 class TempNewSymbol;
+class FieldLayoutBuilder;
+
+// Utility to collect and compact oop maps during layout
+class OopMapBlocksBuilder : public ResourceObj {
+ public:
+  OopMapBlock* _nonstatic_oop_maps;
+  unsigned int _nonstatic_oop_map_count;
+  unsigned int _max_nonstatic_oop_maps;
+
+  OopMapBlocksBuilder(unsigned int  max_blocks);
+  OopMapBlock* last_oop_map() const;
+  void initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks);
+  void add(int offset, int count);
+  void copy(OopMapBlock* dst);
+  void compact();
+  void print_on(outputStream* st) const;
+  void print_value_on(outputStream* st) const;
+};
+
+// Values needed for oopmap and InstanceKlass creation
+class FieldLayoutInfo : public ResourceObj {
+ public:
+  OopMapBlocksBuilder* oop_map_blocks;
+  int _instance_size;
+  int _nonstatic_field_size;
+  int _static_field_size;
+  bool  _has_nonstatic_fields;
+};
 
 // Parser for for .class files
 //
 // The bytes describing the class file structure is read from a Stream object
 
 class ClassFileParser {
+  friend class FieldLayoutBuilder;
+  friend class FieldLayout;
 
- class ClassAnnotationCollector;
- class FieldAllocationCount;
- class FieldAnnotationCollector;
- class FieldLayoutInfo;
+  class ClassAnnotationCollector;
+  class FieldAllocationCount;
+  class FieldAnnotationCollector;
 
  public:
   // The ClassFileParser has an associated "publicity" level
@@ -161,6 +191,7 @@
   bool _has_nonstatic_concrete_methods;
   bool _declares_nonstatic_concrete_methods;
   bool _has_final_method;
+  bool _has_contended_fields;
 
   // precomputed flags
   bool _has_finalizer;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/classfile/fieldLayoutBuilder.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "classfile/classFileParser.hpp"
+#include "classfile/fieldLayoutBuilder.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/array.hpp"
+#include "oops/fieldStreams.inline.hpp"
+#include "oops/instanceMirrorKlass.hpp"
+#include "oops/klass.inline.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
+
+
+LayoutRawBlock::LayoutRawBlock(Kind kind, int size) :
+  _next_block(NULL),
+  _prev_block(NULL),
+  _kind(kind),
+  _offset(-1),
+  _alignment(1),
+  _size(size),
+  _field_index(-1),
+  _is_reference(false) {
+  assert(kind == EMPTY || kind == RESERVED || kind == PADDING || kind == INHERITED,
+         "Otherwise, should use the constructor with a field index argument");
+  assert(size > 0, "Sanity check");
+}
+
+
+LayoutRawBlock::LayoutRawBlock(int index, Kind kind, int size, int alignment, bool is_reference) :
+ _next_block(NULL),
+ _prev_block(NULL),
+ _kind(kind),
+ _offset(-1),
+ _alignment(alignment),
+ _size(size),
+ _field_index(index),
+ _is_reference(is_reference) {
+  assert(kind == REGULAR || kind == FLATTENED || kind == INHERITED,
+         "Other kind do not have a field index");
+  assert(size > 0, "Sanity check");
+  assert(alignment > 0, "Sanity check");
+}
+
+bool LayoutRawBlock::fit(int size, int alignment) {
+  int adjustment = 0;
+  if ((_offset % alignment) != 0) {
+    adjustment = alignment - (_offset % alignment);
+  }
+  return _size >= size + adjustment;
+}
+
+FieldGroup::FieldGroup(int contended_group) :
+  _next(NULL),
+  _primitive_fields(NULL),
+  _oop_fields(NULL),
+  _contended_group(contended_group),  // -1 means no contended group, 0 means default contended group
+  _oop_count(0) {}
+
+void FieldGroup::add_primitive_field(AllFieldStream fs, BasicType type) {
+  int size = type2aelembytes(type);
+  LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::REGULAR, size, size /* alignment == size for primitive types */, false);
+  if (_primitive_fields == NULL) {
+    _primitive_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
+  }
+  _primitive_fields->append(block);
+}
+
+void FieldGroup::add_oop_field(AllFieldStream fs) {
+  int size = type2aelembytes(T_OBJECT);
+  LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::REGULAR, size, size /* alignment == size for oops */, true);
+  if (_oop_fields == NULL) {
+    _oop_fields = new(ResourceObj::RESOURCE_AREA, mtInternal) GrowableArray<LayoutRawBlock*>(INITIAL_LIST_SIZE);
+  }
+  _oop_fields->append(block);
+  _oop_count++;
+}
+
+void FieldGroup::sort_by_size() {
+  if (_primitive_fields != NULL) {
+    _primitive_fields->sort(LayoutRawBlock::compare_size_inverted);
+  }
+}
+
+FieldLayout::FieldLayout(Array<u2>* fields, ConstantPool* cp) :
+  _fields(fields),
+  _cp(cp),
+  _blocks(NULL),
+  _start(_blocks),
+  _last(_blocks) {}
+
+void FieldLayout::initialize_static_layout() {
+  _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
+  _blocks->set_offset(0);
+  _last = _blocks;
+  _start = _blocks;
+  // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() could be zero, because
+  // during bootstrapping, the size of the java.lang.Class is still not known when layout
+  // of static field is computed. Field offsets are fixed later when the size is known
+  // (see java_lang_Class::fixup_mirror())
+  if (InstanceMirrorKlass::offset_of_static_fields() > 0) {
+    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields()));
+    _blocks->set_offset(0);
+  }
+}
+
+void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) {
+  if (super_klass == NULL) {
+    _blocks = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
+    _blocks->set_offset(0);
+    _last = _blocks;
+    _start = _blocks;
+    insert(first_empty_block(), new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
+  } else {
+    reconstruct_layout(super_klass);
+    fill_holes(super_klass);
+    if (UseEmptySlotsInSupers && !super_klass->has_contended_annotations()) {
+      _start = _blocks; // Setting _start to _blocks instead of _last would allow subclasses
+                        // to allocate fields in empty slots of their super classes
+    } else {
+      _start = _last;
+    }
+  }
+}
+
+LayoutRawBlock* FieldLayout::first_field_block() {
+  LayoutRawBlock* block = _start;
+  while (block->kind() != LayoutRawBlock::INHERITED && block->kind() != LayoutRawBlock::REGULAR
+      && block->kind() != LayoutRawBlock::FLATTENED && block->kind() != LayoutRawBlock::PADDING) {
+    block = block->next_block();
+  }
+  return block;
+}
+
+
+// Insert a set of fields into a layout using a best-fit strategy.
+// For each field, search for the smallest empty slot able to fit the field
+// (satisfying both size and alignment requirements), if none is found,
+// add the field at the end of the layout.
+// Fields cannot be inserted before the block specified in the "start" argument
+void FieldLayout::add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
+  if (list == NULL) return;
+  if (start == NULL) start = this->_start;
+  bool last_search_success = false;
+  int last_size = 0;
+  int last_alignment = 0;
+  for (int i = 0; i < list->length(); i ++) {
+    LayoutRawBlock* b = list->at(i);
+    LayoutRawBlock* cursor = NULL;
+    LayoutRawBlock* candidate = NULL;
+
+    // if start is the last block, just append the field
+    if (start == last_block()) {
+      candidate = last_block();
+    }
+    // Before iterating over the layout to find an empty slot fitting the field's requirements,
+    // check if the previous field had the same requirements and if the search for a fitting slot
+    // was successful. If the requirements were the same but the search failed, a new search will
+    // fail the same way, so just append the field at the end of the layout.
+    else  if (b->size() == last_size && b->alignment() == last_alignment && !last_search_success) {
+      candidate = last_block();
+    } else {
+      // Iterate over the layout to find an empty slot fitting the field's requirements
+      last_size = b->size();
+      last_alignment = b->alignment();
+      cursor = last_block()->prev_block();
+      assert(cursor != NULL, "Sanity check");
+      last_search_success = true;
+      while (cursor != start) {
+        if (cursor->kind() == LayoutRawBlock::EMPTY && cursor->fit(b->size(), b->alignment())) {
+          if (candidate == NULL || cursor->size() < candidate->size()) {
+            candidate = cursor;
+          }
+        }
+        cursor = cursor->prev_block();
+      }
+      if (candidate == NULL) {
+        candidate = last_block();
+        last_search_success = false;
+      }
+      assert(candidate != NULL, "Candidate must not be null");
+      assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
+      assert(candidate->fit(b->size(), b->alignment()), "Candidate must be able to store the block");
+    }
+
+    insert_field_block(candidate, b);
+  }
+}
+
+// Used for classes with hard coded field offsets, insert a field at the specified offset
+void FieldLayout::add_field_at_offset(LayoutRawBlock* block, int offset, LayoutRawBlock* start) {
+  assert(block != NULL, "Sanity check");
+  block->set_offset(offset);
+  if (start == NULL) {
+    start = this->_start;
+  }
+  LayoutRawBlock* slot = start;
+  while (slot != NULL) {
+    if ((slot->offset() <= block->offset() && (slot->offset() + slot->size()) > block->offset()) ||
+        slot == _last){
+      assert(slot->kind() == LayoutRawBlock::EMPTY, "Matching slot must be an empty slot");
+      assert(slot->size() >= block->offset() + block->size() ,"Matching slot must be big enough");
+      if (slot->offset() < block->offset()) {
+        int adjustment = block->offset() - slot->offset();
+        LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
+        insert(slot, adj);
+      }
+      insert(slot, block);
+      if (slot->size() == 0) {
+        remove(slot);
+      }
+      FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
+      return;
+    }
+    slot = slot->next_block();
+  }
+  fatal("Should have found a matching slot above, corrupted layout or invalid offset");
+}
+
+// The allocation logic uses a best fit strategy: the set of fields is allocated
+// in the first empty slot big enough to contain the whole set ((including padding
+// to fit alignment constraints).
+void FieldLayout::add_contiguously(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start) {
+  if (list == NULL) return;
+  if (start == NULL) {
+    start = _start;
+  }
+  // This code assumes that if the first block is well aligned, the following
+  // blocks would naturally be well aligned (no need for adjustment)
+  int size = 0;
+  for (int i = 0; i < list->length(); i++) {
+    size += list->at(i)->size();
+  }
+
+  LayoutRawBlock* candidate = NULL;
+  if (start == last_block()) {
+    candidate = last_block();
+  } else {
+    LayoutRawBlock* first = list->at(0);
+    candidate = last_block()->prev_block();
+    while (candidate->kind() != LayoutRawBlock::EMPTY || !candidate->fit(size, first->alignment())) {
+      if (candidate == start) {
+        candidate = last_block();
+        break;
+      }
+      candidate = candidate->prev_block();
+    }
+    assert(candidate != NULL, "Candidate must not be null");
+    assert(candidate->kind() == LayoutRawBlock::EMPTY, "Candidate must be an empty block");
+    assert(candidate->fit(size, first->alignment()), "Candidate must be able to store the whole contiguous block");
+  }
+
+  for (int i = 0; i < list->length(); i++) {
+    LayoutRawBlock* b = list->at(i);
+    insert_field_block(candidate, b);
+    assert((candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned");
+  }
+}
+
+LayoutRawBlock* FieldLayout::insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block) {
+  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
+  if (slot->offset() % block->alignment() != 0) {
+    int adjustment = block->alignment() - (slot->offset() % block->alignment());
+    LayoutRawBlock* adj = new LayoutRawBlock(LayoutRawBlock::EMPTY, adjustment);
+    insert(slot, adj);
+  }
+  insert(slot, block);
+  if (slot->size() == 0) {
+    remove(slot);
+  }
+  FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
+  return block;
+}
+
+void FieldLayout::reconstruct_layout(const InstanceKlass* ik) {
+  GrowableArray<LayoutRawBlock*>* all_fields = new GrowableArray<LayoutRawBlock*>(32);
+  while (ik != NULL) {
+    for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
+      BasicType type = Signature::basic_type(fs.signature());
+      // distinction between static and non-static fields is missing
+      if (fs.access_flags().is_static()) continue;
+      int size = type2aelembytes(type);
+      // INHERITED blocks are marked as non-reference because oop_maps are handled by their holder class
+      LayoutRawBlock* block = new LayoutRawBlock(fs.index(), LayoutRawBlock::INHERITED, size, size, false);
+      block->set_offset(fs.offset());
+      all_fields->append(block);
+    }
+    ik = ik->super() == NULL ? NULL : InstanceKlass::cast(ik->super());
+  }
+
+  all_fields->sort(LayoutRawBlock::compare_offset);
+  _blocks = new LayoutRawBlock(LayoutRawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes());
+  _blocks->set_offset(0);
+  _last = _blocks;
+
+  for(int i = 0; i < all_fields->length(); i++) {
+    LayoutRawBlock* b = all_fields->at(i);
+    _last->set_next_block(b);
+    b->set_prev_block(_last);
+    _last = b;
+  }
+  _start = _blocks;
+}
+
+// Called during the reconstruction of a layout, after fields from super
+// classes have been inserted. It fills unused slots between inserted fields
+// with EMPTY blocks, so the regular field insertion methods would work.
+// This method handles classes with @Contended annotations differently
+// by inserting PADDING blocks instead of EMPTY blocks to prevent subclasses'
+// fields to interfere with contended fields/classes.
+void FieldLayout::fill_holes(const InstanceKlass* super_klass) {
+  assert(_blocks != NULL, "Sanity check");
+  assert(_blocks->offset() == 0, "first block must be at offset zero");
+  LayoutRawBlock::Kind filling_type = super_klass->has_contended_annotations() ? LayoutRawBlock::PADDING: LayoutRawBlock::EMPTY;
+  LayoutRawBlock* b = _blocks;
+  while (b->next_block() != NULL) {
+    if (b->next_block()->offset() > (b->offset() + b->size())) {
+      int size = b->next_block()->offset() - (b->offset() + b->size());
+      LayoutRawBlock* empty = new LayoutRawBlock(filling_type, size);
+      empty->set_offset(b->offset() + b->size());
+      empty->set_next_block(b->next_block());
+      b->next_block()->set_prev_block(empty);
+      b->set_next_block(empty);
+      empty->set_prev_block(b);
+    }
+    b = b->next_block();
+  }
+  assert(b->next_block() == NULL, "Invariant at this point");
+  assert(b->kind() != LayoutRawBlock::EMPTY, "Sanity check");
+
+  // If the super class has @Contended annotation, a padding block is
+  // inserted at the end to ensure that fields from the subclasses won't share
+  // the cache line of the last field of the contended class
+  if (super_klass->has_contended_annotations() && ContendedPaddingWidth > 0) {
+    LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
+    p->set_offset(b->offset() + b->size());
+    b->set_next_block(p);
+    p->set_prev_block(b);
+    b = p;
+  }
+
+  if (!UseEmptySlotsInSupers) {
+    // Add an empty slot to align fields of the subclass on a heapOopSize boundary
+    // in order to emulate the behavior of the previous algorithm
+    int align = (b->offset() + b->size()) % heapOopSize;
+    if (align != 0) {
+      int sz = heapOopSize - align;
+      LayoutRawBlock* p = new LayoutRawBlock(LayoutRawBlock::EMPTY, sz);
+      p->set_offset(b->offset() + b->size());
+      b->set_next_block(p);
+      p->set_prev_block(b);
+      b = p;
+    }
+  }
+
+  LayoutRawBlock* last = new LayoutRawBlock(LayoutRawBlock::EMPTY, INT_MAX);
+  last->set_offset(b->offset() + b->size());
+  assert(last->offset() > 0, "Sanity check");
+  b->set_next_block(last);
+  last->set_prev_block(b);
+  _last = last;
+}
+
+LayoutRawBlock* FieldLayout::insert(LayoutRawBlock* slot, LayoutRawBlock* block) {
+  assert(slot->kind() == LayoutRawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
+  assert(slot->offset() % block->alignment() == 0, "Incompatible alignment");
+  block->set_offset(slot->offset());
+  slot->set_offset(slot->offset() + block->size());
+  assert((slot->size() - block->size()) < slot->size(), "underflow checking");
+  assert(slot->size() - block->size() >= 0, "no negative size allowed");
+  slot->set_size(slot->size() - block->size());
+  block->set_prev_block(slot->prev_block());
+  block->set_next_block(slot);
+  slot->set_prev_block(block);
+  if (block->prev_block() != NULL) {
+    block->prev_block()->set_next_block(block);
+  }
+  if (_blocks == slot) {
+    _blocks = block;
+  }
+  return block;
+}
+
+void FieldLayout::remove(LayoutRawBlock* block) {
+  assert(block != NULL, "Sanity check");
+  assert(block != _last, "Sanity check");
+  if (_blocks == block) {
+    _blocks = block->next_block();
+    if (_blocks != NULL) {
+      _blocks->set_prev_block(NULL);
+    }
+  } else {
+    assert(block->prev_block() != NULL, "_prev should be set for non-head blocks");
+    block->prev_block()->set_next_block(block->next_block());
+    block->next_block()->set_prev_block(block->prev_block());
+  }
+  if (block == _start) {
+    _start = block->prev_block();
+  }
+}
+
+void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlass* super) {
+  ResourceMark rm;
+  LayoutRawBlock* b = _blocks;
+  while(b != _last) {
+    switch(b->kind()) {
+      case LayoutRawBlock::REGULAR: {
+        FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
+        output->print_cr(" @%d \"%s\" %s %d/%d %s",
+                         b->offset(),
+                         fi->name(_cp)->as_C_string(),
+                         fi->signature(_cp)->as_C_string(),
+                         b->size(),
+                         b->alignment(),
+                         "REGULAR");
+        break;
+      }
+      case LayoutRawBlock::FLATTENED: {
+        FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
+        output->print_cr(" @%d \"%s\" %s %d/%d %s",
+                         b->offset(),
+                         fi->name(_cp)->as_C_string(),
+                         fi->signature(_cp)->as_C_string(),
+                         b->size(),
+                         b->alignment(),
+                         "FLATTENED");
+        break;
+      }
+      case LayoutRawBlock::RESERVED: {
+        output->print_cr(" @%d %d/- %s",
+                         b->offset(),
+                         b->size(),
+                         "RESERVED");
+        break;
+      }
+      case LayoutRawBlock::INHERITED: {
+        assert(!is_static, "Static fields are not inherited in layouts");
+        assert(super != NULL, "super klass must be provided to retrieve inherited fields info");
+        bool found = false;
+        const InstanceKlass* ik = super;
+        while (!found && ik != NULL) {
+          for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
+            if (fs.offset() == b->offset()) {
+              output->print_cr(" @%d \"%s\" %s %d/%d %s",
+                  b->offset(),
+                  fs.name()->as_C_string(),
+                  fs.signature()->as_C_string(),
+                  b->size(),
+                  b->size(), // so far, alignment constraint == size, will change with Valhalla
+                  "INHERITED");
+              found = true;
+              break;
+            }
+          }
+          ik = ik->java_super();
+        }
+        break;
+      }
+      case LayoutRawBlock::EMPTY:
+        output->print_cr(" @%d %d/1 %s",
+                         b->offset(),
+                         b->size(),
+                        "EMPTY");
+        break;
+      case LayoutRawBlock::PADDING:
+        output->print_cr(" @%d %d/1 %s",
+                         b->offset(),
+                         b->size(),
+                        "PADDING");
+        break;
+    }
+    b = b->next_block();
+  }
+}
+
+FieldLayoutBuilder::FieldLayoutBuilder(const Symbol* classname, const InstanceKlass* super_klass, ConstantPool* constant_pool,
+      Array<u2>* fields, bool is_contended, FieldLayoutInfo* info) :
+  _classname(classname),
+  _super_klass(super_klass),
+  _constant_pool(constant_pool),
+  _fields(fields),
+  _info(info),
+  _root_group(NULL),
+  _contended_groups(GrowableArray<FieldGroup*>(8)),
+  _static_fields(NULL),
+  _layout(NULL),
+  _static_layout(NULL),
+  _nonstatic_oopmap_count(0),
+  _alignment(-1),
+  _has_nonstatic_fields(false),
+  _is_contended(is_contended) {}
+
+
+FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
+  assert(g > 0, "must only be called for named contended groups");
+  FieldGroup* fg = NULL;
+  for (int i = 0; i < _contended_groups.length(); i++) {
+    fg = _contended_groups.at(i);
+    if (fg->contended_group() == g) return fg;
+  }
+  fg = new FieldGroup(g);
+  _contended_groups.append(fg);
+  return fg;
+}
+
+void FieldLayoutBuilder::prologue() {
+  _layout = new FieldLayout(_fields, _constant_pool);
+  const InstanceKlass* super_klass = _super_klass;
+  _layout->initialize_instance_layout(super_klass);
+  if (super_klass != NULL) {
+    _has_nonstatic_fields = super_klass->has_nonstatic_fields();
+  }
+  _static_layout = new FieldLayout(_fields, _constant_pool);
+  _static_layout->initialize_static_layout();
+  _static_fields = new FieldGroup();
+  _root_group = new FieldGroup();
+}
+
+// Field sorting for regular classes:
+//   - fields are sorted in static and non-static fields
+//   - non-static fields are also sorted according to their contention group
+//     (support of the @Contended annotation)
+//   - @Contended annotation is ignored for static fields
+void FieldLayoutBuilder::regular_field_sorting() {
+  for (AllFieldStream fs(_fields, _constant_pool); !fs.done(); fs.next()) {
+    FieldGroup* group = NULL;
+    if (fs.access_flags().is_static()) {
+      group = _static_fields;
+    } else {
+      _has_nonstatic_fields = true;
+      if (fs.is_contended()) {
+        int g = fs.contended_group();
+        if (g == 0) {
+          group = new FieldGroup(true);
+          _contended_groups.append(group);
+        } else {
+          group = get_or_create_contended_group(g);
+        }
+      } else {
+        group = _root_group;
+      }
+    }
+    assert(group != NULL, "invariant");
+    BasicType type = Signature::basic_type(fs.signature());
+    switch(type) {
+      case T_BYTE:
+      case T_CHAR:
+      case T_DOUBLE:
+      case T_FLOAT:
+      case T_INT:
+      case T_LONG:
+      case T_SHORT:
+      case T_BOOLEAN:
+        group->add_primitive_field(fs, type);
+        break;
+      case T_OBJECT:
+      case T_ARRAY:
+        if (group != _static_fields) _nonstatic_oopmap_count++;
+        group->add_oop_field(fs);
+        break;
+      default:
+        fatal("Something wrong?");
+    }
+  }
+  _root_group->sort_by_size();
+  _static_fields->sort_by_size();
+  if (!_contended_groups.is_empty()) {
+    for (int i = 0; i < _contended_groups.length(); i++) {
+      _contended_groups.at(i)->sort_by_size();
+    }
+  }
+}
+
+void FieldLayoutBuilder::insert_contended_padding(LayoutRawBlock* slot) {
+  if (ContendedPaddingWidth > 0) {
+    LayoutRawBlock* padding = new LayoutRawBlock(LayoutRawBlock::PADDING, ContendedPaddingWidth);
+    _layout->insert(slot, padding);
+  }
+}
+
+// Computation of regular classes layout is an evolution of the previous default layout
+// (FieldAllocationStyle 1):
+//   - primitive fields are allocated first (from the biggest to the smallest)
+//   - then oop fields are allocated, either in existing gaps or at the end of
+//     the layout
+void FieldLayoutBuilder::compute_regular_layout() {
+  bool need_tail_padding = false;
+  prologue();
+  regular_field_sorting();
+
+  if (_is_contended) {
+    _layout->set_start(_layout->last_block());
+    // insertion is currently easy because the current strategy doesn't try to fill holes
+    // in super classes layouts => the _start block is by consequence the _last_block
+    insert_contended_padding(_layout->start());
+    need_tail_padding = true;
+  }
+  _layout->add(_root_group->primitive_fields());
+  _layout->add(_root_group->oop_fields());
+
+  if (!_contended_groups.is_empty()) {
+    for (int i = 0; i < _contended_groups.length(); i++) {
+      FieldGroup* cg = _contended_groups.at(i);
+      LayoutRawBlock* start = _layout->last_block();
+      insert_contended_padding(start);
+      _layout->add(cg->primitive_fields(), start);
+      _layout->add(cg->oop_fields(), start);
+      need_tail_padding = true;
+    }
+  }
+
+  if (need_tail_padding) {
+    insert_contended_padding(_layout->last_block());
+  }
+
+  _static_layout->add_contiguously(this->_static_fields->oop_fields());
+  _static_layout->add(this->_static_fields->primitive_fields());
+
+  epilogue();
+}
+
+// Compute layout of the java/lang/ref/Reference class according
+// to the hard coded offsets of its fields
+void FieldLayoutBuilder::compute_java_lang_ref_Reference_layout() {
+  prologue();
+  regular_field_sorting();
+
+  assert(_contended_groups.is_empty(), "java.lang.Reference has no @Contended annotations");
+  assert(_root_group->primitive_fields() == NULL, "java.lang.Reference has no nonstatic primitive fields");
+  int field_count = 0;
+  int offset = -1;
+  for (int i = 0; i < _root_group->oop_fields()->length(); i++) {
+    LayoutRawBlock* b = _root_group->oop_fields()->at(i);
+    FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
+    if (fi->name(_constant_pool)->equals("referent")) {
+      offset = java_lang_ref_Reference::referent_offset;
+    } else if (fi->name(_constant_pool)->equals("queue")) {
+      offset = java_lang_ref_Reference::queue_offset;
+    } else if (fi->name(_constant_pool)->equals("next")) {
+      offset = java_lang_ref_Reference::next_offset;
+    } else if (fi->name(_constant_pool)->equals("discovered")) {
+      offset = java_lang_ref_Reference::discovered_offset;
+    }
+    assert(offset != -1, "Unknown field");
+    _layout->add_field_at_offset(b, offset);
+    field_count++;
+  }
+  assert(field_count == 4, "Wrong number of fields in java.lang.ref.Reference");
+
+  _static_layout->add_contiguously(this->_static_fields->oop_fields());
+  _static_layout->add(this->_static_fields->primitive_fields());
+
+  epilogue();
+}
+
+// Compute layout of the boxing class according
+// to the hard coded offsets of their fields
+void FieldLayoutBuilder::compute_boxing_class_layout() {
+  prologue();
+  regular_field_sorting();
+
+  assert(_contended_groups.is_empty(), "Boxing classes have no @Contended annotations");
+  assert(_root_group->oop_fields() == NULL, "Boxing classes have no nonstatic oops fields");
+  int field_count = 0;
+  int offset = -1;
+
+  for (int i = 0; i < _root_group->primitive_fields()->length(); i++) {
+    LayoutRawBlock* b = _root_group->primitive_fields()->at(i);
+    FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
+    assert(fi->name(_constant_pool)->equals("value"), "Boxing classes have a single nonstatic field named 'value'");
+    BasicType type = Signature::basic_type(fi->signature(_constant_pool));
+    offset = java_lang_boxing_object::value_offset_in_bytes(type);
+    assert(offset != -1, "Unknown field");
+    _layout->add_field_at_offset(b, offset);
+    field_count++;
+  }
+  assert(field_count == 1, "Wrong number of fields for a boxing class");
+
+  _static_layout->add_contiguously(this->_static_fields->oop_fields());
+  _static_layout->add(this->_static_fields->primitive_fields());
+
+  epilogue();
+}
+
+void FieldLayoutBuilder::epilogue() {
+  // Computing oopmaps
+  int super_oop_map_count = (_super_klass == NULL) ? 0 :_super_klass->nonstatic_oop_map_count();
+  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;
+
+  OopMapBlocksBuilder* nonstatic_oop_maps =
+      new OopMapBlocksBuilder(max_oop_map_count);
+  if (super_oop_map_count > 0) {
+    nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(),
+    _super_klass->nonstatic_oop_map_count());
+  }
+
+  if (_root_group->oop_fields() != NULL) {
+    for (int i = 0; i < _root_group->oop_fields()->length(); i++) {
+      LayoutRawBlock* b = _root_group->oop_fields()->at(i);
+      nonstatic_oop_maps->add(b->offset(), 1);
+    }
+  }
+
+  if (!_contended_groups.is_empty()) {
+    for (int i = 0; i < _contended_groups.length(); i++) {
+      FieldGroup* cg = _contended_groups.at(i);
+      if (cg->oop_count() > 0) {
+        assert(cg->oop_fields() != NULL && cg->oop_fields()->at(0) != NULL, "oop_count > 0 but no oop fields found");
+        nonstatic_oop_maps->add(cg->oop_fields()->at(0)->offset(), cg->oop_count());
+      }
+    }
+  }
+
+  nonstatic_oop_maps->compact();
+
+  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
+  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
+  int static_fields_size = (static_fields_end -
+      InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
+  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);
+
+  // Pass back information needed for InstanceKlass creation
+
+  _info->oop_map_blocks = nonstatic_oop_maps;
+  _info->_instance_size = align_object_size(instance_end / wordSize);
+  _info->_static_field_size = static_fields_size;
+  _info->_nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
+  _info->_has_nonstatic_fields = _has_nonstatic_fields;
+
+  if (PrintFieldLayout) {
+    ResourceMark rm;
+    tty->print_cr("Layout of class %s", _classname->as_C_string());
+    tty->print_cr("Instance fields:");
+    _layout->print(tty, false, _super_klass);
+    tty->print_cr("Static fields:");
+    _static_layout->print(tty, true, NULL);
+    tty->print_cr("Instance size = %d bytes", _info->_instance_size * wordSize);
+    tty->print_cr("---");
+  }
+}
+
+void FieldLayoutBuilder::build_layout() {
+  if (_classname == vmSymbols::java_lang_ref_Reference()) {
+    compute_java_lang_ref_Reference_layout();
+  } else if (_classname == vmSymbols::java_lang_Boolean() ||
+             _classname == vmSymbols::java_lang_Character() ||
+             _classname == vmSymbols::java_lang_Float() ||
+             _classname == vmSymbols::java_lang_Double() ||
+             _classname == vmSymbols::java_lang_Byte() ||
+             _classname == vmSymbols::java_lang_Short() ||
+             _classname == vmSymbols::java_lang_Integer() ||
+             _classname == vmSymbols::java_lang_Long()) {
+    compute_boxing_class_layout();
+  } else {
+    compute_regular_layout();
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/classfile/fieldLayoutBuilder.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CLASSFILE_FIELDLAYOUTBUILDER_HPP
+#define SHARE_CLASSFILE_FIELDLAYOUTBUILDER_HPP
+
+#include "classfile/classFileParser.hpp"
+#include "classfile/classLoaderData.hpp"
+#include "memory/allocation.hpp"
+#include "oops/fieldStreams.hpp"
+#include "utilities/growableArray.hpp"
+
+// Classes below are used to compute the field layout of classes.
+
+
+// A LayoutRawBlock describes an element of a layout.
+// Each field is represented by a LayoutRawBlock.
+// LayoutRawBlocks can also represent elements injected by the JVM:
+// padding, empty blocks, inherited fields, etc.
+// All LayoutRawBlocks must have a size and an alignment. The size is the
+// exact size of the field expressed in bytes. The alignment is
+// the alignment constraint of the field (1 for byte, 2 for short,
+// 4 for int, 8 for long, etc.)
+//
+// LayoutRawBlock are designed to be used in two data structures:
+//   - a linked list in a layout (using _next_block, _prev_block)
+//   - a GrowableArray in field group (the growable array contains pointers to LayoutRawBlocks)
+//
+//  next/prev pointers are included in the LayoutRawBlock class to narrow
+//  the number of allocation required during the computation of a layout.
+//
+class LayoutRawBlock : public ResourceObj {
+ public:
+  // Some code relies on the order of values below.
+  enum Kind {
+    EMPTY,         // empty slot, space is taken from this to allocate fields
+    RESERVED,      // reserved for JVM usage (for instance object header)
+    PADDING,       // padding (because of alignment constraints or @Contended)
+    REGULAR,       // primitive or oop field (including non-flattened inline fields)
+    FLATTENED,     // flattened field
+    INHERITED      // field(s) inherited from super classes
+  };
+
+ private:
+  LayoutRawBlock* _next_block;
+  LayoutRawBlock* _prev_block;
+  Kind _kind;
+  int _offset;
+  int _alignment;
+  int _size;
+  int _field_index;
+  bool _is_reference;
+
+ public:
+  LayoutRawBlock(Kind kind, int size);
+  LayoutRawBlock(int index, Kind kind, int size, int alignment, bool is_reference = false);
+  LayoutRawBlock* next_block() const { return _next_block; }
+  void set_next_block(LayoutRawBlock* next) { _next_block = next; }
+  LayoutRawBlock* prev_block() const { return _prev_block; }
+  void set_prev_block(LayoutRawBlock* prev) { _prev_block = prev; }
+  Kind kind() const { return _kind; }
+  int offset() const {
+    assert(_offset >= 0, "Must be initialized");
+    return _offset;
+  }
+  void set_offset(int offset) { _offset = offset; }
+  int alignment() const { return _alignment; }
+  int size() const { return _size; }
+  void set_size(int size) { _size = size; }
+  int field_index() const {
+    assert(_field_index != -1, "Must be initialized");
+    return _field_index;
+  }
+  bool is_reference() const { return _is_reference; }
+
+  bool fit(int size, int alignment);
+
+  static int compare_offset(LayoutRawBlock** x, LayoutRawBlock** y)  { return (*x)->offset() - (*y)->offset(); }
+  // compare_size_inverted() returns the opposite of a regular compare method in order to
+  // sort fields in decreasing order.
+  // Note: with line types, the comparison should include alignment constraint if sizes are equals
+  static int compare_size_inverted(LayoutRawBlock** x, LayoutRawBlock** y)  {
+#ifdef _WINDOWS
+    // qsort() on Windows reverse the order of fields with the same size
+    // the extension of the comparison function below preserves this order
+    int diff = (*y)->size() - (*x)->size();
+    if (diff == 0) {
+      diff = (*x)->field_index() - (*y)->field_index();
+    }
+    return diff;
+#else
+    return (*y)->size() - (*x)->size();
+#endif // _WINDOWS
+  }
+
+};
+
+// A Field group represents a set of fields that have to be allocated together,
+// this is the way the @Contended annotation is supported.
+// Inside a FieldGroup, fields are sorted based on their kind: primitive,
+// oop, or flattened.
+//
+class FieldGroup : public ResourceObj {
+
+ private:
+  FieldGroup* _next;
+  GrowableArray<LayoutRawBlock*>* _primitive_fields;
+  GrowableArray<LayoutRawBlock*>* _oop_fields;
+  int _contended_group;
+  int _oop_count;
+  static const int INITIAL_LIST_SIZE = 16;
+
+ public:
+  FieldGroup(int contended_group = -1);
+
+  FieldGroup* next() const { return _next; }
+  void set_next(FieldGroup* next) { _next = next; }
+  GrowableArray<LayoutRawBlock*>* primitive_fields() const { return _primitive_fields; }
+  GrowableArray<LayoutRawBlock*>* oop_fields() const { return _oop_fields; }
+  int contended_group() const { return _contended_group; }
+  int oop_count() const { return _oop_count; }
+
+  void add_primitive_field(AllFieldStream fs, BasicType type);
+  void add_oop_field(AllFieldStream fs);
+  void sort_by_size();
+};
+
+// The FieldLayout class represents a set of fields organized
+// in a layout.
+// An instance of FieldLayout can either represent the layout
+// of non-static fields (used in an instance object) or the
+// layout of static fields (to be included in the class mirror).
+//
+// _block is a pointer to a list of LayoutRawBlock ordered by increasing
+// offsets.
+// _start points to the LayoutRawBlock with the first offset that can
+// be used to allocate fields of the current class
+// _last points to the last LayoutRawBlock of the list. In order to
+// simplify the code, the LayoutRawBlock list always ends with an
+// EMPTY block (the kind of LayoutRawBlock from which space is taken
+// to allocate fields) with a size big enough to satisfy all
+// field allocations.
+//
+class FieldLayout : public ResourceObj {
+ private:
+  Array<u2>* _fields;
+  ConstantPool* _cp;
+  LayoutRawBlock* _blocks;  // the layout being computed
+  LayoutRawBlock* _start;   // points to the first block where a field can be inserted
+  LayoutRawBlock* _last;    // points to the last block of the layout (big empty block)
+
+ public:
+  FieldLayout(Array<u2>* fields, ConstantPool* cp);
+  void initialize_static_layout();
+  void initialize_instance_layout(const InstanceKlass* ik);
+
+  LayoutRawBlock* first_empty_block() {
+    LayoutRawBlock* block = _start;
+    while (block->kind() != LayoutRawBlock::EMPTY) {
+      block = block->next_block();
+    }
+    return block;
+  }
+
+  LayoutRawBlock* start() { return _start; }
+  void set_start(LayoutRawBlock* start) { _start = start; }
+  LayoutRawBlock* last_block() { return _last; }
+
+  LayoutRawBlock* first_field_block();
+  void add(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start = NULL);
+  void add_field_at_offset(LayoutRawBlock* blocks, int offset, LayoutRawBlock* start = NULL);
+  void add_contiguously(GrowableArray<LayoutRawBlock*>* list, LayoutRawBlock* start = NULL);
+  LayoutRawBlock* insert_field_block(LayoutRawBlock* slot, LayoutRawBlock* block);
+  void reconstruct_layout(const InstanceKlass* ik);
+  void fill_holes(const InstanceKlass* ik);
+  LayoutRawBlock* insert(LayoutRawBlock* slot, LayoutRawBlock* block);
+  void remove(LayoutRawBlock* block);
+  void print(outputStream* output, bool is_static, const InstanceKlass* super);
+};
+
+
+// FieldLayoutBuilder is the main entry point for layout computation.
+// This class has three methods to generate layout: one for regular classes
+// and two for classes with hard coded offsets (java,lang.ref.Reference
+// and the boxing classes). The rationale for having multiple methods
+// is that each kind of class has a different set goals regarding
+// its layout, so instead of mixing several layout strategies into a
+// single method, each kind has its own method (see comments below
+// for more details about the allocation strategies).
+//
+// Computing the layout of a class always goes through 4 steps:
+//   1 - Prologue: preparation of data structure and gathering of
+//       layout information inherited from super classes
+//   2 - Field sorting: fields are sorted according to their
+//       kind (oop, primitive, inline class) and their contention
+//       annotation (if any)
+//   3 - Layout is computed from the set of lists generated during
+//       step 2
+//   4 - Epilogue: oopmaps are generated, layout information is
+//       prepared so other VM components can use it (instance size,
+//       static field size, non-static field size, etc.)
+//
+//  Steps 1 and 4 are common to all layout computations. Step 2 and 3
+//  can vary with the allocation strategy.
+//
+class FieldLayoutBuilder : public ResourceObj {
+ private:
+
+  const Symbol* _classname;
+  const InstanceKlass* _super_klass;
+  ConstantPool* _constant_pool;
+  Array<u2>* _fields;
+  FieldLayoutInfo* _info;
+  FieldGroup* _root_group;
+  GrowableArray<FieldGroup*> _contended_groups;
+  FieldGroup* _static_fields;
+  FieldLayout* _layout;
+  FieldLayout* _static_layout;
+  int _nonstatic_oopmap_count;
+  int _alignment;
+  bool _has_nonstatic_fields;
+  bool _is_contended; // is a contended class?
+
+ public:
+  FieldLayoutBuilder(const Symbol* classname, const InstanceKlass* super_klass, ConstantPool* constant_pool,
+                     Array<u2>* fields, bool is_contended, FieldLayoutInfo* info);
+
+  int get_alignment() {
+    assert(_alignment != -1, "Uninitialized");
+    return _alignment;
+  }
+
+  void build_layout();
+  void compute_regular_layout();
+  void compute_java_lang_ref_Reference_layout();
+  void compute_boxing_class_layout();
+  void insert_contended_padding(LayoutRawBlock* slot);
+
+ private:
+  void prologue();
+  void epilogue();
+  void regular_field_sorting();
+  FieldGroup* get_or_create_contended_group(int g);
+};
+
+#endif // SHARE_CLASSFILE_FIELDLAYOUTBUILDER_HPP
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1799,8 +1799,8 @@
   // Create the G1ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
-  if (_cm == NULL || !_cm->completed_initialization()) {
-    vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
+  if (!_cm->completed_initialization()) {
+    vm_shutdown_during_initialization("Could not initialize G1ConcurrentMark");
     return JNI_ENOMEM;
   }
   _cm_thread = _cm->cm_thread();
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -72,11 +72,7 @@
   _cr = cr;
   _num_max_threads = num_max_threads;
 
-  _threads = NEW_C_HEAP_ARRAY_RETURN_NULL(G1ConcurrentRefineThread*, num_max_threads, mtGC);
-  if (_threads == NULL) {
-    vm_shutdown_during_initialization("Could not allocate thread holder array.");
-    return JNI_ENOMEM;
-  }
+  _threads = NEW_C_HEAP_ARRAY(G1ConcurrentRefineThread*, num_max_threads, mtGC);
 
   for (uint i = 0; i < num_max_threads; i++) {
     if (UseDynamicNumberOfGCThreads && i != 0 /* Always start first thread. */) {
@@ -303,13 +299,6 @@
                                                   yellow_zone,
                                                   red_zone,
                                                   min_yellow_zone_size);
-
-  if (cr == NULL) {
-    *ecode = JNI_ENOMEM;
-    vm_shutdown_during_initialization("Could not create G1ConcurrentRefine");
-    return NULL;
-  }
-
   *ecode = cr->initialize();
   return cr;
 }
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -138,7 +138,6 @@
   assert(last.next() == NULL, "precondition");
   BufferNode* old_tail = Atomic::xchg(&_tail, &last);
   if (old_tail == NULL) {       // Was empty.
-    assert(Atomic::load(&_head) == NULL, "invariant");
     Atomic::store(&_head, &first);
   } else {
     assert(old_tail->next() == NULL, "invariant");
@@ -146,53 +145,65 @@
   }
 }
 
-// pop gets the queue head as the candidate result (returning NULL if the
-// queue head was NULL), and then gets that result node's "next" value.  If
-// that "next" value is NULL and the queue head hasn't changed, then there
-// is only one element in the accessible part of the list (the sequence from
-// head to a node with a NULL "next" value).  We can't return that element,
-// because it may be the old tail of a concurrent push/append that has not
-// yet had its "next" field set to the new tail.  So return NULL in this case.
-// Otherwise, attempt to cmpxchg that "next" value into the queue head,
-// retrying the whole operation if that fails. This is the "usual" lock-free
-// pop from the head of a singly linked list, with the additional restriction
-// on taking the last element.
 BufferNode* G1DirtyCardQueueSet::Queue::pop() {
   Thread* current_thread = Thread::current();
   while (true) {
     // Use a critical section per iteration, rather than over the whole
-    // operation.  We're not guaranteed to make progress, because of possible
-    // contention on the queue head.  Lingering in one CS the whole time could
-    // lead to excessive allocation of buffers, because the CS blocks return
-    // of released buffers to the free list for reuse.
+    // operation.  We're not guaranteed to make progress.  Lingering in one
+    // CS could lead to excessive allocation of buffers, because the CS
+    // blocks return of released buffers to the free list for reuse.
     GlobalCounter::CriticalSection cs(current_thread);
 
     BufferNode* result = Atomic::load_acquire(&_head);
-    // Check for empty queue.  Only needs to be done on first iteration,
-    // since we never take the last element, but it's messy to make use
-    // of that and we expect one iteration to be the common case.
-    if (result == NULL) return NULL;
+    if (result == NULL) return NULL; // Queue is empty.
 
     BufferNode* next = Atomic::load_acquire(BufferNode::next_ptr(*result));
     if (next != NULL) {
-      next = Atomic::cmpxchg(&_head, result, next);
-      if (next == result) {
+      // The "usual" lock-free pop from the head of a singly linked list.
+      if (result == Atomic::cmpxchg(&_head, result, next)) {
         // Former head successfully taken; it is not the last.
         assert(Atomic::load(&_tail) != result, "invariant");
         assert(result->next() != NULL, "invariant");
         result->set_next(NULL);
         return result;
       }
-      // cmpxchg failed; try again.
-    } else if (result == Atomic::load_acquire(&_head)) {
-      // If follower of head is NULL and head hasn't changed, then only
-      // the one element is currently accessible.  We don't take the last
-      // accessible element, because there may be a concurrent add using it.
-      // The check for unchanged head isn't needed for correctness, but the
-      // retry on change may sometimes let us get a buffer after all.
-      return NULL;
+      // Lost the race; try again.
+      continue;
     }
-    // Head changed; try again.
+
+    // next is NULL.  This case is handled differently from the "usual"
+    // lock-free pop from the head of a singly linked list.
+
+    // If _tail == result then result is the only element in the list. We can
+    // remove it from the list by first setting _tail to NULL and then setting
+    // _head to NULL, the order being important.  We set _tail with cmpxchg in
+    // case of a concurrent push/append/pop also changing _tail.  If we win
+    // then we've claimed result.
+    if (Atomic::cmpxchg(&_tail, result, (BufferNode*)NULL) == result) {
+      assert(result->next() == NULL, "invariant");
+      // Now that we've claimed result, also set _head to NULL.  But we must
+      // be careful of a concurrent push/append after we NULLed _tail, since
+      // it may have already performed its list-was-empty update of _head,
+      // which we must not overwrite.
+      Atomic::cmpxchg(&_head, result, (BufferNode*)NULL);
+      return result;
+    }
+
+    // If _head != result then we lost the race to take result; try again.
+    if (result != Atomic::load_acquire(&_head)) {
+      continue;
+    }
+
+    // An in-progress concurrent operation interfered with taking the head
+    // element when it was the only element.  A concurrent pop may have won
+    // the race to clear the tail but not yet cleared the head. Alternatively,
+    // a concurrent push/append may have changed the tail but not yet linked
+    // result->next().  We cannot take result in either case.  We don't just
+    // try again, because we could spin for a long time waiting for that
+    // concurrent operation to finish.  In the first case, returning NULL is
+    // fine; we lost the race for the only element to another thread.  We
+    // also return NULL for the second case, and let the caller cope.
+    return NULL;
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -77,11 +77,8 @@
   };
 
   // A lock-free FIFO of BufferNodes, linked through their next() fields.
-  // This class has a restriction that pop() cannot return the last buffer
-  // in the queue, or what was the last buffer for a concurrent push/append
-  // operation.  It is expected that there will be a later push/append that
-  // will make that buffer available to a future pop(), or there will
-  // eventually be a complete transfer via take_all().
+  // This class has a restriction that pop() may return NULL when there are
+  // buffers in the queue if there is a concurrent push/append operation.
   class Queue {
     BufferNode* volatile _head;
     DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));
@@ -105,9 +102,10 @@
     void append(BufferNode& first, BufferNode& last);
 
     // Thread-safe attempt to remove and return the first buffer in the queue.
-    // Returns NULL if the queue is empty, or if only one buffer is found.
-    // Uses GlobalCounter critical sections to address the ABA problem; this
-    // works with the buffer allocator's use of GlobalCounter synchronization.
+    // Returns NULL if the queue is empty, or if a concurrent push/append
+    // interferes.  Uses GlobalCounter critical sections to address the ABA
+    // problem; this works with the buffer allocator's use of GlobalCounter
+    // synchronization.
     BufferNode* pop();
 
     // Take all the buffers from the queue, leaving the queue empty.
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -92,14 +92,7 @@
     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
   }
 
-  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
-                        mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-
-  if (_fine_grain_regions == NULL) {
-    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
-                          "Failed to allocate _fine_grain_entries.");
-  }
-
+  _fine_grain_regions = NEW_C_HEAP_ARRAY(PerRegionTablePtr, _max_fine_entries, mtGC);
   for (size_t i = 0; i < _max_fine_entries; i++) {
     _fine_grain_regions[i] = NULL;
   }
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -60,8 +60,7 @@
   assert(_init_gen_size != 0, "Should have a finite size");
   _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
   if (!_virtual_space->expand_by(_init_gen_size)) {
-    vm_exit_during_initialization("Could not reserve enough space for "
-                                  "object heap");
+    vm_exit_during_initialization("Could not reserve enough space for object heap");
   }
 }
 
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -40,15 +40,15 @@
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 
-PSOldGen*            ParCompactionManager::_old_gen = NULL;
+PSOldGen*               ParCompactionManager::_old_gen = NULL;
 ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
 
-OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
-ParCompactionManager::ObjArrayTaskQueueSet*
-  ParCompactionManager::_objarray_queues = NULL;
+ParCompactionManager::OopTaskQueueSet*      ParCompactionManager::_oop_task_queues = NULL;
+ParCompactionManager::ObjArrayTaskQueueSet* ParCompactionManager::_objarray_task_queues = NULL;
+ParCompactionManager::RegionTaskQueueSet*   ParCompactionManager::_region_task_queues = NULL;
+
 ObjectStartArray*    ParCompactionManager::_start_array = NULL;
 ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
-RegionTaskQueueSet*  ParCompactionManager::_region_array = NULL;
 GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = NULL;
 Monitor*                ParCompactionManager::_shadow_region_monitor = NULL;
 
@@ -77,27 +77,21 @@
   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
 
-  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
-  guarantee(_stack_array != NULL, "Could not allocate stack_array");
-  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
-  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
-  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
-  guarantee(_region_array != NULL, "Could not allocate region_array");
+  _oop_task_queues = new OopTaskQueueSet(parallel_gc_threads);
+  _objarray_task_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
+  _region_task_queues = new RegionTaskQueueSet(parallel_gc_threads);
 
   // Create and register the ParCompactionManager(s) for the worker threads.
   for(uint i=0; i<parallel_gc_threads; i++) {
     _manager_array[i] = new ParCompactionManager();
-    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
-    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
-    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
-    region_array()->register_queue(i, _manager_array[i]->region_stack());
+    oop_task_queues()->register_queue(i, _manager_array[i]->marking_stack());
+    _objarray_task_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
+    region_task_queues()->register_queue(i, _manager_array[i]->region_stack());
   }
 
   // The VMThread gets its own ParCompactionManager, which is not available
   // for work stealing.
   _manager_array[parallel_gc_threads] = new ParCompactionManager();
-  guarantee(_manager_array[parallel_gc_threads] != NULL,
-    "Could not create ParCompactionManager");
   assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
     "Not initialized?");
 
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -51,17 +51,22 @@
 
 
  private:
+  typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
+  typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
+
   // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
   #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
   typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
   typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC>      ObjArrayTaskQueueSet;
   #undef QUEUE_SIZE
+  typedef OverflowTaskQueue<size_t, mtGC>             RegionTaskQueue;
+  typedef GenericTaskQueueSet<RegionTaskQueue, mtGC>  RegionTaskQueueSet;
 
   static ParCompactionManager** _manager_array;
-  static OopTaskQueueSet*       _stack_array;
-  static ObjArrayTaskQueueSet*  _objarray_queues;
+  static OopTaskQueueSet*       _oop_task_queues;
+  static ObjArrayTaskQueueSet*  _objarray_task_queues;
   static ObjectStartArray*      _start_array;
-  static RegionTaskQueueSet*    _region_array;
+  static RegionTaskQueueSet*    _region_task_queues;
   static PSOldGen*              _old_gen;
 
 private:
@@ -90,13 +95,13 @@
 
   static PSOldGen* old_gen()             { return _old_gen; }
   static ObjectStartArray* start_array() { return _start_array; }
-  static OopTaskQueueSet* stack_array()  { return _stack_array; }
+  static OopTaskQueueSet* oop_task_queues()  { return _oop_task_queues; }
 
   static void initialize(ParMarkBitMap* mbm);
 
  protected:
   // Array of task queues.  Needed by the task terminator.
-  static RegionTaskQueueSet* region_array()      { return _region_array; }
+  static RegionTaskQueueSet* region_task_queues()      { return _region_task_queues; }
   OverflowTaskQueue<oop, mtGC>*  marking_stack()       { return &_marking_stack; }
 
   // Pushes onto the marking stack.  If the marking stack is full,
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,15 +70,15 @@
 };
 
 inline bool ParCompactionManager::steal(int queue_num, oop& t) {
-  return stack_array()->steal(queue_num, t);
+  return oop_task_queues()->steal(queue_num, t);
 }
 
 inline bool ParCompactionManager::steal_objarray(int queue_num, ObjArrayTask& t) {
-  return _objarray_queues->steal(queue_num, t);
+  return _objarray_task_queues->steal(queue_num, t);
 }
 
 inline bool ParCompactionManager::steal(int queue_num, size_t& region) {
-  return region_array()->steal(queue_num, region);
+  return region_task_queues()->steal(queue_num, region);
 }
 
 inline void ParCompactionManager::push(oop obj) {
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -135,10 +135,6 @@
   //
 
   _object_space = new MutableSpace(virtual_space()->alignment());
-
-  if (_object_space == NULL)
-    vm_exit_during_initialization("Could not allocate an old gen space");
-
   object_space()->initialize(cmr,
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle);
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -2178,7 +2178,7 @@
       AbstractGangTask("MarkFromRootsTask"),
       _strong_roots_scope(active_workers),
       _subtasks(),
-      _terminator(active_workers, ParCompactionManager::stack_array()),
+      _terminator(active_workers, ParCompactionManager::oop_task_queues()),
       _active_workers(active_workers) {
     _subtasks.set_n_threads(active_workers);
     _subtasks.set_n_tasks(ParallelRootType::sentinel);
@@ -2210,7 +2210,7 @@
       AbstractGangTask("PCRefProcTask"),
       _task(task),
       _ergo_workers(ergo_workers),
-      _terminator(_ergo_workers, ParCompactionManager::stack_array()) {
+      _terminator(_ergo_workers, ParCompactionManager::oop_task_queues()) {
   }
 
   virtual void work(uint worker_id) {
@@ -2626,7 +2626,7 @@
   UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
       AbstractGangTask("UpdateDensePrefixAndCompactionTask"),
       _tq(tq),
-      _terminator(active_workers, ParCompactionManager::region_array()),
+      _terminator(active_workers, ParCompactionManager::region_task_queues()),
       _active_workers(active_workers) {
   }
   virtual void work(uint worker_id) {
--- a/src/hotspot/share/gc/parallel/psPromotionManager.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -43,7 +43,7 @@
 #include "oops/compressedOops.inline.hpp"
 
 PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
-OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
+PSPromotionManager::OopStarTaskQueueSet* PSPromotionManager::_stack_array_depth = NULL;
 PreservedMarksSet*             PSPromotionManager::_preserved_marks_set = NULL;
 PSOldGen*                      PSPromotionManager::_old_gen = NULL;
 MutableSpace*                  PSPromotionManager::_young_space = NULL;
@@ -60,10 +60,8 @@
   // and make sure that the first instance starts at a cache line.
   assert(_manager_array == NULL, "Attempt to initialize twice");
   _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(promotion_manager_num);
-  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
 
   _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
-  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
 
   // Create and register the PSPromotionManager(s) for the worker threads.
   for(uint i=0; i<ParallelGCThreads; i++) {
@@ -74,7 +72,6 @@
 
   assert(_preserved_marks_set == NULL, "Attempt to initialize twice");
   _preserved_marks_set = new PreservedMarksSet(true /* in_c_heap */);
-  guarantee(_preserved_marks_set != NULL, "Could not initialize preserved marks set");
   _preserved_marks_set->init(promotion_manager_num);
   for (uint i = 0; i < promotion_manager_num; i += 1) {
     _manager_array[i].register_preserved_marks(_preserved_marks_set->get(i));
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,9 @@
   friend class PSRefProcTask;
 
  private:
+  typedef OverflowTaskQueue<StarTask, mtGC>           OopStarTaskQueue;
+  typedef GenericTaskQueueSet<OopStarTaskQueue, mtGC> OopStarTaskQueueSet;
+
   static PaddedEnd<PSPromotionManager>* _manager_array;
   static OopStarTaskQueueSet*           _stack_array_depth;
   static PreservedMarksSet*             _preserved_marks_set;
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -88,10 +88,6 @@
   _from_space = new MutableSpace(virtual_space()->alignment());
   _to_space   = new MutableSpace(virtual_space()->alignment());
 
-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
-    vm_exit_during_initialization("Could not allocate a young gen space");
-  }
-
   // Generation Counters - generation 0, 3 subspaces
   _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                            _max_gen_size, _virtual_space);
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -168,10 +168,6 @@
   _from_space = new ContiguousSpace();
   _to_space   = new ContiguousSpace();
 
-  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
-    vm_exit_during_initialization("Could not allocate a new gen space");
-  }
-
   // Compute the maximum eden and survivor space sizes. These sizes
   // are computed assuming the entire reserved space is committed.
   // These values are exported as performance counters.
--- a/src/hotspot/share/gc/shared/cardGeneration.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shared/cardGeneration.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -53,9 +53,6 @@
                                     heap_word_size(initial_byte_size));
   MemRegion committed_mr(start, heap_word_size(initial_byte_size));
   _rs->resize_covered_region(committed_mr);
-  if (_bts == NULL) {
-    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
-  }
 
   // Verify that the start and end of this generation is the start of a card.
   // If this wasn't true, a single card could span more than on generation,
--- a/src/hotspot/share/gc/shared/cardTable.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shared/cardTable.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -60,7 +60,7 @@
 
   assert(card_size <= 512, "card_size must be less than 512"); // why?
 
-  _covered   = new MemRegion[_max_covered_regions];
+  _covered = new MemRegion[_max_covered_regions];
   if (_covered == NULL) {
     vm_exit_during_initialization("Could not allocate card table covered region set.");
   }
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -579,11 +579,7 @@
   // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
   // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
   uint max_gens = 2;
-  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(CardValue, max_gens + 1,
-                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-  if (_last_cur_val_in_gen == NULL) {
-    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
-  }
+  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY(CardValue, max_gens + 1, mtGC);
   for (uint i = 0; i < max_gens + 1; i++) {
     _last_cur_val_in_gen[i] = clean_card_val();
   }
--- a/src/hotspot/share/gc/shared/generation.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shared/generation.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -79,9 +79,6 @@
   assert(!_reserved.is_empty(), "empty generation?");
   _span_based_discoverer.set_span(_reserved);
   _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
-  if (_ref_processor == NULL) {
-    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
-  }
 }
 
 void Generation::print() const { print_on(tty); }
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -64,9 +64,6 @@
   } else {
     _default_soft_ref_policy = new LRUCurrentHeapPolicy();
   }
-  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
-    vm_exit_during_initialization("Could not allocate reference policy object");
-  }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecognized RefDiscoveryPolicy");
--- a/src/hotspot/share/gc/shared/taskqueue.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -443,9 +443,6 @@
   virtual bool should_exit_termination() = 0;
 };
 
-typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
-typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
-
 #ifdef _MSC_VER
 #pragma warning(push)
 // warning C4522: multiple assignment operators specified
@@ -524,10 +521,4 @@
 #pragma warning(pop)
 #endif
 
-typedef OverflowTaskQueue<StarTask, mtGC>           OopStarTaskQueue;
-typedef GenericTaskQueueSet<OopStarTaskQueue, mtGC> OopStarTaskQueueSet;
-
-typedef OverflowTaskQueue<size_t, mtGC>             RegionTaskQueue;
-typedef GenericTaskQueueSet<RegionTaskQueue, mtGC>  RegionTaskQueueSet;
-
 #endif // SHARE_GC_SHARED_TASKQUEUE_HPP
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,14 +118,14 @@
 LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
   assert(ShenandoahLoadRefBarrier, "Should be enabled");
 
-  obj = ensure_in_register(gen, obj);
+  obj = ensure_in_register(gen, obj, T_OBJECT);
   assert(obj->is_register(), "must be a register at this point");
-  addr = ensure_in_register(gen, addr);
+  addr = ensure_in_register(gen, addr, T_ADDRESS);
   assert(addr->is_register(), "must be a register at this point");
   LIR_Opr result = gen->result_register_for(obj->value_type());
   __ move(obj, result);
-  LIR_Opr tmp1 = gen->new_register(T_OBJECT);
-  LIR_Opr tmp2 = gen->new_register(T_OBJECT);
+  LIR_Opr tmp1 = gen->new_register(T_ADDRESS);
+  LIR_Opr tmp2 = gen->new_register(T_ADDRESS);
 
   LIR_Opr thrd = gen->getThreadPointer();
   LIR_Address* active_flag_addr =
@@ -157,20 +157,14 @@
   return result;
 }
 
-LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) {
+LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type) {
   if (!obj->is_register()) {
     LIR_Opr obj_reg;
     if (obj->is_constant()) {
-      obj_reg = gen->new_register(T_OBJECT);
+      obj_reg = gen->new_register(type);
       __ move(obj, obj_reg);
     } else {
-#ifdef AARCH64
-      // AArch64 expects double-size register.
       obj_reg = gen->new_pointer_register();
-#else
-      // x86 expects single-size register.
-      obj_reg = gen->new_register(T_OBJECT);
-#endif
       __ leal(obj, obj_reg);
     }
     obj = obj_reg;
@@ -180,7 +174,7 @@
 
 LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
   if (ShenandoahStoreValEnqueueBarrier) {
-    obj = ensure_in_register(gen, obj);
+    obj = ensure_in_register(gen, obj, T_OBJECT);
     pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
   }
   return obj;
@@ -221,7 +215,7 @@
       BarrierSetC1::load_at_resolved(access, result);
       LIR_OprList* args = new LIR_OprList();
       LIR_Opr addr = access.resolved_addr();
-      addr = ensure_in_register(gen, addr);
+      addr = ensure_in_register(gen, addr, T_ADDRESS);
       args->append(result);
       args->append(addr);
       BasicTypeList signature;
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -198,7 +198,7 @@
 
   LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr);
 
-  LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj);
+  LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type);
 
 public:
   ShenandoahBarrierSetC1();
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -740,6 +740,9 @@
 }
 
 Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
+  if (c == NULL) {
+    return c;
+  }
   if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
     return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
   }
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -58,6 +58,11 @@
         return false;
       }
       C->clear_major_progress();
+      if (C->range_check_cast_count() > 0) {
+        // No more loop optimizations. Remove all range check dependent CastIINodes.
+        C->remove_range_check_casts(igvn);
+        igvn.optimize();
+      }
     }
   }
   return true;
@@ -2009,21 +2014,22 @@
       if (loop != phase->ltree_root() &&
           loop->_child == NULL &&
           !loop->_irreducible) {
-        LoopNode* head = loop->_head->as_Loop();
-        if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
+        Node* head = loop->_head;
+        if (head->is_Loop() &&
+            (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
             !seen.test_set(head->_idx)) {
           IfNode* iff = find_unswitching_candidate(loop, phase);
           if (iff != NULL) {
             Node* bol = iff->in(1);
-            if (head->is_strip_mined()) {
-              head->verify_strip_mined(0);
+            if (head->as_Loop()->is_strip_mined()) {
+              head->as_Loop()->verify_strip_mined(0);
             }
             move_heap_stable_test_out_of_loop(iff, phase);
 
             AutoNodeBudget node_budget(phase);
 
             if (loop->policy_unswitching(phase)) {
-              if (head->is_strip_mined()) {
+              if (head->as_Loop()->is_strip_mined()) {
                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
               }
@@ -2291,7 +2297,12 @@
         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
           mem = in->in(TypeFunc::Memory);
         } else if (in_opc == Op_Halt) {
-          if (!in->in(0)->is_Region()) {
+          if (in->in(0)->is_Region()) {
+            Node* r = in->in(0);
+            for (uint j = 1; j < r->req(); j++) {
+              assert(r->in(j)->Opcode() != Op_NeverBranch, "");
+            }
+          } else {
             Node* proj = in->in(0);
             assert(proj->is_Proj(), "");
             Node* in = proj->in(0);
@@ -2303,25 +2314,37 @@
               assert(call->is_Call(), "");
               mem = call->in(TypeFunc::Memory);
             } else if (in->Opcode() == Op_NeverBranch) {
-              ResourceMark rm;
-              Unique_Node_List wq;
-              wq.push(in);
-              wq.push(in->as_Multi()->proj_out(0));
-              for (uint j = 1; j < wq.size(); j++) {
-                Node* c = wq.at(j);
-                assert(!c->is_Root(), "shouldn't leave loop");
-                if (c->is_SafePoint()) {
-                  assert(mem == NULL, "only one safepoint");
+              Node* head = in->in(0);
+              assert(head->is_Region() && head->req() == 3, "unexpected infinite loop graph shape");
+              assert(_phase->is_dominator(head, head->in(1)) || _phase->is_dominator(head, head->in(2)), "no back branch?");
+              Node* tail = _phase->is_dominator(head, head->in(1)) ? head->in(1) : head->in(2);
+              Node* c = tail;
+              while (c != head) {
+                if (c->is_SafePoint() && !c->is_CallLeaf()) {
                   mem = c->in(TypeFunc::Memory);
                 }
-                for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
-                  Node* u = c->fast_out(k);
-                  if (u->is_CFG()) {
-                    wq.push(u);
+                c = _phase->idom(c);
+              }
+              assert(mem != NULL, "should have found safepoint");
+
+              Node* phi_mem = NULL;
+              for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
+                Node* u = head->fast_out(j);
+                if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
+                  if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
+                    assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
+                    phi_mem = u;
+                  } else if (u->adr_type() == TypePtr::BOTTOM) {
+                    assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
+                    if (phi_mem == NULL) {
+                      phi_mem = u;
+                    }
                   }
                 }
               }
-              assert(mem != NULL, "should have found safepoint");
+              if (phi_mem != NULL) {
+                mem = phi_mem;
+              }
             }
           }
         } else {
--- a/src/hotspot/share/include/cds.h	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/include/cds.h	Wed Mar 04 12:01:01 2020 +0100
@@ -36,7 +36,7 @@
 #define NUM_CDS_REGIONS 8 // this must be the same as MetaspaceShared::n_regions
 #define CDS_ARCHIVE_MAGIC 0xf00baba2
 #define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8
-#define CURRENT_CDS_ARCHIVE_VERSION 9
+#define CURRENT_CDS_ARCHIVE_VERSION 10
 #define INVALID_CDS_ARCHIVE_VERSION -1
 
 struct CDSFileMapRegion {
--- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -435,7 +435,7 @@
 
 void JfrThreadSampler::enroll() {
   if (_disenrolled) {
-    log_info(jfr)("Enrolling thread sampler");
+    log_trace(jfr)("Enrolling thread sampler");
     _sample.signal();
     _disenrolled = false;
   }
@@ -445,7 +445,7 @@
   if (!_disenrolled) {
     _sample.wait();
     _disenrolled = true;
-    log_info(jfr)("Disenrolling thread sampler");
+    log_trace(jfr)("Disenrolling thread sampler");
   }
 }
 
@@ -583,12 +583,12 @@
 }
 
 static void log(size_t interval_java, size_t interval_native) {
-  log_info(jfr)("Updated thread sampler for java: " SIZE_FORMAT "  ms, native " SIZE_FORMAT " ms", interval_java, interval_native);
+  log_trace(jfr)("Updated thread sampler for java: " SIZE_FORMAT "  ms, native " SIZE_FORMAT " ms", interval_java, interval_native);
 }
 
 void JfrThreadSampling::start_sampler(size_t interval_java, size_t interval_native) {
   assert(_sampler == NULL, "invariant");
-  log_info(jfr)("Enrolling thread sampler");
+  log_trace(jfr)("Enrolling thread sampler");
   _sampler = new JfrThreadSampler(interval_java, interval_native, JfrOptionSet::stackdepth());
   _sampler->start_thread();
   _sampler->enroll();
@@ -608,7 +608,7 @@
   }
   if (interval_java > 0 || interval_native > 0) {
     if (_sampler == NULL) {
-      log_info(jfr)("Creating thread sampler for java:%zu ms, native %zu ms", interval_java, interval_native);
+      log_trace(jfr)("Creating thread sampler for java:%zu ms, native %zu ms", interval_java, interval_native);
       start_sampler(interval_java, interval_native);
     } else {
       _sampler->set_java_interval(interval_java);
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1461,7 +1461,7 @@
   JVMCIENV->HotSpotStackFrameReference_initialize(JVMCI_CHECK);
 
   // look for the given stack frame
-  StackFrameStream fst(thread);
+  StackFrameStream fst(thread, false);
   intptr_t* stack_pointer = (intptr_t*) JVMCIENV->get_HotSpotStackFrameReference_stackPointer(hs_frame);
   while (fst.current()->sp() != stack_pointer && !fst.is_done()) {
     fst.next();
@@ -1477,7 +1477,7 @@
     assert(fst.current()->cb()->is_nmethod(), "nmethod expected");
     ((nmethod*) fst.current()->cb())->make_not_entrant();
   }
-  Deoptimization::deoptimize(thread, *fst.current(), fst.register_map(), Deoptimization::Reason_none);
+  Deoptimization::deoptimize(thread, *fst.current(), Deoptimization::Reason_none);
   // look for the frame again as it has been updated by deopt (pc, deopt state...)
   StackFrameStream fstAfterDeopt(thread);
   while (fstAfterDeopt.current()->sp() != stack_pointer && !fstAfterDeopt.is_done()) {
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -157,7 +157,7 @@
   nonstatic_field(InstanceKlass,               _constants,                                    ConstantPool*)                         \
   nonstatic_field(InstanceKlass,               _source_file_name_index,                       u2)                                    \
   nonstatic_field(InstanceKlass,               _init_state,                                   u1)                                    \
-  nonstatic_field(InstanceKlass,               _misc_flags,                                   u2)                                    \
+  nonstatic_field(InstanceKlass,               _misc_flags,                                   u4)                                    \
   nonstatic_field(InstanceKlass,               _annotations,                                  Annotations*)                          \
                                                                                                                                      \
   volatile_nonstatic_field(JavaFrameAnchor,    _last_Java_sp,                                 intptr_t*)                             \
--- a/src/hotspot/share/memory/filemap.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/memory/filemap.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -210,6 +210,8 @@
   _narrow_oop_mode = CompressedOops::mode();
   _narrow_oop_base = CompressedOops::base();
   _narrow_oop_shift = CompressedOops::shift();
+  _compressed_oops = UseCompressedOops;
+  _compressed_class_ptrs = UseCompressedClassPointers;
   _max_heap_size = MaxHeapSize;
   _narrow_klass_shift = CompressedKlassPointers::shift();
   if (HeapShared::is_heap_object_archiving_allowed()) {
@@ -2041,6 +2043,14 @@
             "for testing purposes only and should not be used in a production environment");
   }
 
+  log_info(cds)("Archive was created with UseCompressedOops = %d, UseCompressedClassPointers = %d",
+                          compressed_oops(), compressed_class_pointers());
+  if (compressed_oops() != UseCompressedOops || compressed_class_pointers() != UseCompressedClassPointers) {
+    FileMapInfo::fail_continue("Unable to use shared archive.\nThe saved state of UseCompressedOops and UseCompressedClassPointers is "
+                               "different from runtime, CDS will be disabled.");
+    return false;
+  }
+
   return true;
 }
 
--- a/src/hotspot/share/memory/filemap.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/memory/filemap.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -187,6 +187,8 @@
   uintx  _max_heap_size;            // java max heap size during dumping
   CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode
   int     _narrow_klass_shift;      // save narrow klass base and shift
+  bool    _compressed_oops;         // save the flag UseCompressedOops
+  bool    _compressed_class_ptrs;   // save the flag UseCompressedClassPointers
   size_t  _cloned_vtables_offset;   // The address of the first cloned vtable
   size_t  _serialized_data_offset;  // Data accessed using {ReadClosure,WriteClosure}::serialize()
   size_t  _i2i_entry_code_buffers_offset;
@@ -264,7 +266,8 @@
   char* mapped_base_address()              const { return _mapped_base_address; }
   bool has_platform_or_app_classes()       const { return _has_platform_or_app_classes; }
   size_t ptrmap_size_in_bits()             const { return _ptrmap_size_in_bits; }
-
+  bool compressed_oops()                   const { return _compressed_oops; }
+  bool compressed_class_pointers()         const { return _compressed_class_ptrs; }
   // FIXME: These should really return int
   jshort max_used_path_index()             const { return _max_used_path_index; }
   jshort app_module_paths_start_index()    const { return _app_module_paths_start_index; }
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -294,23 +294,21 @@
   //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
   //   then the RO parts.
 
-  assert(UseCompressedOops && UseCompressedClassPointers,
-      "UseCompressedOops and UseCompressedClassPointers must be set");
-
   size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
   ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
   CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
   _shared_rs = _shared_rs.first_part(max_archive_size);
 
-  // Set up compress class pointers.
-  CompressedKlassPointers::set_base((address)_shared_rs.base());
-  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
-  // with AOT.
-  CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
-  // Set the range of klass addresses to 4GB.
-  CompressedKlassPointers::set_range(cds_total);
-
-  Metaspace::initialize_class_space(tmp_class_space);
+  if (UseCompressedClassPointers) {
+    // Set up compress class pointers.
+    CompressedKlassPointers::set_base((address)_shared_rs.base());
+    // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
+    // with AOT.
+    CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
+    // Set the range of klass addresses to 4GB.
+    CompressedKlassPointers::set_range(cds_total);
+    Metaspace::initialize_class_space(tmp_class_space);
+  }
   log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                 p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
 
@@ -2181,8 +2179,8 @@
           // map_heap_regions() compares the current narrow oop and klass encodings
           // with the archived ones, so it must be done after all encodings are determined.
           static_mapinfo->map_heap_regions();
+          CompressedKlassPointers::set_range(CompressedClassSpaceSize);
         }
-        CompressedKlassPointers::set_range(CompressedClassSpaceSize);
       });
   } else {
     unmap_archive(static_mapinfo);
--- a/src/hotspot/share/oops/fieldStreams.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/oops/fieldStreams.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,6 @@
   fieldDescriptor     _fd_buf;
 
   FieldInfo* field() const { return FieldInfo::from_field_array(_fields, _index); }
-  InstanceKlass* field_holder() const { return _constants->pool_holder(); }
 
   int init_generic_signature_start_slot() {
     int length = _fields->length();
@@ -87,6 +86,7 @@
 
   // accessors
   int index() const                 { return _index; }
+  InstanceKlass* field_holder() const { return _constants->pool_holder(); }
 
   void next() {
     if (access_flags().field_has_generic_signature()) {
--- a/src/hotspot/share/oops/instanceKlass.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1399,6 +1399,10 @@
   oop_map_cache->lookup(method, bci, entry_for);
 }
 
+bool InstanceKlass::contains_field_offset(int offset) {
+  fieldDescriptor fd;
+  return find_field_from_offset(offset, false, &fd);
+}
 
 bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
--- a/src/hotspot/share/oops/instanceKlass.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -103,12 +103,28 @@
   uint count() const         { return _count; }
   void set_count(uint count) { _count = count; }
 
+  void increment_count(int diff) { _count += diff; }
+
+  int offset_span() const { return _count * heapOopSize; }
+
+  int end_offset() const {
+    return offset() + offset_span();
+  }
+
+  bool is_contiguous(int another_offset) const {
+    return another_offset == end_offset();
+  }
+
   // sizeof(OopMapBlock) in words.
   static const int size_in_words() {
     return align_up((int)sizeof(OopMapBlock), wordSize) >>
       LogBytesPerWord;
   }
 
+  static int compare_offset(const OopMapBlock* a, const OopMapBlock* b) {
+    return a->offset() - b->offset();
+  }
+
  private:
   int  _offset;
   uint _count;
@@ -212,7 +228,6 @@
   // _is_marked_dependent can be set concurrently, thus cannot be part of the
   // _misc_flags.
   bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
-  bool            _is_being_redefined;   // used for locking redefinition
 
   // The low two bits of _misc_flags contains the kind field.
   // This can be used to quickly discriminate among the four kinds of
@@ -243,12 +258,14 @@
     _misc_is_shared_boot_class                = 1 << 12, // defining class loader is boot class loader
     _misc_is_shared_platform_class            = 1 << 13, // defining class loader is platform class loader
     _misc_is_shared_app_class                 = 1 << 14, // defining class loader is app class loader
-    _misc_has_resolved_methods                = 1 << 15  // resolved methods table entries added for this class
+    _misc_has_resolved_methods                = 1 << 15, // resolved methods table entries added for this class
+    _misc_is_being_redefined                  = 1 << 16, // used for locking redefinition
+    _misc_has_contended_annotations           = 1 << 17  // has @Contended annotation
   };
   u2 loader_type_bits() {
     return _misc_is_shared_boot_class|_misc_is_shared_platform_class|_misc_is_shared_app_class;
   }
-  u2              _misc_flags;
+  u4              _misc_flags;
   u2              _minor_version;        // minor version number of class file
   u2              _major_version;        // major version number of class file
   Thread*         _init_thread;          // Pointer to current thread doing initialization (to handle recursive initialization)
@@ -571,9 +588,7 @@
   Klass* find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const;
 
   // find a non-static or static field given its offset within the class.
-  bool contains_field_offset(int offset) {
-    return instanceOopDesc::contains_field_offset(offset, nonstatic_field_size());
-  }
+  bool contains_field_offset(int offset);
 
   bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
   bool find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
@@ -735,10 +750,29 @@
     _nonstatic_oop_map_size = words;
   }
 
+  bool has_contended_annotations() const {
+    return ((_misc_flags & _misc_has_contended_annotations) != 0);
+  }
+  void set_has_contended_annotations(bool value)  {
+    if (value) {
+      _misc_flags |= _misc_has_contended_annotations;
+    } else {
+      _misc_flags &= ~_misc_has_contended_annotations;
+    }
+  }
+
 #if INCLUDE_JVMTI
   // Redefinition locking.  Class can only be redefined by one thread at a time.
-  bool is_being_redefined() const          { return _is_being_redefined; }
-  void set_is_being_redefined(bool value)  { _is_being_redefined = value; }
+  bool is_being_redefined() const          {
+    return ((_misc_flags & _misc_is_being_redefined) != 0);
+  }
+  void set_is_being_redefined(bool value)  {
+    if (value) {
+      _misc_flags |= _misc_is_being_redefined;
+    } else {
+      _misc_flags &= ~_misc_is_being_redefined;
+    }
+  }
 
   // RedefineClasses() support for previous versions:
   void add_previous_version(InstanceKlass* ik, int emcp_method_count);
--- a/src/hotspot/share/oops/instanceOop.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/oops/instanceOop.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,12 +43,6 @@
              klass_gap_offset_in_bytes() :
              sizeof(instanceOopDesc);
   }
-
-  static bool contains_field_offset(int offset, int nonstatic_field_size) {
-    int base_in_bytes = base_offset_in_bytes();
-    return (offset >= base_in_bytes &&
-            (offset-base_in_bytes) < nonstatic_field_size * heapOopSize);
-  }
 };
 
 #endif // SHARE_OOPS_INSTANCEOOP_HPP
--- a/src/hotspot/share/oops/method.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/oops/method.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -811,7 +811,13 @@
     for (int i = 0; i < length; i++) {
       CheckedExceptionElement* table = h_this->checked_exceptions_start(); // recompute on each iteration, not gc safe
       Klass* k = h_this->constants()->klass_at(table[i].class_cp_index, CHECK_(objArrayHandle()));
-      assert(k->is_subclass_of(SystemDictionary::Throwable_klass()), "invalid exception class");
+      if (log_is_enabled(Warning, exceptions) &&
+          !k->is_subclass_of(SystemDictionary::Throwable_klass())) {
+        ResourceMark rm(THREAD);
+        log_warning(exceptions)(
+          "Class %s in throws clause of method %s is not a subtype of class java.lang.Throwable",
+          k->external_name(), method->external_name());
+      }
       mirrors->obj_at_put(i, k->java_mirror());
     }
     return mirrors;
--- a/src/hotspot/share/oops/objArrayKlass.inline.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/oops/objArrayKlass.inline.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,7 @@
 // for objArrayOops.
 template <typename T, class OopClosureType>
 void ObjArrayKlass::oop_oop_iterate_range(objArrayOop a, OopClosureType* closure, int start, int end) {
-  T* low = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr_raw<T>(start);
+  T* low = (T*)a->base_raw() + start;
   T* high = (T*)a->base_raw() + end;
 
   oop_oop_iterate_elements_bounded<T>(a, closure, low, high);
--- a/src/hotspot/share/opto/c2_globals.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/c2_globals.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -611,12 +611,6 @@
   develop(bool, ConvertFloat2IntClipping, true,                             \
           "Convert float2int clipping idiom to integer clipping")           \
                                                                             \
-  develop(bool, Use24BitFPMode, true,                                       \
-          "Set 24-bit FPU mode on a per-compile basis ")                    \
-                                                                            \
-  develop(bool, Use24BitFP, true,                                           \
-          "use FP instructions that produce 24-bit precise results")        \
-                                                                            \
   develop(bool, MonomorphicArrayCheck, true,                                \
           "Uncommon-trap array store checks that require full type check")  \
                                                                             \
--- a/src/hotspot/share/opto/compile.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/compile.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1094,7 +1094,7 @@
   _matcher = NULL;  // filled in later
   _cfg     = NULL;  // filled in later
 
-  set_24_bit_selection_and_mode(Use24BitFP, false);
+  IA32_ONLY( set_24_bit_selection_and_mode(true, false); )
 
   _node_note_array = NULL;
   _default_node_notes = NULL;
@@ -3713,14 +3713,16 @@
     }
   }
 
+#ifdef IA32
   // If original bytecodes contained a mixture of floats and doubles
   // check if the optimizer has made it homogenous, item (3).
-  if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
+  if (UseSSE == 0 &&
       frc.get_float_count() > 32 &&
       frc.get_double_count() == 0 &&
       (10 * frc.get_call_count() < frc.get_float_count()) ) {
-    set_24_bit_selection_and_mode( false,  true );
+    set_24_bit_selection_and_mode(false, true);
   }
+#endif // IA32
 
   set_java_calls(frc.get_java_call_count());
   set_inner_loops(frc.get_inner_loop_count());
--- a/src/hotspot/share/opto/compile.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/compile.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -582,8 +582,6 @@
  private:
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*             _cfg;                   // Results of CFG finding
-  bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
-  bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
   int                   _java_calls;            // Number of java calls in the method
   int                   _inner_loops;           // Number of inner loops in the method
   Matcher*              _matcher;               // Engine to map ideal to machine instructions
@@ -1122,8 +1120,6 @@
 
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*         cfg()                       { return _cfg; }
-  bool              select_24_bit_instr() const { return _select_24_bit_instr; }
-  bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
   bool              has_java_calls() const      { return _java_calls > 0; }
   int               java_calls() const          { return _java_calls; }
   int               inner_loops() const         { return _inner_loops; }
@@ -1155,12 +1151,6 @@
   void          set_indexSet_arena(Arena* a)            { _indexSet_arena = a; }
   void          set_indexSet_free_block_list(void* p)   { _indexSet_free_block_list = p; }
 
-  // Remember if this compilation changes hardware mode to 24-bit precision
-  void set_24_bit_selection_and_mode(bool selection, bool mode) {
-    _select_24_bit_instr = selection;
-    _in_24_bit_fp_mode   = mode;
-  }
-
   void  set_java_calls(int z) { _java_calls  = z; }
   void set_inner_loops(int z) { _inner_loops = z; }
 
@@ -1413,6 +1403,22 @@
   bool needs_clinit_barrier(ciField* ik,         ciMethod* accessing_method);
   bool needs_clinit_barrier(ciMethod* ik,        ciMethod* accessing_method);
   bool needs_clinit_barrier(ciInstanceKlass* ik, ciMethod* accessing_method);
+
+#ifdef IA32
+ private:
+  bool _select_24_bit_instr;   // We selected an instruction with a 24-bit result
+  bool _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
+
+  // Remember if this compilation changes hardware mode to 24-bit precision.
+  void set_24_bit_selection_and_mode(bool selection, bool mode) {
+    _select_24_bit_instr = selection;
+    _in_24_bit_fp_mode   = mode;
+  }
+
+ public:
+  bool select_24_bit_instr() const { return _select_24_bit_instr; }
+  bool in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
+#endif // IA32
 };
 
 #endif // SHARE_OPTO_COMPILE_HPP
--- a/src/hotspot/share/opto/doCall.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/doCall.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -150,8 +150,9 @@
     return cg;
   }
 
-  // Do not inline strict fp into non-strict code, or the reverse
-  if (caller->is_strict() ^ callee->is_strict()) {
+  // If explicit rounding is required, do not inline strict into non-strict code (or the reverse).
+  if (Matcher::strict_fp_requires_explicit_rounding &&
+      caller->is_strict() != callee->is_strict()) {
     allow_inline = false;
   }
 
--- a/src/hotspot/share/opto/graphKit.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/graphKit.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -2142,22 +2142,6 @@
 }
 
 
-void GraphKit::round_double_arguments(ciMethod* dest_method) {
-  // (Note:  TypeFunc::make has a cache that makes this fast.)
-  const TypeFunc* tf    = TypeFunc::make(dest_method);
-  int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
-  for (int j = 0; j < nargs; j++) {
-    const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
-    if( targ->basic_type() == T_DOUBLE ) {
-      // If any parameters are doubles, they must be rounded before
-      // the call, dstore_rounding does gvn.transform
-      Node *arg = argument(j);
-      arg = dstore_rounding(arg);
-      set_argument(j, arg);
-    }
-  }
-}
-
 /**
  * Record profiling data exact_kls for Node n with the type system so
  * that it can propagate it (speculation)
@@ -2323,43 +2307,80 @@
 }
 
 void GraphKit::round_double_result(ciMethod* dest_method) {
-  // A non-strict method may return a double value which has an extended
-  // exponent, but this must not be visible in a caller which is 'strict'
-  // If a strict caller invokes a non-strict callee, round a double result
-
-  BasicType result_type = dest_method->return_type()->basic_type();
-  assert( method() != NULL, "must have caller context");
-  if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) {
-    // Destination method's return value is on top of stack
-    // dstore_rounding() does gvn.transform
-    Node *result = pop_pair();
-    result = dstore_rounding(result);
-    push_pair(result);
+  if (Matcher::strict_fp_requires_explicit_rounding) {
+    // If a strict caller invokes a non-strict callee, round a double result.
+    // A non-strict method may return a double value which has an extended exponent,
+    // but this must not be visible in a caller which is strict.
+    BasicType result_type = dest_method->return_type()->basic_type();
+    assert(method() != NULL, "must have caller context");
+    if( result_type == T_DOUBLE && method()->is_strict() && !dest_method->is_strict() ) {
+      // Destination method's return value is on top of stack
+      // dstore_rounding() does gvn.transform
+      Node *result = pop_pair();
+      result = dstore_rounding(result);
+      push_pair(result);
+    }
   }
 }
 
+void GraphKit::round_double_arguments(ciMethod* dest_method) {
+  if (Matcher::strict_fp_requires_explicit_rounding) {
+    // (Note:  TypeFunc::make has a cache that makes this fast.)
+    const TypeFunc* tf    = TypeFunc::make(dest_method);
+    int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
+    for (int j = 0; j < nargs; j++) {
+      const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
+      if (targ->basic_type() == T_DOUBLE) {
+        // If any parameters are doubles, they must be rounded before
+        // the call, dstore_rounding does gvn.transform
+        Node *arg = argument(j);
+        arg = dstore_rounding(arg);
+        set_argument(j, arg);
+      }
+    }
+  }
+}
+
 // rounding for strict float precision conformance
 Node* GraphKit::precision_rounding(Node* n) {
-  return UseStrictFP && _method->flags().is_strict()
-    && UseSSE == 0 && Matcher::strict_fp_requires_explicit_rounding
-    ? _gvn.transform( new RoundFloatNode(0, n) )
-    : n;
+  if (Matcher::strict_fp_requires_explicit_rounding) {
+#ifdef IA32
+    if (_method->flags().is_strict() && UseSSE == 0) {
+      return _gvn.transform(new RoundFloatNode(0, n));
+    }
+#else
+    Unimplemented();
+#endif // IA32
+  }
+  return n;
 }
 
 // rounding for strict double precision conformance
 Node* GraphKit::dprecision_rounding(Node *n) {
-  return UseStrictFP && _method->flags().is_strict()
-    && UseSSE <= 1 && Matcher::strict_fp_requires_explicit_rounding
-    ? _gvn.transform( new RoundDoubleNode(0, n) )
-    : n;
+  if (Matcher::strict_fp_requires_explicit_rounding) {
+#ifdef IA32
+    if (_method->flags().is_strict() && UseSSE < 2) {
+      return _gvn.transform(new RoundDoubleNode(0, n));
+    }
+#else
+    Unimplemented();
+#endif // IA32
+  }
+  return n;
 }
 
 // rounding for non-strict double stores
 Node* GraphKit::dstore_rounding(Node* n) {
-  return Matcher::strict_fp_requires_explicit_rounding
-    && UseSSE <= 1
-    ? _gvn.transform( new RoundDoubleNode(0, n) )
-    : n;
+  if (Matcher::strict_fp_requires_explicit_rounding) {
+#ifdef IA32
+    if (UseSSE < 2) {
+      return _gvn.transform(new RoundDoubleNode(0, n));
+    }
+#else
+    Unimplemented();
+#endif // IA32
+  }
+  return n;
 }
 
 //=============================================================================
--- a/src/hotspot/share/opto/library_call.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/library_call.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1785,8 +1785,15 @@
 //--------------------------round_double_node--------------------------------
 // Round a double node if necessary.
 Node* LibraryCallKit::round_double_node(Node* n) {
-  if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1)
-    n = _gvn.transform(new RoundDoubleNode(0, n));
+  if (Matcher::strict_fp_requires_explicit_rounding) {
+#ifdef IA32
+    if (UseSSE < 2) {
+      n = _gvn.transform(new RoundDoubleNode(NULL, n));
+    }
+#else
+    Unimplemented();
+#endif // IA32
+  }
   return n;
 }
 
@@ -4845,8 +4852,6 @@
 
   // This arraycopy must unconditionally follow the allocation of the ptr.
   Node* alloc_ctl = ptr->in(0);
-  assert(just_allocated_object(alloc_ctl) == ptr, "most recent allo");
-
   Node* ctl = control();
   while (ctl != alloc_ctl) {
     // There may be guards which feed into the slow_region.
--- a/src/hotspot/share/opto/loopopts.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/loopopts.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -898,6 +898,7 @@
             // Move the store out of the loop if the LCA of all
             // users (except for the phi) is outside the loop.
             Node* hook = new Node(1);
+            hook->init_req(0, n_ctrl); // Add an input to prevent hook from being dead
             _igvn.rehash_node_delayed(phi);
             int count = phi->replace_edge(n, hook);
             assert(count > 0, "inconsistent phi");
--- a/src/hotspot/share/opto/matcher.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/matcher.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -2010,6 +2010,15 @@
 }
 #endif // X86
 
+bool Matcher::is_vshift_con_pattern(Node *n, Node *m) {
+  if (n != NULL && m != NULL) {
+    return VectorNode::is_vector_shift(n) &&
+           VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
+  }
+  return false;
+}
+
+
 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
   Node *off = m->in(AddPNode::Offset);
   if (off->is_Con()) {
@@ -2090,6 +2099,10 @@
           continue;
         }
 #endif
+        if (is_vshift_con_pattern(n, m)) {
+          mstack.push(m, Visit);
+          continue;
+        }
 
         // Clone addressing expressions as they are "free" in memory access instructions
         if (mem_op && i == mem_addr_idx && mop == Op_AddP &&
@@ -2525,22 +2538,16 @@
 //----------------------------------------------------------------------
 
 // Convert (leg)Vec to (leg)Vec[SDXYZ].
-MachOper* Matcher::specialize_vector_operand_helper(MachNode* m, uint opnd_idx, const Type* t) {
+MachOper* Matcher::specialize_vector_operand_helper(MachNode* m, uint opnd_idx, const TypeVect* vt) {
   MachOper* original_opnd = m->_opnds[opnd_idx];
-  uint ideal_reg = t->ideal_reg();
+  uint ideal_reg = vt->ideal_reg();
   // Handle special cases.
-  if (t->isa_vect()) {
-    // LShiftCntV/RShiftCntV report wide vector type, but Matcher::vector_shift_count_ideal_reg() as ideal register (see vectornode.hpp).
-    // Look for shift count use sites as well (at vector shift nodes).
-    int opc = m->ideal_Opcode();
-    if ((VectorNode::is_shift_count(opc)  && opnd_idx == 0) || // DEF operand of LShiftCntV/RShiftCntV
-        (VectorNode::is_vector_shift(opc) && opnd_idx == 2)) { // shift operand of a vector shift node
-      ideal_reg = Matcher::vector_shift_count_ideal_reg(t->is_vect()->length_in_bytes());
-    }
-  } else {
-    // Chain instructions which convert scalar to vector (e.g., vshiftcntimm on x86) don't have vector type.
-    int size_in_bytes = 4 * type2size[t->basic_type()];
-    ideal_reg = Matcher::vector_ideal_reg(size_in_bytes);
+  // LShiftCntV/RShiftCntV report wide vector type, but Matcher::vector_shift_count_ideal_reg() as ideal register (see vectornode.hpp).
+  // Look for shift count use sites as well (at vector shift nodes).
+  int opc = m->ideal_Opcode();
+  if ((VectorNode::is_vector_shift_count(opc)  && opnd_idx == 0) || // DEF operand of LShiftCntV/RShiftCntV
+      (VectorNode::is_vector_shift(opc)        && opnd_idx == 2)) { // shift operand of a vector shift node
+    ideal_reg = Matcher::vector_shift_count_ideal_reg(vt->length_in_bytes());
   }
   return Matcher::specialize_generic_vector_operand(original_opnd, ideal_reg, false);
 }
@@ -2575,7 +2582,7 @@
       }
     }
   }
-  return specialize_vector_operand_helper(m, opnd_idx, def->bottom_type());
+  return specialize_vector_operand_helper(m, opnd_idx, def->bottom_type()->is_vect());
 }
 
 void Matcher::specialize_mach_node(MachNode* m) {
--- a/src/hotspot/share/opto/matcher.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/matcher.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -125,6 +125,8 @@
   bool is_bmi_pattern(Node *n, Node *m);
 #endif
 
+  bool is_vshift_con_pattern(Node *n, Node *m);
+
   // Debug and profile information for nodes in old space:
   GrowableArray<Node_Notes*>* _old_node_note_array;
 
@@ -517,7 +519,7 @@
   void specialize_mach_node(MachNode* m);
   void specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx);
   MachOper* specialize_vector_operand(MachNode* m, uint opnd_idx);
-  MachOper* specialize_vector_operand_helper(MachNode* m, uint opnd_idx, const Type* t);
+  MachOper* specialize_vector_operand_helper(MachNode* m, uint opnd_idx, const TypeVect* vt);
 
   static MachOper* specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp);
 
@@ -534,8 +536,7 @@
   // on windows95 to take care of some unusual register constraints.
   void pd_implicit_null_fixup(MachNode *load, uint idx);
 
-  // Advertise here if the CPU requires explicit rounding operations
-  // to implement the UseStrictFP mode.
+  // Advertise here if the CPU requires explicit rounding operations to implement strictfp mode.
   static const bool strict_fp_requires_explicit_rounding;
 
   // Are floats conerted to double when stored to stack during deoptimization?
--- a/src/hotspot/share/opto/node.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/node.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -2123,7 +2123,8 @@
       }
       assert( cnt == 0,"Mismatched edge count.");
     } else if (n == NULL) {
-      assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
+      assert(i >= req() || i == 0 || is_Region() || is_Phi() || is_ArrayCopy()
+              || (is_Unlock() && i == req()-1), "only region, phi, arraycopy or unlock nodes have null data edges");
     } else {
       assert(n->is_top(), "sanity");
       // Nothing to check.
@@ -2137,9 +2138,6 @@
   }
 }
 
-//------------------------------verify_recur-----------------------------------
-static const Node *unique_top = NULL;
-
 void Node::verify_recur(const Node *n, int verify_depth,
                         VectorSet &old_space, VectorSet &new_space) {
   if ( verify_depth == 0 )  return;
--- a/src/hotspot/share/opto/superword.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/superword.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -2706,13 +2706,11 @@
           NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("Should be int type only");})
           return NULL;
         }
-        // Move non constant shift count into vector register.
-        cnt = VectorNode::shift_count(p0, cnt, vlen, velt_basic_type(p0));
       }
-      if (cnt != opd) {
-        _igvn.register_new_node_with_optimizer(cnt);
-        _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
-      }
+      // Move shift count into vector register.
+      cnt = VectorNode::shift_count(p0, cnt, vlen, velt_basic_type(p0));
+      _igvn.register_new_node_with_optimizer(cnt);
+      _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
       return cnt;
     }
     assert(!opd->is_StoreVector(), "such vector is not expected here");
--- a/src/hotspot/share/opto/vectornode.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/vectornode.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -467,7 +467,7 @@
 }
 
 VectorNode* VectorNode::shift_count(Node* shift, Node* cnt, uint vlen, BasicType bt) {
-  assert(VectorNode::is_shift(shift) && !cnt->is_Con(), "only variable shift count");
+  assert(VectorNode::is_shift(shift), "sanity");
   // Match shift count type with shift vector type.
   const TypeVect* vt = TypeVect::make(bt, vlen);
   switch (shift->Opcode()) {
@@ -506,7 +506,7 @@
   }
 }
 
-bool VectorNode::is_shift_count(int opc) {
+bool VectorNode::is_vector_shift_count(int opc) {
   assert(opc > _last_machine_leaf && opc < _last_opcode, "invalid opcode");
   switch (opc) {
   case Op_RShiftCntV:
--- a/src/hotspot/share/opto/vectornode.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/opto/vectornode.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -76,7 +76,14 @@
   static void vector_operands(Node* n, uint* start, uint* end);
 
   static bool is_vector_shift(int opc);
-  static bool is_shift_count(int opc);
+  static bool is_vector_shift_count(int opc);
+
+  static bool is_vector_shift(Node* n) {
+    return is_vector_shift(n->Opcode());
+  }
+  static bool is_vector_shift_count(Node* n) {
+    return is_vector_shift_count(n->Opcode());
+  }
 };
 
 //===========================Vector=ALU=Operations=============================
--- a/src/hotspot/share/prims/whitebox.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/prims/whitebox.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -834,11 +834,10 @@
   void doit() {
     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
       if (t->has_last_Java_frame()) {
-        for (StackFrameStream fst(t, UseBiasedLocking); !fst.is_done(); fst.next()) {
+        for (StackFrameStream fst(t, false); !fst.is_done(); fst.next()) {
           frame* f = fst.current();
           if (f->can_be_deoptimized() && !f->is_deoptimized_frame()) {
-            RegisterMap* reg_map = fst.register_map();
-            Deoptimization::deoptimize(t, *f, reg_map);
+            Deoptimization::deoptimize(t, *f);
             if (_make_not_entrant) {
                 CompiledMethod* cm = CodeCache::find_compiled(f->pc());
                 assert(cm != NULL, "sanity check");
--- a/src/hotspot/share/runtime/arguments.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/arguments.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -522,6 +522,8 @@
   { "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
   { "FlightRecorder",               JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
   { "MonitorBound",                 JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
+  { "PrintVMQWaitTime",             JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
+  { "UseNewFieldLayout",            JDK_Version::jdk(15), JDK_Version::jdk(16), JDK_Version::jdk(17) },
 
   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
   { "DefaultMaxRAMFraction",        JDK_Version::jdk(8),  JDK_Version::undefined(), JDK_Version::undefined() },
@@ -3496,16 +3498,6 @@
       warning("Cannot dump shared archive while using shared archive");
     }
     UseSharedSpaces = false;
-#ifdef _LP64
-    if (!UseCompressedOops || !UseCompressedClassPointers) {
-      vm_exit_during_initialization(
-        "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
-    }
-  } else {
-    if (!UseCompressedOops || !UseCompressedClassPointers) {
-      no_shared_spaces("UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.");
-    }
-#endif
   }
 }
 
--- a/src/hotspot/share/runtime/deoptimization.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -1156,18 +1156,18 @@
 // Restore fields of an eliminated instance object using the same field order
 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
-  if (klass->superklass() != NULL) {
-    svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal);
-  }
-
   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
-  for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
-    if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
-      ReassignedField field;
-      field._offset = fs.offset();
-      field._type = Signature::basic_type(fs.signature());
-      fields->append(field);
+  InstanceKlass* ik = klass;
+  while (ik != NULL) {
+    for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
+      if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
+        ReassignedField field;
+        field._offset = fs.offset();
+        field._type = Signature::basic_type(fs.signature());
+        fields->append(field);
+      }
     }
+    ik = ik->superklass();
   }
   fields->sort(compare);
   for (int i = 0; i < fields->length(); i++) {
@@ -1516,7 +1516,7 @@
   fr.deoptimize(thread);
 }
 
-void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) {
+void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
   // Deoptimize only if the frame comes from compile code.
   // Do not deoptimize the frame which is already patched
   // during the execution of the loops below.
@@ -1534,15 +1534,15 @@
   cm->make_not_entrant();
 
   // Use Deoptimization::deoptimize for all of its side-effects:
-  // revoking biases of monitors, gathering traps statistics, logging...
+  // gathering traps statistics, logging...
   // it also patches the return pc but we do not care about that
   // since we return a continuation to the deopt_blob below.
   JavaThread* thread = JavaThread::current();
-  RegisterMap reg_map(thread, UseBiasedLocking);
+  RegisterMap reg_map(thread, false);
   frame runtime_frame = thread->last_frame();
   frame caller_frame = runtime_frame.sender(&reg_map);
   assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
-  Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
+  Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler);
 
   MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true);
   if (trap_mdo != NULL) {
@@ -1557,12 +1557,12 @@
   assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
          "can only deoptimize other thread at a safepoint");
   // Compute frame and register map based on thread and sp.
-  RegisterMap reg_map(thread, UseBiasedLocking);
+  RegisterMap reg_map(thread, false);
   frame fr = thread->last_frame();
   while (fr.id() != id) {
     fr = fr.sender(&reg_map);
   }
-  deoptimize(thread, fr, &reg_map, reason);
+  deoptimize(thread, fr, reason);
 }
 
 
--- a/src/hotspot/share/runtime/deoptimization.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/deoptimization.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -148,8 +148,8 @@
   static void revoke_from_deopt_handler(JavaThread* thread, frame fr, RegisterMap* map);
 
  public:
-  // Deoptimizes a frame lazily. nmethod gets patched deopt happens on return to the frame
-  static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason = Reason_constraint);
+  // Deoptimizes a frame lazily. Deopt happens on return to the frame.
+  static void deoptimize(JavaThread* thread, frame fr, DeoptReason reason = Reason_constraint);
 
 #if INCLUDE_JVMCI
   static address deoptimize_for_missing_exception_handler(CompiledMethod* cm);
--- a/src/hotspot/share/runtime/globals.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/globals.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -267,7 +267,7 @@
           "compilation")                                                    \
                                                                             \
   product(bool, PrintVMQWaitTime, false,                                    \
-          "Print out the waiting time in VM operation queue")               \
+          "(Deprecated) Print out the waiting time in VM operation queue")  \
                                                                             \
   product(bool, MethodFlushing, true,                                       \
           "Reclamation of zombie and not-entrant methods")                  \
@@ -994,9 +994,6 @@
           "proper StackOverflow handling; disable only to measure cost "    \
           "of stackbanging)")                                               \
                                                                             \
-  develop(bool, UseStrictFP, true,                                          \
-          "use strict fp if modifier strictfp is set")                      \
-                                                                            \
   develop(bool, GenerateSynchronizationCode, true,                          \
           "generate locking/unlocking code for synchronized methods and "   \
           "monitors")                                                       \
@@ -2488,7 +2485,15 @@
           "Start flight recording with options"))                           \
                                                                             \
   experimental(bool, UseFastUnorderedTimeStamps, false,                     \
-          "Use platform unstable time where supported for timestamps only")
+          "Use platform unstable time where supported for timestamps only") \
+                                                                            \
+  product(bool, UseNewFieldLayout, true,                                    \
+               "(Deprecated) Use new algorithm to compute field layouts")   \
+                                                                            \
+  product(bool, UseEmptySlotsInSupers, true,                                \
+                "Allow allocating fields in empty slots of super-classes")  \
+                                                                            \
+
 
 // Interface macros
 #define DECLARE_PRODUCT_FLAG(type, name, value, doc)      extern "C" type name;
--- a/src/hotspot/share/runtime/mutex.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/mutex.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -81,7 +81,7 @@
   char _name[MUTEX_NAME_LEN];            // Name of mutex/monitor
 
   // Debugging fields for naming, deadlock detection, etc. (some only used in debug mode)
-#ifdef ASSERT
+#ifndef PRODUCT
   bool    _allow_vm_block;
   int     _rank;                 // rank (to avoid/detect potential deadlocks)
   Mutex*  _next;                 // Used by a Thread to link up owned locks
--- a/src/hotspot/share/runtime/thread.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/thread.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -387,7 +387,7 @@
 
   log_debug(os, thread)("Thread " UINTX_FORMAT " stack dimensions: "
     PTR_FORMAT "-" PTR_FORMAT " (" SIZE_FORMAT "k).",
-    os::current_thread_id(), p2i(stack_base() - stack_size()),
+    os::current_thread_id(), p2i(stack_end()),
     p2i(stack_base()), stack_size()/1024);
 
   // Perform <ChildClass> initialization actions
@@ -1018,24 +1018,13 @@
 }
 #endif // ASSERT
 
+// Check for adr in the live portion of our stack.
 bool Thread::is_in_stack(address adr) const {
   assert(Thread::current() == this, "is_in_stack can only be called from current thread");
   address end = os::current_stack_pointer();
-  // Allow non Java threads to call this without stack_base
-  if (_stack_base == NULL) return true;
-  if (stack_base() > adr && adr >= end) return true;
-
-  return false;
+  return (stack_base() > adr && adr >= end);
 }
 
-bool Thread::is_in_usable_stack(address adr) const {
-  size_t stack_guard_size = os::uses_stack_guard_pages() ? JavaThread::stack_guard_zone_size() : 0;
-  size_t usable_stack_size = _stack_size - stack_guard_size;
-
-  return ((adr < stack_base()) && (adr >= stack_base() - usable_stack_size));
-}
-
-
 // We had to move these methods here, because vm threads get into ObjectSynchronizer::enter
 // However, there is a note in JavaThread::is_lock_owned() about the VM threads not being
 // used for compilation in the future. If that change is made, the need for these methods
@@ -1830,6 +1819,14 @@
 }
 
 
+// Check for adr in the usable portion of this thread's stack.
+bool JavaThread::is_in_usable_stack(address adr) const {
+  size_t stack_guard_size = os::uses_stack_guard_pages() ? JavaThread::stack_guard_zone_size() : 0;
+  size_t usable_stack_size = _stack_size - stack_guard_size;
+
+  return ((stack_base() > adr) && (adr >= (stack_base() - usable_stack_size)));
+}
+
 void JavaThread::block_if_vm_exited() {
   if (_terminated == _vm_exited) {
     // _vm_exited is set at safepoint, and Threads_lock is never released
@@ -2416,11 +2413,10 @@
       if (has_last_Java_frame()) {
         frame f = last_frame();
         if (f.is_runtime_frame() || f.is_safepoint_blob_frame()) {
-          // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
-          RegisterMap reg_map(this, UseBiasedLocking);
+          RegisterMap reg_map(this, false);
           frame compiled_frame = f.sender(&reg_map);
           if (!StressCompiledExceptionHandlers && compiled_frame.can_be_deoptimized()) {
-            Deoptimization::deoptimize(this, compiled_frame, &reg_map);
+            Deoptimization::deoptimize(this, compiled_frame);
           }
         }
       }
@@ -2865,8 +2861,7 @@
 // Deoptimization
 // Function for testing deoptimization
 void JavaThread::deoptimize() {
-  // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
-  StackFrameStream fst(this, UseBiasedLocking);
+  StackFrameStream fst(this, false);
   bool deopt = false;           // Dump stack only if a deopt actually happens.
   bool only_at = strlen(DeoptimizeOnlyAt) > 0;
   // Iterate over all frames in the thread and deoptimize
@@ -2903,7 +2898,7 @@
         trace_frames();
         trace_stack();
       }
-      Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
+      Deoptimization::deoptimize(this, *fst.current());
     }
   }
 
@@ -2929,11 +2924,10 @@
 
 void JavaThread::deoptimize_marked_methods() {
   if (!has_last_Java_frame()) return;
-  // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
-  StackFrameStream fst(this, UseBiasedLocking);
+  StackFrameStream fst(this, false);
   for (; !fst.is_done(); fst.next()) {
     if (fst.current()->should_be_deoptimized()) {
-      Deoptimization::deoptimize(this, *fst.current(), fst.register_map());
+      Deoptimization::deoptimize(this, *fst.current());
     }
   }
 }
--- a/src/hotspot/share/runtime/thread.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/thread.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -687,12 +687,15 @@
   // Used by fast lock support
   virtual bool is_lock_owned(address adr) const;
 
-  // Check if address is in the stack of the thread (not just for locks).
-  // Warning: the method can only be used on the running thread
+  // Check if address is in the live stack of this thread (not just for locks).
+  // Warning: can only be called by the current thread on itself.
   bool is_in_stack(address adr) const;
-  // Check if address is in the usable part of the stack (excludes protected
-  // guard pages)
-  bool is_in_usable_stack(address adr) const;
+
+  // Check if address in the stack mapped to this thread. Used mainly in
+  // error reporting (so has to include guard zone) and frame printing.
+  bool on_local_stack(address adr) const {
+    return (_stack_base > adr && adr >= stack_end());
+  }
 
   // Sets this thread as starting thread. Returns failure if thread
   // creation fails due to lack of memory, too many threads etc.
@@ -728,11 +731,6 @@
   void    record_stack_base_and_size();
   void    register_thread_stack_with_NMT() NOT_NMT_RETURN;
 
-  bool    on_local_stack(address adr) const {
-    // QQQ this has knowledge of direction, ought to be a stack method
-    return (_stack_base > adr && adr >= stack_end());
-  }
-
   int     lgrp_id() const        { return _lgrp_id; }
   void    set_lgrp_id(int value) { _lgrp_id = value; }
 
@@ -1732,6 +1730,11 @@
       stack_end() + MAX2(JavaThread::stack_guard_zone_size(), JavaThread::stack_shadow_zone_size());
   }
 
+  // Check if address is in the usable part of the stack (excludes protected
+  // guard pages). Can be applied to any thread and is an approximation for
+  // using is_in_stack when the query has to happen from another thread.
+  bool is_in_usable_stack(address adr) const;
+
   // Misc. accessors/mutators
   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
--- a/src/hotspot/share/runtime/vmOperations.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/vmOperations.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -134,12 +134,11 @@
         tcount = 0;
           int fcount = 0;
           // Deoptimize some selected frames.
-          // Biased llocking wants a updated register map
-          for(StackFrameStream fst(thread, UseBiasedLocking); !fst.is_done(); fst.next()) {
+          for(StackFrameStream fst(thread, false); !fst.is_done(); fst.next()) {
             if (fst.current()->can_be_deoptimized()) {
               if (fcount++ == fnum) {
                 fcount = 0;
-                Deoptimization::deoptimize(thread, *fst.current(), fst.register_map());
+                Deoptimization::deoptimize(thread, *fst.current());
               }
             }
           }
--- a/src/hotspot/share/runtime/vmStructs.cpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Wed Mar 04 12:01:01 2020 +0100
@@ -235,7 +235,7 @@
   nonstatic_field(InstanceKlass,               _static_oop_field_count,                       u2)                                    \
   nonstatic_field(InstanceKlass,               _nonstatic_oop_map_size,                       int)                                   \
   nonstatic_field(InstanceKlass,               _is_marked_dependent,                          bool)                                  \
-  nonstatic_field(InstanceKlass,               _misc_flags,                                   u2)                                    \
+  nonstatic_field(InstanceKlass,               _misc_flags,                                   u4)                                    \
   nonstatic_field(InstanceKlass,               _minor_version,                                u2)                                    \
   nonstatic_field(InstanceKlass,               _major_version,                                u2)                                    \
   nonstatic_field(InstanceKlass,               _init_state,                                   u1)                                    \
--- a/src/hotspot/share/utilities/powerOfTwo.hpp	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/hotspot/share/utilities/powerOfTwo.hpp	Wed Mar 04 12:01:01 2020 +0100
@@ -28,6 +28,7 @@
 #include "metaprogramming/enableIf.hpp"
 #include "metaprogramming/isIntegral.hpp"
 #include "metaprogramming/isSigned.hpp"
+#include "utilities/align.hpp"
 #include "utilities/count_leading_zeros.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -68,7 +69,7 @@
   STATIC_ASSERT(IsIntegral<T>::value);
   STATIC_ASSERT(IsSigned<T>::value);
   assert(value > 0, "Invalid value");
-  if (is_power_of_2(value)) {
+  if (is_power_of_2_t(value)) {
     return value;
   }
   uint32_t lz = count_leading_zeros(value);
@@ -84,7 +85,7 @@
   STATIC_ASSERT(IsIntegral<T>::value);
   STATIC_ASSERT(!IsSigned<T>::value);
   assert(value != 0, "Invalid value");
-  if (is_power_of_2(value)) {
+  if (is_power_of_2_t(value)) {
     return value;
   }
   uint32_t lz = count_leading_zeros(value);
--- a/src/java.base/share/classes/java/lang/Class.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/Class.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1994, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -374,15 +374,16 @@
      *  {@code Class.forName("Foo", true, this.getClass().getClassLoader())}
      * </blockquote>
      *
-     * Note that this method throws errors related to loading, linking or
-     * initializing as specified in Sections 12.2, 12.3 and 12.4 of <em>The
-     * Java Language Specification</em>.
+     * Note that this method throws errors related to loading, linking
+     * or initializing as specified in Sections {@jls 12.2}, {@jls
+     * 12.3}, and {@jls 12.4} of <em>The Java Language
+     * Specification</em>.
      * Note that this method does not check whether the requested class
      * is accessible to its caller.
      *
      * @param name       fully qualified name of the desired class
      * @param initialize if {@code true} the class will be initialized (which implies linking).
-     *                   See Section 12.4 of <em>The Java Language Specification</em>.
+     *                   See Section {@jls 12.4} of <em>The Java Language Specification</em>.
      * @param loader     class loader from which the class must be loaded
      * @return           class object representing the desired class
      *
@@ -660,7 +661,8 @@
      * specified {@code Class} parameter can be converted to the type
      * represented by this {@code Class} object via an identity conversion
      * or via a widening reference conversion. See <em>The Java Language
-     * Specification</em>, sections 5.1.1 and 5.1.4 , for details.
+     * Specification</em>, sections {@jls 5.1.1} and {@jls 5.1.4},
+     * for details.
      *
      * @param     cls the {@code Class} object to be checked
      * @return    the {@code boolean} value indicating whether objects of the
@@ -2399,7 +2401,7 @@
      * object represents an interface, a primitive type, an array class, or
      * void.
      *
-     * <p> See <em>The Java Language Specification</em>, section 8.2.
+     * <p> See <em>The Java Language Specification</em>, section {@jls 8.2}.
      *
      * @return  the array of {@code Constructor} objects representing all the
      *          declared constructors of this class
--- a/src/java.base/share/classes/java/lang/Enum.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/Enum.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,8 @@
  *
  * More information about enums, including descriptions of the
  * implicitly declared methods synthesized by the compiler, can be
- * found in section 8.9 of
- * <cite>The Java&trade; Language Specification</cite>.
+ * found in section {@jls 8.9} of <cite>The Java&trade; Language
+ * Specification</cite>.
  *
  * Enumeration types are all serializable and receive special handling
  * by the serialization mechanism. The serialized representation used
--- a/src/java.base/share/classes/java/lang/Module.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/Module.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1094,13 +1094,14 @@
 
         // map each module to a class loader
         ClassLoader pcl = ClassLoaders.platformClassLoader();
+        boolean isModuleLoaderMapper = ModuleLoaderMap.isBuiltinMapper(clf);
 
         for (int index = 0; index < numModules; index++) {
             String name = resolvedModules[index].name();
             ClassLoader loader = clf.apply(name);
 
             if (loader == null || loader == pcl) {
-                if (!(clf instanceof ModuleLoaderMap.Mapper)) {
+                if (!isModuleLoaderMapper) {
                     throw new IllegalArgumentException("loader can't be 'null'"
                             + " or the platform class loader");
                 }
--- a/src/java.base/share/classes/java/lang/Record.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/Record.java	Wed Mar 04 12:01:01 2020 +0100
@@ -109,7 +109,7 @@
      * @implSpec
      * The implicitly provided implementation returns {@code true} if
      * and only if the argument is an instance of the same record type
-     * as this object, and each component of this record is equal to
+     * as this record, and each component of this record is equal to
      * the corresponding component of the argument; otherwise, {@code
      * false} is returned. Equality of a component {@code c} is
      * determined as follows:
@@ -130,46 +130,70 @@
      *
      * </ul>
      *
-     * The implicitly provided implementation conforms to the
-     * semantics described above; the implementation may or may not
-     * accomplish this by using calls to the particular methods
-     * listed.
+     * Apart from the semantics described above, the precise algorithm
+     * used in the implicitly provided implementation is unspecified
+     * and is subject to change. The implementation may or may not use
+     * calls to the particular methods listed, and may or may not
+     * perform comparisons in the order of component declaration.
      *
      * @see java.util.Objects#equals(Object,Object)
      *
      * @param   obj   the reference object with which to compare.
-     * @return  {@code true} if this object is equal to the
+     * @return  {@code true} if this record is equal to the
      *          argument; {@code false} otherwise.
      */
     @Override
     public abstract boolean equals(Object obj);
 
     /**
+     * Returns a hash code value for the record.
      * Obeys the general contract of {@link Object#hashCode Object.hashCode}.
+     * For records, hashing behavior is constrained by the refined contract
+     * of {@link Record#equals Record.equals}, so that any two records
+     * created from the same components must have the same hash code.
      *
      * @implSpec
      * The implicitly provided implementation returns a hash code value derived
-     * by combining the hash code value for all the components, according to
-     * {@link Object#hashCode()} for components whose types are reference types,
-     * or the primitive wrapper hash code for components whose types are primitive
-     * types.
+     * by combining appropriate hashes from each component.
+     * The precise algorithm used in the implicitly provided implementation
+     * is unspecified and is subject to change within the above limits.
+     * The resulting integer need not remain consistent from one
+     * execution of an application to another execution of the same
+     * application, even if the hashes of the component values were to
+     * remain consistent in this way.  Also, a component of primitive
+     * type may contribute its bits to the hash code differently than
+     * the {@code hashCode} of its primitive wrapper class.
      *
      * @see     Object#hashCode()
      *
-     * @return  a hash code value for this object.
+     * @return  a hash code value for this record.
      */
     @Override
     public abstract int hashCode();
 
     /**
-     * Obeys the general contract of {@link Object#toString Object.toString}.
+     * Returns a string representation of the record.
+     * In accordance with the general contract of {@link Object#toString()},
+     * the {@code toString} method returns a string that
+     * "textually represents" this record. The result should
+     * be a concise but informative representation that is easy for a
+     * person to read.
+     * <p>
+     * In addition to this general contract, record classes must further
+     * participate in the invariant that any two records which are
+     * {@linkplain Record#equals(Object) equal} must produce equal
+     * strings.  This invariant is necessarily relaxed in the rare
+     * case where corresponding equal component values might fail
+     * to produce equal strings for themselves.
      *
      * @implSpec
-     * The implicitly provided implementation returns a string that is derived
-     * from the name of the record class and the names and string representations
-     * of all the components, according to {@link Object#toString()} for components
-     * whose types are reference types, and the primitive wrapper {@code toString}
-     * method for components whose types are primitive types.
+     * The implicitly provided implementation returns a string which
+     * contains the name of the record class, the names of components
+     * of the record, and string representations of component values,
+     * so as to fulfill the contract of this method.
+     * The precise format produced by this implicitly provided implementation
+     * is subject to change, so the present syntax should not be parsed
+     * by applications to recover record component values.
      *
      * @see     Object#toString()
      *
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandle.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/invoke/MethodHandle.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -222,8 +222,9 @@
  * refers directly to an associated {@code CONSTANT_Methodref},
  * {@code CONSTANT_InterfaceMethodref}, or {@code CONSTANT_Fieldref}
  * constant pool entry.
- * (For full details on method handle constants,
- * see sections 4.4.8 and 5.4.3.5 of the Java Virtual Machine Specification.)
+ * (For full details on method handle constants, see sections {@jvms
+ * 4.4.8} and {@jvms 5.4.3.5} of the Java Virtual Machine
+ * Specification.)
  * <p>
  * Method handles produced by lookups or constant loads from methods or
  * constructors with the variable arity modifier bit ({@code 0x0080})
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandleInfo.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/invoke/MethodHandleInfo.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,8 @@
  * Direct method handles may be obtained in any of these ways:
  * <ul>
  * <li>By executing an {@code ldc} instruction on a {@code CONSTANT_MethodHandle} constant.
- *     (See the Java Virtual Machine Specification, sections 4.4.8 and 5.4.3.)
+ *     (See the Java Virtual Machine Specification, sections {@jvms
+ *     4.4.8} and {@jvms 5.4.3}.)
  * <li>By calling one of the <a href="MethodHandles.Lookup.html#lookups">Lookup Factory Methods</a>,
  *     such as {@link Lookup#findVirtual Lookup.findVirtual},
  *     to resolve a symbolic reference into a method handle.
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandles.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/invoke/MethodHandles.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -318,7 +318,8 @@
      * use cases for methods, constructors, and fields.
      * Each method handle created by a factory method is the functional
      * equivalent of a particular <em>bytecode behavior</em>.
-     * (Bytecode behaviors are described in section 5.4.3.5 of the Java Virtual Machine Specification.)
+     * (Bytecode behaviors are described in section {@jvms 5.4.3.5} of
+     * the Java Virtual Machine Specification.)
      * Here is a summary of the correspondence between these factory methods and
      * the behavior of the resulting method handles:
      * <table class="striped">
@@ -502,7 +503,8 @@
      * If the desired member is {@code protected}, the usual JVM rules apply,
      * including the requirement that the lookup class must either be in the
      * same package as the desired member, or must inherit that member.
-     * (See the Java Virtual Machine Specification, sections 4.9.2, 5.4.3.5, and 6.4.)
+     * (See the Java Virtual Machine Specification, sections {@jvms
+     * 4.9.2}, {@jvms 5.4.3.5}, and {@jvms 6.4}.)
      * In addition, if the desired member is a non-static field or method
      * in a different package, the resulting method handle may only be applied
      * to objects of the lookup class or one of its subclasses.
@@ -515,7 +517,7 @@
      * that the receiver argument must match both the resolved method <em>and</em>
      * the current class.  Again, this requirement is enforced by narrowing the
      * type of the leading parameter to the resulting method handle.
-     * (See the Java Virtual Machine Specification, section 4.10.1.9.)
+     * (See the Java Virtual Machine Specification, section {@jvms 4.10.1.9}.)
      * <p>
      * The JVM represents constructors and static initializer blocks as internal methods
      * with special names ({@code "<init>"} and {@code "<clinit>"}).
@@ -525,7 +527,8 @@
      * <p>
      * If the relationship between nested types is expressed directly through the
      * {@code NestHost} and {@code NestMembers} attributes
-     * (see the Java Virtual Machine Specification, sections 4.7.28 and 4.7.29),
+     * (see the Java Virtual Machine Specification, sections {@jvms
+     * 4.7.28} and {@jvms 4.7.29}),
      * then the associated {@code Lookup} object provides direct access to
      * the lookup class and all of its nestmates
      * (see {@link java.lang.Class#getNestHost Class.getNestHost}).
--- a/src/java.base/share/classes/java/lang/invoke/MethodType.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/invoke/MethodType.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,8 +91,9 @@
  * A method type may be loaded by an {@code ldc} instruction which refers
  * to a suitable {@code CONSTANT_MethodType} constant pool entry.
  * The entry refers to a {@code CONSTANT_Utf8} spelling for the descriptor string.
- * (For full details on method type constants,
- * see sections 4.4.8 and 5.4.3.5 of the Java Virtual Machine Specification.)
+ * (For full details on method type constants, see sections {@jvms
+ * 4.4.8} and {@jvms 5.4.3.5} of the Java Virtual Machine
+ * Specification.)
  * <p>
  * When the JVM materializes a {@code MethodType} from a descriptor string,
  * all classes named in the descriptor must be accessible, and will be loaded.
--- a/src/java.base/share/classes/java/lang/module/ResolvedModule.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/module/ResolvedModule.java	Wed Mar 04 12:01:01 2020 +0100
@@ -79,7 +79,7 @@
      * @return The module descriptor
      */
     ModuleDescriptor descriptor() {
-        return reference().descriptor();
+        return mref.descriptor();
     }
 
     /**
@@ -93,7 +93,7 @@
      * @return The module name
      */
     public String name() {
-        return reference().descriptor().name();
+        return mref.descriptor().name();
     }
 
     /**
--- a/src/java.base/share/classes/java/lang/reflect/Method.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/reflect/Method.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -503,7 +503,7 @@
      *
      * <p>If the underlying method is an instance method, it is invoked
      * using dynamic method lookup as documented in The Java Language
-     * Specification, section 15.12.4.4; in particular,
+     * Specification, section {@jls 15.12.4.4}; in particular,
      * overriding based on the runtime type of the target object may occur.
      *
      * <p>If the underlying method is static, the class that declared
--- a/src/java.base/share/classes/java/lang/reflect/ProxyGenerator.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/lang/reflect/ProxyGenerator.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -383,7 +383,7 @@
      * separator, the external representation used by the Java language
      * and APIs, to a fully qualified class name that uses '/' as the
      * package separator, the representation used in the class file
-     * format (see JVMS section 4.2).
+     * format (see JVMS section {@jvms 4.2}).
      */
     private static String dotToSlash(String name) {
         return name.replace('.', '/');
--- a/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/time/format/DateTimeFormatterBuilder.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -4260,9 +4260,15 @@
                 char nextNextChar = text.charAt(position + 1);
                 if (context.charEquals(nextChar, 'U') && context.charEquals(nextNextChar, 'T')) {
                     if (length >= position + 3 && context.charEquals(text.charAt(position + 2), 'C')) {
-                        return parseOffsetBased(context, text, position, position + 3, OffsetIdPrinterParser.INSTANCE_ID_ZERO);
+                        // There are localized zone texts that start with "UTC", e.g.
+                        // "UTC\u221210:00" (MINUS SIGN instead of HYPHEN-MINUS) in French.
+                        // Exclude those ZoneText cases.
+                        if (!(this instanceof ZoneTextPrinterParser)) {
+                            return parseOffsetBased(context, text, position, position + 3, OffsetIdPrinterParser.INSTANCE_ID_ZERO);
+                        }
+                    } else {
+                        return parseOffsetBased(context, text, position, position + 2, OffsetIdPrinterParser.INSTANCE_ID_ZERO);
                     }
-                    return parseOffsetBased(context, text, position, position + 2, OffsetIdPrinterParser.INSTANCE_ID_ZERO);
                 } else if (context.charEquals(nextChar, 'G') && length >= position + 3 &&
                         context.charEquals(nextNextChar, 'M') && context.charEquals(text.charAt(position + 2), 'T')) {
                     if (length >= position + 4 && context.charEquals(text.charAt(position + 3), '0')) {
--- a/src/java.base/share/classes/java/util/IdentityHashMap.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/util/IdentityHashMap.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,17 +115,19 @@
  * exception for its correctness: <i>fail-fast iterators should be used only
  * to detect bugs.</i>
  *
- * <p>Implementation note: This is a simple <i>linear-probe</i> hash table,
- * as described for example in texts by Sedgewick and Knuth.  The array
- * alternates holding keys and values.  (This has better locality for large
- * tables than does using separate arrays.)  For many JRE implementations
- * and operation mixes, this class will yield better performance than
- * {@link HashMap} (which uses <i>chaining</i> rather than linear-probing).
- *
  * <p>This class is a member of the
  * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
  * Java Collections Framework</a>.
  *
+ * @implNote
+ * <p>This is a simple <i>linear-probe</i> hash table,
+ * as described for example in texts by Sedgewick and Knuth.  The array
+ * contains alternating keys and values, with keys at even indexes and values
+ * at odd indexes. (This arrangement has better locality for large
+ * tables than does using separate arrays.)  For many Java implementations
+ * and operation mixes, this class will yield better performance than
+ * {@link HashMap}, which uses <i>chaining</i> rather than linear-probing.
+ *
  * @see     System#identityHashCode(Object)
  * @see     Object#hashCode()
  * @see     Collection
@@ -293,7 +295,7 @@
      */
     private static int hash(Object x, int length) {
         int h = System.identityHashCode(x);
-        // Multiply by -127, and left-shift to use least bit as part of hash
+        // Multiply by -254 to use the hash LSB and to ensure index is even
         return ((h << 1) - (h << 8)) & (length - 1);
     }
 
--- a/src/java.base/share/classes/java/util/ImmutableCollections.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/util/ImmutableCollections.java	Wed Mar 04 12:01:01 2020 +0100
@@ -904,6 +904,20 @@
         @Override public V replace(K key, V value) { throw uoe(); }
         @Override public boolean replace(K key, V oldValue, V newValue) { throw uoe(); }
         @Override public void replaceAll(BiFunction<? super K,? super V,? extends V> f) { throw uoe(); }
+
+        /**
+         * @implNote {@code null} values are disallowed in these immutable maps,
+         * so we can improve upon the default implementation since a
+         * {@code null} return from {@code get(key)} always means the default
+         * value should be returned.
+         */
+        @Override
+        public V getOrDefault(Object key, V defaultValue) {
+            V v;
+            return ((v = get(key)) != null)
+                    ? v
+                    : defaultValue;
+        }
     }
 
     static final class Map1<K,V> extends AbstractImmutableMap<K,V> {
--- a/src/java.base/share/classes/java/util/regex/Pattern.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/util/regex/Pattern.java	Wed Mar 04 12:01:01 2020 +0100
@@ -2064,7 +2064,7 @@
         Node prev = null;
         Node firstTail = null;
         Branch branch = null;
-        Node branchConn = null;
+        BranchConn branchConn = null;
 
         for (;;) {
             Node node = sequence(end);
@@ -2212,7 +2212,24 @@
                 break;
             }
 
-            node = closure(node);
+            if (node instanceof LineEnding) {
+                LineEnding le = (LineEnding)node;
+                node = closureOfLineEnding(le);
+
+                if (node != le) {
+                    // LineEnding was replaced with an anonymous group
+                    if (head == null)
+                        head = node;
+                    else
+                        tail.next = node;
+                    // Double return: Tail was returned in root
+                    tail = root;
+                    continue;
+                }
+            } else {
+                node = closure(node);
+            }
+
             /* save the top dot-greedy nodes (.*, .+) as well
             if (node instanceof GreedyCharProperty &&
                 ((GreedyCharProperty)node).cp instanceof Dot) {
@@ -3079,18 +3096,31 @@
         if (saveTCNCount < topClosureNodes.size())
             topClosureNodes.subList(saveTCNCount, topClosureNodes.size()).clear();
 
+        return groupWithClosure(node, head, tail, capturingGroup);
+    }
+
+    /**
+     * Transforms a Group with quantifiers into some special constructs
+     * (such as Branch or Loop/GroupCurly), if necessary.
+     *
+     * This method is applied either to actual groups or to the Unicode
+     * linebreak (aka \\R) represented as an anonymous group.
+     */
+    private Node groupWithClosure(Node node, Node head, Node tail,
+                                  boolean capturingGroup)
+    {
         if (node instanceof Ques) {
             Ques ques = (Ques) node;
             if (ques.type == Qtype.POSSESSIVE) {
                 root = node;
                 return node;
             }
-            tail.next = new BranchConn();
-            tail = tail.next;
+            BranchConn branchConn = new BranchConn();
+            tail = tail.next = branchConn;
             if (ques.type == Qtype.GREEDY) {
-                head = new Branch(head, null, tail);
+                head = new Branch(head, null, branchConn);
             } else { // Reluctant quantifier
-                head = new Branch(null, head, tail);
+                head = new Branch(null, head, branchConn);
             }
             root = tail;
             return head;
@@ -3268,6 +3298,31 @@
     }
 
     /**
+     * Processing repetition of a Unicode linebreak \\R.
+     */
+    private Node closureOfLineEnding(LineEnding le) {
+        int ch = peek();
+        if (ch != '?' && ch != '*' && ch != '+' && ch != '{') {
+            return le;
+        }
+
+        // Replace the LineEnding with an anonymous group
+        // (?:\\u000D\\u000A|[\\u000A\\u000B\\u000C\\u000D\\u0085\\u2028\\u2029])
+        Node grHead = createGroup(true);
+        Node grTail = root;
+        BranchConn branchConn = new BranchConn();
+        branchConn.next = grTail;
+        Node slice = new Slice(new int[] {0x0D, 0x0A});
+        slice.next = branchConn;
+        Node chClass = newCharProperty(x -> x == 0x0A || x == 0x0B ||
+                x == 0x0C || x == 0x0D || x == 0x85 || x == 0x2028 ||
+                x == 0x2029);
+        chClass.next = branchConn;
+        grHead.next = new Branch(slice, chClass, branchConn);
+        return groupWithClosure(closure(grHead), grHead, grTail, false);
+    }
+
+    /**
      * Processes repetition. If the next character peeked is a quantifier
      * then new nodes must be appended to handle the repetition.
      * Prev could be a single or a group, so it could be a chain of nodes.
@@ -4723,8 +4778,8 @@
     static final class Branch extends Node {
         Node[] atoms = new Node[2];
         int size = 2;
-        Node conn;
-        Branch(Node first, Node second, Node branchConn) {
+        BranchConn conn;
+        Branch(Node first, Node second, BranchConn branchConn) {
             conn = branchConn;
             atoms[0] = first;
             atoms[1] = second;
@@ -4732,9 +4787,10 @@
 
         void add(Node node) {
             if (size >= atoms.length) {
-                Node[] tmp = new Node[atoms.length*2];
-                System.arraycopy(atoms, 0, tmp, 0, atoms.length);
-                atoms = tmp;
+                int len = ArraysSupport.newLength(size,
+                        1,    /* minimum growth */
+                        size  /* preferred growth */);
+                atoms = Arrays.copyOf(atoms, len);
             }
             atoms[size++] = node;
         }
--- a/src/java.base/share/classes/java/util/stream/package-info.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/java/util/stream/package-info.java	Wed Mar 04 12:01:01 2020 +0100
@@ -43,7 +43,7 @@
  * <p>The key abstraction introduced in this package is <em>stream</em>.  The
  * classes {@link java.util.stream.Stream}, {@link java.util.stream.IntStream},
  * {@link java.util.stream.LongStream}, and {@link java.util.stream.DoubleStream}
- * are streams over objects and the primitive {@code int}, {@code long} and
+ * are streams over objects and the primitive {@code int}, {@code long}, and
  * {@code double} types.  Streams differ from collections in several ways:
  *
  * <ul>
@@ -176,7 +176,7 @@
  * do:
  *
  * <pre>{@code
- *     int sumOfWeights = widgets.<b>parallelStream()</b>
+ *     int sumOfWeights = widgets.parallelStream()
  *                               .filter(b -> b.getColor() == RED)
  *                               .mapToInt(b -> b.getWeight())
  *                               .sum();
@@ -242,7 +242,7 @@
  *     String s = sl.collect(joining(" "));
  * }</pre>
  *
- * First a list is created consisting of two strings: "one"; and "two". Then a
+ * First a list is created consisting of two strings: "one" and "two". Then a
  * stream is created from that list. Next the list is modified by adding a third
  * string: "three". Finally the elements of the stream are collected and joined
  * together. Since the list was modified before the terminal {@code collect}
@@ -344,7 +344,7 @@
  * parallelization:
  *
  * <pre>{@code
- *     List<String>results =
+ *     List<String> results =
  *         stream.filter(s -> pattern.matcher(s).matches())
  *               .collect(Collectors.toList());  // No side-effects!
  * }</pre>
--- a/src/java.base/share/classes/jdk/internal/module/ArchivedModuleGraph.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/jdk/internal/module/ArchivedModuleGraph.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,8 @@
 import java.lang.module.Configuration;
 import java.lang.module.ModuleFinder;
 import java.util.Map;
-import java.util.Objects;
 import java.util.Set;
+import java.util.function.Function;
 
 import jdk.internal.misc.VM;
 
@@ -39,26 +39,26 @@
 final class ArchivedModuleGraph {
     private static ArchivedModuleGraph archivedModuleGraph;
 
-    private final String mainModule;
     private final boolean hasSplitPackages;
     private final boolean hasIncubatorModules;
     private final ModuleFinder finder;
     private final Configuration configuration;
+    private final Function<String, ClassLoader> classLoaderFunction;
     private final Map<String, Set<String>> concealedPackagesToOpen;
     private final Map<String, Set<String>> exportedPackagesToOpen;
 
-    private ArchivedModuleGraph(String mainModule,
-                                boolean hasSplitPackages,
-                                boolean hasIncubatorModules,
-                                ModuleFinder finder,
-                                Configuration configuration,
-                                Map<String, Set<String>> concealedPackagesToOpen,
-                                Map<String, Set<String>> exportedPackagesToOpen) {
-        this.mainModule = mainModule;
+    public ArchivedModuleGraph(boolean hasSplitPackages,
+                               boolean hasIncubatorModules,
+                               ModuleFinder finder,
+                               Configuration configuration,
+                               Function<String, ClassLoader> classLoaderFunction,
+                               Map<String, Set<String>> concealedPackagesToOpen,
+                               Map<String, Set<String>> exportedPackagesToOpen) {
         this.hasSplitPackages = hasSplitPackages;
         this.hasIncubatorModules = hasIncubatorModules;
         this.finder = finder;
         this.configuration = configuration;
+        this.classLoaderFunction = classLoaderFunction;
         this.concealedPackagesToOpen = concealedPackagesToOpen;
         this.exportedPackagesToOpen = exportedPackagesToOpen;
     }
@@ -71,6 +71,10 @@
         return configuration;
     }
 
+    Function<String, ClassLoader> classLoaderFunction() {
+        return classLoaderFunction;
+    }
+
     Map<String, Set<String>> concealedPackagesToOpen() {
         return concealedPackagesToOpen;
     }
@@ -92,7 +96,8 @@
      */
     static ArchivedModuleGraph get(String mainModule) {
         ArchivedModuleGraph graph = archivedModuleGraph;
-        if (graph != null && Objects.equals(mainModule, graph.mainModule)) {
+        // We only allow the unnamed module (default) case for now
+        if (mainModule == null) {
             return graph;
         } else {
             return null;
@@ -102,23 +107,8 @@
     /**
      * Archive the module graph for the given initial module.
      */
-    static void archive(String mainModule,
-                        boolean hasSplitPackages,
-                        boolean hasIncubatorModules,
-                        ModuleFinder finder,
-                        Configuration configuration,
-                        Map<String, Set<String>> concealedPackagesToOpen,
-                        Map<String, Set<String>> exportedPackagesToOpen) {
-        if (mainModule != null) {
-            throw new UnsupportedOperationException();
-        }
-        archivedModuleGraph = new ArchivedModuleGraph(mainModule,
-                                                      hasSplitPackages,
-                                                      hasIncubatorModules,
-                                                      finder,
-                                                      configuration,
-                                                      concealedPackagesToOpen,
-                                                      exportedPackagesToOpen);
+    static void archive(ArchivedModuleGraph graph) {
+        archivedModuleGraph = graph;
     }
 
     static {
--- a/src/java.base/share/classes/jdk/internal/module/ModuleBootstrap.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/jdk/internal/module/ModuleBootstrap.java	Wed Mar 04 12:01:01 2020 +0100
@@ -370,7 +370,12 @@
         // loader.
 
         // mapping of modules to class loaders
-        Function<String, ClassLoader> clf = ModuleLoaderMap.mappingFunction(cf);
+        Function<String, ClassLoader> clf;
+        if (archivedModuleGraph != null) {
+            clf = archivedModuleGraph.classLoaderFunction();
+        } else {
+            clf = ModuleLoaderMap.mappingFunction(cf);
+        }
 
         // check that all modules to be mapped to the boot loader will be
         // loaded from the runtime image
@@ -440,13 +445,14 @@
         // Module graph can be archived at CDS dump time. Only allow the
         // unnamed module case for now.
         if (canArchive && (mainModule == null)) {
-            ArchivedModuleGraph.archive(mainModule,
-                                        hasSplitPackages,
-                                        hasIncubatorModules,
-                                        systemModuleFinder,
-                                        cf,
-                                        concealedPackagesToOpen,
-                                        exportedPackagesToOpen);
+            ArchivedModuleGraph.archive(
+                    new ArchivedModuleGraph(hasSplitPackages,
+                                            hasIncubatorModules,
+                                            systemModuleFinder,
+                                            cf,
+                                            clf,
+                                            concealedPackagesToOpen,
+                                            exportedPackagesToOpen));
         }
 
         // total time to initialize
@@ -737,7 +743,6 @@
                         Modules.addExports(m, pn, other);
                     }
                 }
-
             }
         }
     }
--- a/src/java.base/share/classes/jdk/internal/module/ModuleLoaderMap.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/jdk/internal/module/ModuleLoaderMap.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,14 +28,12 @@
 import java.lang.module.Configuration;
 import java.lang.module.ResolvedModule;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Function;
 
 import jdk.internal.loader.ClassLoaders;
 
-
 /**
  * Supports the mapping of modules to class loaders. The set of modules mapped
  * to the boot and platform class loaders is generated at build time from
@@ -46,16 +44,55 @@
     /**
      * Maps the system modules to the built-in class loaders.
      */
-    public static final class Mapper implements Function<String, ClassLoader> {
-        private final Map<String, ClassLoader> map;
+    private static final class Mapper implements Function<String, ClassLoader> {
 
-        Mapper(Map<String, ClassLoader> map) {
-            this.map = map; // defensive copy not needed
+        private static final ClassLoader PLATFORM_CLASSLOADER =
+                ClassLoaders.platformClassLoader();
+        private static final ClassLoader APP_CLASSLOADER =
+                ClassLoaders.appClassLoader();
+
+        private static final Integer PLATFORM_LOADER_INDEX = 1;
+        private static final Integer APP_LOADER_INDEX      = 2;
+
+        /**
+         * Map from module to a class loader index. The index is resolved to the
+         * actual class loader in {@code apply}.
+         */
+        private final Map<String, Integer> map;
+
+        /**
+         * Creates a Mapper to map module names in the given Configuration to
+         * built-in classloaders.
+         *
+         * As a proxy for the actual classloader, we store an easily archiveable
+         * index value in the internal map. The index is stored as a boxed value
+         * so that we can cheaply do identity comparisons during bootstrap.
+         */
+        Mapper(Configuration cf) {
+            var map = new HashMap<String, Integer>();
+            for (ResolvedModule resolvedModule : cf.modules()) {
+                String mn = resolvedModule.name();
+                if (!Modules.bootModules.contains(mn)) {
+                    if (Modules.platformModules.contains(mn)) {
+                        map.put(mn, PLATFORM_LOADER_INDEX);
+                    } else {
+                        map.put(mn, APP_LOADER_INDEX);
+                    }
+                }
+            }
+            this.map = map;
         }
 
         @Override
         public ClassLoader apply(String name) {
-            return map.get(name);
+            Integer loader = map.get(name);
+            if (loader == APP_LOADER_INDEX) {
+                return APP_CLASSLOADER;
+            } else if (loader == PLATFORM_LOADER_INDEX) {
+                return PLATFORM_CLASSLOADER;
+            } else { // BOOT_LOADER_INDEX
+                return null;
+            }
         }
     }
 
@@ -63,50 +100,40 @@
      * Returns the names of the modules defined to the boot loader.
      */
     public static Set<String> bootModules() {
-        // The list of boot modules generated at build time.
-        String[] BOOT_MODULES = new String[] { "@@BOOT_MODULE_NAMES@@" };
-        Set<String> bootModules = new HashSet<>(BOOT_MODULES.length);
-        for (String mn : BOOT_MODULES) {
-            bootModules.add(mn);
-        }
-        return bootModules;
+        return Modules.bootModules;
     }
 
     /**
      * Returns the names of the modules defined to the platform loader.
      */
     public static Set<String> platformModules() {
-        // The list of platform modules generated at build time.
-        String[] PLATFORM_MODULES = new String[] { "@@PLATFORM_MODULE_NAMES@@" };
-        Set<String> platformModules = new HashSet<>(PLATFORM_MODULES.length);
-        for (String mn : PLATFORM_MODULES) {
-            platformModules.add(mn);
-        }
-        return platformModules;
+        return Modules.platformModules;
+    }
+
+    private static class Modules {
+        // list of boot modules is generated at build time.
+        private static final Set<String> bootModules =
+                Set.of(new String[] { "@@BOOT_MODULE_NAMES@@" });
+
+        // list of platform modules is generated at build time.
+        private static final Set<String> platformModules =
+                Set.of(new String[] { "@@PLATFORM_MODULE_NAMES@@" });
     }
 
     /**
-     * Returns the function to map modules in the given configuration to the
+     * Returns a function to map modules in the given configuration to the
      * built-in class loaders.
      */
     static Function<String, ClassLoader> mappingFunction(Configuration cf) {
-        Set<String> bootModules = bootModules();
-        Set<String> platformModules = platformModules();
+        return new Mapper(cf);
+    }
 
-        ClassLoader platformClassLoader = ClassLoaders.platformClassLoader();
-        ClassLoader appClassLoader = ClassLoaders.appClassLoader();
-
-        Map<String, ClassLoader> map = new HashMap<>();
-        for (ResolvedModule resolvedModule : cf.modules()) {
-            String mn = resolvedModule.name();
-            if (!bootModules.contains(mn)) {
-                if (platformModules.contains(mn)) {
-                    map.put(mn, platformClassLoader);
-                } else {
-                    map.put(mn, appClassLoader);
-                }
-            }
-        }
-        return new Mapper(map);
+    /**
+     * When defining modules for a configuration, we only allow defining modules
+     * to the boot or platform classloader if the ClassLoader mapping function
+     * originate from here.
+     */
+    public static boolean isBuiltinMapper(Function<String, ClassLoader> clf) {
+        return clf instanceof Mapper;
     }
 }
--- a/src/java.base/share/classes/module-info.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/module-info.java	Wed Mar 04 12:01:01 2020 +0100
@@ -162,7 +162,8 @@
         jdk.jlink;
     exports jdk.internal.loader to
         java.instrument,
-        java.logging;
+        java.logging,
+        java.naming;
     exports jdk.internal.jmod to
         jdk.compiler,
         jdk.jlink;
--- a/src/java.base/share/classes/sun/security/rsa/RSAPSSSignature.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/sun/security/rsa/RSAPSSSignature.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -304,11 +304,11 @@
     private static void checkKeyLength(RSAKey key, int digestLen,
             int saltLen) throws SignatureException {
         if (key != null) {
-            int keyLength = getKeyLengthInBits(key) >> 3;
+            int keyLength = (getKeyLengthInBits(key) + 7) >> 3;
             int minLength = Math.addExact(Math.addExact(digestLen, saltLen), 2);
             if (keyLength < minLength) {
                 throw new SignatureException
-                    ("Key is too short, need min " + minLength);
+                    ("Key is too short, need min " + minLength + " bytes");
             }
         }
     }
@@ -429,7 +429,7 @@
         }
         try {
             int emBits = getKeyLengthInBits(this.privKey) - 1;
-            int emLen =(emBits + 7) >> 3;
+            int emLen = (emBits + 7) >> 3;
             int hLen = this.md.getDigestLength();
             int dbLen = emLen - hLen - 1;
             int sLen = this.sigParams.getSaltLength();
@@ -472,6 +472,7 @@
             // step11: set the leftmost (8emLen - emBits) bits of the leftmost
             // octet to 0
             int numZeroBits = (emLen << 3) - emBits;
+
             if (numZeroBits != 0) {
                 byte MASK = (byte) (0xff >>> numZeroBits);
                 em[0] = (byte) (em[0] & MASK);
@@ -485,15 +486,22 @@
     }
 
     /**
-     * Decode the signature data. Verify that the object identifier matches
-     * and return the message digest.
+     * Decode the signature data as under RFC8017 sec9.1.2 EMSA-PSS-VERIFY
      */
     private boolean decodeSignature(byte[] mHash, byte[] em)
             throws IOException {
         int hLen = mHash.length;
         int sLen = this.sigParams.getSaltLength();
-        int emLen = em.length;
         int emBits = getKeyLengthInBits(this.pubKey) - 1;
+        int emLen = (emBits + 7) >> 3;
+
+        // When key length is 8N+1 bits (N+1 bytes), emBits = 8N,
+        // emLen = N which is one byte shorter than em.length.
+        // Otherwise, emLen should be same as em.length
+        int emOfs = em.length - emLen;
+        if ((emOfs == 1) && (em[0] != 0)) {
+            return false;
+        }
 
         // step3
         if (emLen < (hLen + sLen + 2)) {
@@ -501,16 +509,17 @@
         }
 
         // step4
-        if (em[emLen - 1] != (byte) 0xBC) {
+        if (em[emOfs + emLen - 1] != (byte) 0xBC) {
             return false;
         }
 
         // step6: check if the leftmost (8emLen - emBits) bits of the leftmost
         // octet are 0
         int numZeroBits = (emLen << 3) - emBits;
+
         if (numZeroBits != 0) {
             byte MASK = (byte) (0xff << (8 - numZeroBits));
-            if ((em[0] & MASK) != 0) {
+            if ((em[emOfs] & MASK) != 0) {
                 return false;
             }
         }
@@ -526,7 +535,8 @@
         int dbLen = emLen - hLen - 1;
         try {
             MGF1 mgf1 = new MGF1(mgfDigestAlgo);
-            mgf1.generateAndXor(em, dbLen, hLen, dbLen, em, 0);
+            mgf1.generateAndXor(em, emOfs + dbLen, hLen, dbLen,
+                    em, emOfs);
         } catch (NoSuchAlgorithmException nsae) {
             throw new IOException(nsae.toString());
         }
@@ -535,12 +545,12 @@
         //  octet to 0
         if (numZeroBits != 0) {
             byte MASK = (byte) (0xff >>> numZeroBits);
-            em[0] = (byte) (em[0] & MASK);
+            em[emOfs] = (byte) (em[emOfs] & MASK);
         }
 
         // step10
-        int i = 0;
-        for (; i < dbLen - sLen - 1; i++) {
+        int i = emOfs;
+        for (; i < emOfs + (dbLen - sLen - 1); i++) {
             if (em[i] != 0) {
                 return false;
             }
@@ -553,13 +563,14 @@
         digestReset = false;
         this.md.update(mHash);
         if (sLen > 0) {
-            this.md.update(em, (dbLen - sLen), sLen);
+            this.md.update(em, emOfs + (dbLen - sLen), sLen);
         }
         byte[] digest2 = this.md.digest();
         digestReset = true;
 
         // step14
-        byte[] digestInEM = Arrays.copyOfRange(em, dbLen, emLen - 1);
+        byte[] digestInEM = Arrays.copyOfRange(em, emOfs + dbLen,
+                emOfs + emLen - 1);
         return MessageDigest.isEqual(digest2, digestInEM);
     }
 
--- a/src/java.base/share/classes/sun/util/cldr/CLDRTimeZoneNameProviderImpl.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/classes/sun/util/cldr/CLDRTimeZoneNameProviderImpl.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,6 @@
 import java.util.ResourceBundle;
 import java.util.Set;
 import java.util.TimeZone;
-import java.util.stream.Collectors;
 import sun.util.calendar.ZoneInfoFile;
 import sun.util.locale.provider.LocaleProviderAdapter;
 import sun.util.locale.provider.LocaleResources;
@@ -73,8 +72,6 @@
 
     @Override
     protected String[] getDisplayNameArray(String id, Locale locale) {
-        // Use English for the ROOT locale
-        locale = locale.equals(Locale.ROOT) ? Locale.ENGLISH : locale;
         String[] namesSuper = super.getDisplayNameArray(id, locale);
 
         if (namesSuper == null) {
@@ -94,12 +91,12 @@
                 case "":
                     // Fill in empty elements
                     deriveFallbackName(namesSuper, i, locale,
-                                       !exists(namesSuper, INDEX_DST_LONG));
+                                       !TimeZone.getTimeZone(id).useDaylightTime());
                     break;
                 case NO_INHERITANCE_MARKER:
                     // CLDR's "no inheritance marker"
                     namesSuper[i] = toGMTFormat(id, i == INDEX_DST_LONG || i == INDEX_DST_SHORT,
-                                                i % 2 != 0, locale);
+                                                locale);
                     break;
                 default:
                     break;
@@ -121,24 +118,19 @@
 
     @Override
     protected String[][] getZoneStrings(Locale locale) {
-        // Use English for the ROOT locale
-        locale = locale.equals(Locale.ROOT) ? Locale.ENGLISH : locale;
         String[][] ret = super.getZoneStrings(locale);
 
         // Fill in for the empty names.
-        // English names are prefilled for performance.
-        if (!locale.equals(Locale.ENGLISH) &&
-            !locale.equals(Locale.US)) {
-            for (int zoneIndex = 0; zoneIndex < ret.length; zoneIndex++) {
-                deriveFallbackNames(ret[zoneIndex], locale);
-            }
+        for (int zoneIndex = 0; zoneIndex < ret.length; zoneIndex++) {
+            deriveFallbackNames(ret[zoneIndex], locale);
         }
         return ret;
     }
 
     // Derive fallback time zone name according to LDML's logic
     private void deriveFallbackNames(String[] names, Locale locale) {
-        boolean noDST = !exists(names, INDEX_DST_LONG);
+        boolean noDST = !TimeZone.getTimeZone(names[0]).useDaylightTime();
+
         for (int i = INDEX_STD_LONG; i <= INDEX_GEN_SHORT; i++) {
             deriveFallbackName(names, i, locale, noDST);
         }
@@ -152,11 +144,25 @@
                 // CLDR's "no inheritance marker"
                 names[index] = toGMTFormat(id,
                                     index == INDEX_DST_LONG || index == INDEX_DST_SHORT,
-                                    index % 2 != 0, locale);
+                                    locale);
             }
             return;
         }
 
+        // Check parent locale first
+        if (!exists(names, index)) {
+            CLDRLocaleProviderAdapter clpa = (CLDRLocaleProviderAdapter)LocaleProviderAdapter.forType(Type.CLDR);
+            var cands = clpa.getCandidateLocales("", locale);
+            if (cands.size() > 1) {
+                var parentLoc = cands.get(1); // immediate parent locale
+                String[] parentNames = super.getDisplayNameArray(id, parentLoc);
+                if (parentNames != null && !parentNames[index].isEmpty()) {
+                    names[index] = parentNames[index];
+                    return;
+                }
+            }
+        }
+
         // Check if COMPAT can substitute the name
         if (LocaleProviderAdapter.getAdapterPreference().contains(Type.JRE)) {
             String[] compatNames = (String[])LocaleProviderAdapter.forJRE()
@@ -173,37 +179,34 @@
             }
         }
 
+        // Region Fallback
+        if (regionFormatFallback(names, index, locale)) {
+            return;
+        }
+
         // Type Fallback
         if (noDST && typeFallback(names, index)) {
             return;
         }
 
-        // Region Fallback
-        if (regionFormatFallback(names, index, locale)) {
-            return;
-        }
-
         // last resort
-        if (!id.toUpperCase(Locale.ROOT).startsWith("UT")) {
-            names[index] = toGMTFormat(id,
-                                       index == INDEX_DST_LONG || index == INDEX_DST_SHORT,
-                                       index % 2 != 0,
-                                       locale);
-            // aliases of "GMT" timezone.
-            if ((exists(names, INDEX_STD_LONG)) && (id.startsWith("Etc/")
-                    || id.startsWith("GMT") || id.startsWith("Greenwich"))) {
-                switch (id) {
-                case "Etc/GMT":
-                case "Etc/GMT-0":
-                case "Etc/GMT+0":
-                case "Etc/GMT0":
-                case "GMT+0":
-                case "GMT-0":
-                case "GMT0":
-                case "Greenwich":
-                    names[INDEX_DST_LONG] = names[INDEX_GEN_LONG] = names[INDEX_STD_LONG];
-                    break;
-                }
+        names[index] = toGMTFormat(id,
+                                   index == INDEX_DST_LONG || index == INDEX_DST_SHORT,
+                                   locale);
+        // aliases of "GMT" timezone.
+        if ((exists(names, INDEX_STD_LONG)) && (id.startsWith("Etc/")
+                || id.startsWith("GMT") || id.startsWith("Greenwich"))) {
+            switch (id) {
+            case "Etc/GMT":
+            case "Etc/GMT-0":
+            case "Etc/GMT+0":
+            case "Etc/GMT0":
+            case "GMT+0":
+            case "GMT-0":
+            case "GMT0":
+            case "Greenwich":
+                names[INDEX_DST_LONG] = names[INDEX_GEN_LONG] = names[INDEX_STD_LONG];
+                break;
             }
         }
     }
@@ -217,12 +220,12 @@
     private boolean typeFallback(String[] names, int index) {
         // check generic
         int genIndex = INDEX_GEN_SHORT - index % 2;
-        if (!exists(names, index) && exists(names, genIndex)) {
+        if (!exists(names, index) && exists(names, genIndex) && !names[genIndex].startsWith("GMT")) {
             names[index] = names[genIndex];
         } else {
             // check standard
             int stdIndex = INDEX_STD_SHORT - index % 2;
-            if (!exists(names, index) && exists(names, stdIndex)) {
+            if (!exists(names, index) && exists(names, stdIndex) && !names[stdIndex].startsWith("GMT")) {
                 names[index] = names[stdIndex];
             }
         }
@@ -235,6 +238,7 @@
         LocaleResources lr = LocaleProviderAdapter.forType(Type.CLDR).getLocaleResources(l);
         ResourceBundle fd = lr.getJavaTimeFormatData();
 
+        id = TimeZoneNameUtility.canonicalTZID(id).orElse(id);
         String rgn = (String) lr.getTimeZoneNames("timezone.excity." + id);
         if (rgn == null && !id.startsWith("Etc") && !id.startsWith("SystemV")) {
             int slash = id.lastIndexOf('/');
@@ -264,7 +268,7 @@
         return exists(names, index);
     }
 
-    private String toGMTFormat(String id, boolean daylight, boolean isShort, Locale l) {
+    private String toGMTFormat(String id, boolean daylight, Locale l) {
         TimeZone tz = ZoneInfoFile.getZoneInfo(id);
         int offset = (tz.getRawOffset() + (daylight ? tz.getDSTSavings() : 0)) / 60000;
         LocaleResources lr = LocaleProviderAdapter.forType(Type.CLDR).getLocaleResources(l);
@@ -283,7 +287,7 @@
                 offset = -offset;
             }
             hourFormat = hourFormat
-                .replaceFirst("H+", (isShort ? "\\%1\\$d" : "\\%1\\$02d"))
+                .replaceFirst("H+", "\\%1\\$02d")
                 .replaceFirst("m+", "\\%2\\$02d");
             return MessageFormat.format(gmtFormat,
                     String.format(l, hourFormat, offset / 60, offset % 60));
--- a/src/java.base/share/conf/security/java.security	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/share/conf/security/java.security	Wed Mar 04 12:01:01 2020 +0100
@@ -276,15 +276,18 @@
 policy.url.1=file:${java.home}/conf/security/java.policy
 policy.url.2=file:${user.home}/.java.policy
 
-# whether or not we expand properties in the policy file
-# if this is set to false, properties (${...}) will not be expanded in policy
-# files.
+# Controls whether or not properties are expanded in policy and login
+# configuration files. If set to false, properties (${...}) will not
+# be expanded in policy and login configuration files. If commented out or
+# set to an empty string, the default value is "false" for policy files and
+# "true" for login configuration files.
 #
 policy.expandProperties=true
 
-# whether or not we allow an extra policy to be passed on the command line
-# with -Djava.security.policy=somefile. Comment out this line to disable
-# this feature.
+# Controls whether or not an extra policy or login configuration file is
+# allowed to be passed on the command line with -Djava.security.policy=somefile
+# or -Djava.security.auth.login.config=somefile. If commented out or set to
+# an empty string, the default value is "false".
 #
 policy.allowSystemProperty=true
 
--- a/src/java.base/unix/native/libjava/childproc.c	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/unix/native/libjava/childproc.c	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
 
 #include "childproc.h"
 
+const char * const *parentPathv;
 
 ssize_t
 restartableWrite(int fd, const void *buf, size_t count)
--- a/src/java.base/unix/native/libjava/childproc.h	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.base/unix/native/libjava/childproc.h	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -126,7 +126,7 @@
  * The cached and split version of the JDK's effective PATH.
  * (We don't support putenv("PATH=...") in native code)
  */
-const char * const *parentPathv;
+extern const char * const *parentPathv;
 
 ssize_t restartableWrite(int fd, const void *buf, size_t count);
 int restartableDup2(int fd_from, int fd_to);
--- a/src/java.desktop/macosx/classes/sun/lwawt/LWComponentPeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/macosx/classes/sun/lwawt/LWComponentPeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,8 +53,6 @@
 import java.awt.event.MouseWheelEvent;
 import java.awt.event.PaintEvent;
 import java.awt.image.ColorModel;
-import java.awt.image.ImageObserver;
-import java.awt.image.ImageProducer;
 import java.awt.image.VolatileImage;
 import java.awt.peer.ComponentPeer;
 import java.awt.peer.ContainerPeer;
@@ -75,7 +73,6 @@
 import sun.awt.SunToolkit;
 import sun.awt.event.IgnorePaintEvent;
 import sun.awt.image.SunVolatileImage;
-import sun.awt.image.ToolkitImage;
 import sun.java2d.SunGraphics2D;
 import sun.java2d.opengl.OGLRenderQueue;
 import sun.java2d.pipe.Region;
@@ -994,11 +991,6 @@
     }
 
     @Override
-    public final Image createImage(final ImageProducer producer) {
-        return new ToolkitImage(producer);
-    }
-
-    @Override
     public final Image createImage(final int width, final int height) {
         return getLWGC().createAcceleratedImage(getTarget(), width, height);
     }
@@ -1009,18 +1001,6 @@
     }
 
     @Override
-    public boolean prepareImage(Image img, int w, int h, ImageObserver o) {
-        // TODO: is it a right/complete implementation?
-        return Toolkit.getDefaultToolkit().prepareImage(img, w, h, o);
-    }
-
-    @Override
-    public int checkImage(Image img, int w, int h, ImageObserver o) {
-        // TODO: is it a right/complete implementation?
-        return Toolkit.getDefaultToolkit().checkImage(img, w, h, o);
-    }
-
-    @Override
     public boolean handlesWheelScrolling() {
         // TODO: not implemented
         return false;
--- a/src/java.desktop/macosx/classes/sun/lwawt/macosx/CFileDialog.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/macosx/classes/sun/lwawt/macosx/CFileDialog.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,15 +25,34 @@
 
 package sun.lwawt.macosx;
 
-import java.awt.*;
+import java.awt.AWTEvent;
+import java.awt.AWTException;
+import java.awt.BufferCapabilities;
+import java.awt.BufferCapabilities.FlipContents;
+import java.awt.Color;
+import java.awt.Component;
+import java.awt.Dialog;
+import java.awt.Dimension;
+import java.awt.FileDialog;
+import java.awt.Font;
+import java.awt.FontMetrics;
+import java.awt.Graphics;
+import java.awt.GraphicsConfiguration;
+import java.awt.Image;
+import java.awt.Insets;
+import java.awt.Point;
+import java.awt.Window;
 import java.awt.event.FocusEvent.Cause;
-import java.awt.peer.*;
-import java.awt.BufferCapabilities.FlipContents;
-import java.awt.event.*;
-import java.awt.image.*;
+import java.awt.event.PaintEvent;
+import java.awt.image.ColorModel;
+import java.awt.image.VolatileImage;
+import java.awt.peer.ComponentPeer;
+import java.awt.peer.ContainerPeer;
+import java.awt.peer.FileDialogPeer;
+import java.io.File;
+import java.io.FilenameFilter;
 import java.security.AccessController;
 import java.util.List;
-import java.io.*;
 
 import sun.awt.AWTAccessor;
 import sun.java2d.pipe.Region;
@@ -249,11 +268,6 @@
     }
 
     @Override
-    public int checkImage(Image img, int w, int h, ImageObserver o) {
-        return 0;
-    }
-
-    @Override
     public void coalescePaintEvent(PaintEvent e) {
     }
 
@@ -263,11 +277,6 @@
     }
 
     @Override
-    public Image createImage(ImageProducer producer) {
-        return null;
-    }
-
-    @Override
     public Image createImage(int width, int height) {
         return null;
     }
@@ -358,11 +367,6 @@
     }
 
     @Override
-    public boolean prepareImage(Image img, int w, int h, ImageObserver o) {
-        return false;
-    }
-
-    @Override
     public void print(Graphics g) {
     }
 
--- a/src/java.desktop/share/classes/java/awt/Component.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/java/awt/Component.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1995, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3633,10 +3633,6 @@
      * @since     1.0
      */
     public Image createImage(ImageProducer producer) {
-        ComponentPeer peer = this.peer;
-        if ((peer != null) && ! (peer instanceof LightweightPeer)) {
-            return peer.createImage(producer);
-        }
         return getToolkit().createImage(producer);
     }
 
@@ -3752,16 +3748,7 @@
      */
     public boolean prepareImage(Image image, int width, int height,
                                 ImageObserver observer) {
-        ComponentPeer peer = this.peer;
-        if (peer instanceof LightweightPeer) {
-            return (parent != null)
-                ? parent.prepareImage(image, width, height, observer)
-                : getToolkit().prepareImage(image, width, height, observer);
-        } else {
-            return (peer != null)
-                ? peer.prepareImage(image, width, height, observer)
-                : getToolkit().prepareImage(image, width, height, observer);
-        }
+        return getToolkit().prepareImage(image, width, height, observer);
     }
 
     /**
@@ -3824,16 +3811,7 @@
      */
     public int checkImage(Image image, int width, int height,
                           ImageObserver observer) {
-        ComponentPeer peer = this.peer;
-        if (peer instanceof LightweightPeer) {
-            return (parent != null)
-                ? parent.checkImage(image, width, height, observer)
-                : getToolkit().checkImage(image, width, height, observer);
-        } else {
-            return (peer != null)
-                ? peer.checkImage(image, width, height, observer)
-                : getToolkit().checkImage(image, width, height, observer);
-        }
+        return getToolkit().checkImage(image, width, height, observer);
     }
 
     /**
--- a/src/java.desktop/share/classes/java/awt/RenderingHints.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/java/awt/RenderingHints.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,14 +25,14 @@
 
 package java.awt;
 
-import java.util.Map;
-import java.util.Set;
+import java.lang.ref.WeakReference;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
 import sun.awt.SunHints;
-import java.lang.ref.WeakReference;
 
 /**
  * The {@code RenderingHints} class defines and manages collections of
@@ -1255,8 +1255,7 @@
      * returned {@code Set} is a {@code Map.Entry}.
      * The {@code Set} is backed by the {@code RenderingHints},
      * so changes to the {@code RenderingHints} are reflected
-     * in the {@code Set}, and vice-versa.  If the
-     * {@code RenderingHints} is modified while
+     * in the {@code Set}. If the {@code RenderingHints} is modified while
      * while an iteration over the {@code Set} is in progress,
      * the results of the iteration are undefined.
      * <p>
--- a/src/java.desktop/share/classes/java/awt/peer/ComponentPeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/java/awt/peer/ComponentPeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,17 +25,26 @@
 
 package java.awt.peer;
 
-import java.awt.*;
+import java.awt.AWTEvent;
+import java.awt.AWTException;
+import java.awt.BufferCapabilities;
+import java.awt.Color;
+import java.awt.Component;
+import java.awt.Dimension;
+import java.awt.EventQueue;
+import java.awt.Font;
+import java.awt.FontMetrics;
+import java.awt.Graphics;
+import java.awt.GraphicsConfiguration;
+import java.awt.Image;
+import java.awt.Point;
+import java.awt.event.FocusEvent.Cause;
 import java.awt.event.PaintEvent;
-import java.awt.event.FocusEvent.Cause;
 import java.awt.image.ColorModel;
-import java.awt.image.ImageObserver;
-import java.awt.image.ImageProducer;
 import java.awt.image.VolatileImage;
 
 import sun.java2d.pipe.Region;
 
-
 /**
  * The peer interface for {@link Component}. This is the top level peer
  * interface for widgets and defines the bulk of methods for AWT component
@@ -355,18 +364,6 @@
     boolean isFocusable();
 
     /**
-     * Creates an image using the specified image producer.
-     *
-     * @param producer the image producer from which the image pixels will be
-     *        produced
-     *
-     * @return the created image
-     *
-     * @see Component#createImage(ImageProducer)
-     */
-    Image createImage(ImageProducer producer);
-
-    /**
      * Creates an empty image with the specified width and height. This is
      * generally used as a non-accelerated backbuffer for drawing onto the
      * component (e.g. by Swing).
@@ -398,38 +395,6 @@
     VolatileImage createVolatileImage(int width, int height);
 
     /**
-     * Prepare the specified image for rendering on this component. This should
-     * start loading the image (if not already loaded) and create an
-     * appropriate screen representation.
-     *
-     * @param img the image to prepare
-     * @param w the width of the screen representation
-     * @param h the height of the screen representation
-     * @param o an image observer to observe the progress
-     *
-     * @return {@code true} if the image is already fully prepared,
-     *         {@code false} otherwise
-     *
-     * @see Component#prepareImage(Image, int, int, ImageObserver)
-     */
-    boolean prepareImage(Image img, int w, int h, ImageObserver o);
-
-    /**
-     * Determines the status of the construction of the screen representation
-     * of the specified image.
-     *
-     * @param img the image to check
-     * @param w the target width
-     * @param h the target height
-     * @param o the image observer to notify
-     *
-     * @return the status as bitwise ORed ImageObserver flags
-     *
-     * @see Component#checkImage(Image, int, int, ImageObserver)
-     */
-    int checkImage(Image img, int w, int h, ImageObserver o);
-
-    /**
      * Returns the graphics configuration that corresponds to this component.
      *
      * @return the graphics configuration that corresponds to this component
--- a/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTreeUI.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/javax/swing/plaf/basic/BasicTreeUI.java	Wed Mar 04 12:01:01 2020 +0100
@@ -403,9 +403,12 @@
     //
 
     /**
-     * Updates the componentListener, if necessary.
+     * Sets the {@code largeModel}.
      *
-     * @param largeModel the new value
+     * Called when the {@code largeModel} property is changed in the drawn tree
+     * component.
+     *
+     * @param largeModel the new value of the {@code largeModel} property
      */
     protected void setLargeModel(boolean largeModel) {
         if(getRowHeight() < 1)
@@ -432,7 +435,10 @@
     /**
      * Sets the row height, this is forwarded to the treeState.
      *
-     * @param rowHeight the row height
+     * Called when the {@code rowHeight} property is changed in
+     * the drawn tree component.
+     *
+     * @param rowHeight the new value of the {@code rowHeight} property
      */
     protected void setRowHeight(int rowHeight) {
         completeEditing();
@@ -444,9 +450,11 @@
     }
 
     /**
-     * Returns the row height.
+     * Returns the height of each row in the drawn tree component. If the
+     * returned value is less than or equal to 0 the height for each row is
+     * determined by the renderer.
      *
-     * @return the row height
+     * @return the height of each row, in pixels
      */
     protected int getRowHeight() {
         return (tree == null) ? -1 : tree.getRowHeight();
@@ -456,7 +464,10 @@
      * Sets the {@code TreeCellRenderer} to {@code tcr}. This invokes
      * {@code updateRenderer}.
      *
-     * @param tcr the new value
+     * Called when the {@code cellRenderer} property is changed in
+     * the drawn tree component.
+     *
+     * @param tcr the new value of the {@code cellRenderer} property
      */
     protected void setCellRenderer(TreeCellRenderer tcr) {
         completeEditing();
@@ -468,10 +479,10 @@
     }
 
     /**
-     * Return {@code currentCellRenderer}, which will either be the trees
-     * renderer, or {@code defaultCellRenderer}, which ever wasn't null.
+     * Returns the current instance of the {@link TreeCellRenderer} that is
+     * rendering each cell.
      *
-     * @return an instance of {@code TreeCellRenderer}
+     * @return the {@link TreeCellRenderer} instance
      */
     protected TreeCellRenderer getCellRenderer() {
         return currentCellRenderer;
@@ -510,7 +521,10 @@
     /**
      * Sets the root to being visible.
      *
-     * @param newValue the new value
+     * Called when the {@code rootVisible} property is changed in the drawn tree
+     * component.
+     *
+     * @param newValue the new value of the {@code rootVisible} property
      */
     protected void setRootVisible(boolean newValue) {
         completeEditing();
@@ -523,9 +537,9 @@
     }
 
     /**
-     * Returns {@code true} if the tree root is visible.
+     * Returns whether the root node of the drawn tree component should be displayed.
      *
-     * @return {@code true} if the tree root is visible
+     * @return {@code true} if the root node of the tree is displayed
      */
     protected boolean isRootVisible() {
         return (tree != null) ? tree.isRootVisible() : false;
@@ -534,7 +548,10 @@
     /**
      * Determines whether the node handles are to be displayed.
      *
-     * @param newValue the new value
+     * Called when the {@code showsRootHandles} property is changed in the drawn
+     * tree component.
+     *
+     * @param newValue the new value of the {@code showsRootHandles} property
      */
     protected void setShowsRootHandles(boolean newValue) {
         completeEditing();
@@ -557,16 +574,20 @@
     /**
      * Sets the cell editor.
      *
-     * @param editor the new cell editor
+     * Called when the {@code cellEditor} property is changed in the drawn tree
+     * component.
+     *
+     * @param editor the new value of the {@code cellEditor} property
      */
     protected void setCellEditor(TreeCellEditor editor) {
         updateCellEditor();
     }
 
     /**
-     * Returns an instance of {@code TreeCellEditor}.
+     * Returns the editor used to edit entries in the drawn tree component, or
+     * {@code null} if the tree cannot be edited.
      *
-     * @return an instance of {@code TreeCellEditor}
+     * @return the {@link TreeCellEditor} instance, or {@code null}
      */
     protected TreeCellEditor getCellEditor() {
         return (tree != null) ? tree.getCellEditor() : null;
@@ -575,14 +596,17 @@
     /**
      * Configures the receiver to allow, or not allow, editing.
      *
-     * @param newValue the new value
+     * Called when the {@code editable} property is changed in the drawn tree
+     * component.
+     *
+     * @param newValue the new value of the {@code editable} property
      */
     protected void setEditable(boolean newValue) {
         updateCellEditor();
     }
 
     /**
-     * Returns {@code true} if the tree is editable.
+     * Returns whether the drawn tree component should be enabled for editing.
      *
      * @return {@code true} if the tree is editable
      */
@@ -594,7 +618,10 @@
      * Resets the selection model. The appropriate listener are installed
      * on the model.
      *
-     * @param newLSM new selection model
+     * Called when the {@code selectionModel} property is changed in the drawn tree
+     * component.
+     *
+     * @param newLSM the new value of the {@code selectionModel} property
      */
     protected void setSelectionModel(TreeSelectionModel newLSM) {
         completeEditing();
@@ -623,9 +650,10 @@
     }
 
     /**
-     * Returns the tree selection model.
+     * Returns the current instance of the {@link TreeSelectionModel} which is
+     * the model for selections.
      *
-     * @return the tree selection model
+     * @return the {@link TreeSelectionModel} instance
      */
     protected TreeSelectionModel getSelectionModel() {
         return treeSelectionModel;
--- a/src/java.desktop/share/classes/sun/awt/NullComponentPeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/sun/awt/NullComponentPeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,29 +31,27 @@
 import java.awt.Component;
 import java.awt.Cursor;
 import java.awt.Dimension;
+import java.awt.Event;
 import java.awt.Font;
 import java.awt.FontMetrics;
 import java.awt.Graphics;
 import java.awt.GraphicsConfiguration;
 import java.awt.Image;
 import java.awt.Insets;
+import java.awt.Point;
+import java.awt.Rectangle;
 import java.awt.event.FocusEvent.Cause;
-import java.awt.Point;
-import java.awt.Event;
 import java.awt.event.PaintEvent;
 import java.awt.image.ColorModel;
-import java.awt.image.ImageObserver;
-import java.awt.image.ImageProducer;
 import java.awt.image.VolatileImage;
 import java.awt.peer.CanvasPeer;
+import java.awt.peer.ComponentPeer;
+import java.awt.peer.ContainerPeer;
 import java.awt.peer.LightweightPeer;
 import java.awt.peer.PanelPeer;
-import java.awt.peer.ComponentPeer;
-import java.awt.peer.ContainerPeer;
-import java.awt.Rectangle;
+
 import sun.java2d.pipe.Region;
 
-
 /**
  * Implements the LightweightPeer interface for use in lightweight components
  * that have no native window associated with them.  This gets created by
@@ -74,7 +72,6 @@
  * @author Timothy Prinzing
  * @author Michael Martak
  */
-
 public class NullComponentPeer implements LightweightPeer,
     CanvasPeer, PanelPeer {
 
@@ -183,22 +180,10 @@
         return false;
     }
 
-    public Image createImage(ImageProducer producer) {
-        return null;
-    }
-
     public Image createImage(int width, int height) {
         return null;
     }
 
-    public boolean prepareImage(Image img, int w, int h, ImageObserver o) {
-        return false;
-    }
-
-    public int  checkImage(Image img, int w, int h, ImageObserver o) {
-        return 0;
-    }
-
     public Dimension preferredSize() {
         return getPreferredSize();
     }
--- a/src/java.desktop/share/classes/sun/font/FileFontStrike.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/sun/font/FileFontStrike.java	Wed Mar 04 12:01:01 2020 +0100
@@ -328,7 +328,8 @@
                                                   int style,
                                                   int size,
                                                   int glyphCode,
-                                                  boolean fracMetrics);
+                                                  boolean fracMetrics,
+                                                  int fontDataSize);
 
     long getGlyphImageFromWindows(int glyphCode) {
         String family = fileFont.getFamilyName(null);
@@ -337,7 +338,8 @@
         int size = intPtSize;
         long ptr = _getGlyphImageFromWindows
             (family, style, size, glyphCode,
-             desc.fmHint == INTVAL_FRACTIONALMETRICS_ON);
+             desc.fmHint == INTVAL_FRACTIONALMETRICS_ON,
+             ((TrueTypeFont)fileFont).fontDataSize);
         if (ptr != 0) {
             /* Get the advance from the JDK rasterizer. This is mostly
              * necessary for the fractional metrics case, but there are
@@ -351,6 +353,12 @@
                                         advance);
             return ptr;
         } else {
+            if (FontUtilities.isLogging()) {
+                FontUtilities.getLogger().warning(
+                        "Failed to render glyph using GDI: code=" + glyphCode
+                                + ", fontFamily=" + family + ", style=" + style
+                                + ", size=" + size);
+            }
             return fileFont.getGlyphImage(pScalerContext, glyphCode);
         }
     }
--- a/src/java.desktop/share/classes/sun/font/SunFontManager.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/sun/font/SunFontManager.java	Wed Mar 04 12:01:01 2020 +0100
@@ -129,6 +129,8 @@
         }
     }
 
+    private static Font2DHandle FONT_HANDLE_NULL = new Font2DHandle(null);
+
      public static final int FONTFORMAT_NONE = -1;
      public static final int FONTFORMAT_TRUETYPE = 0;
      public static final int FONTFORMAT_TYPE1 = 1;
@@ -949,7 +951,7 @@
                             .info("Opening deferred font file " + fileNameKey);
         }
 
-        PhysicalFont physicalFont;
+        PhysicalFont physicalFont = null;
         FontRegistrationInfo regInfo = deferredFontFiles.get(fileNameKey);
         if (regInfo != null) {
             deferredFontFiles.remove(fileNameKey);
@@ -959,21 +961,19 @@
                                             regInfo.javaRasterizer,
                                             regInfo.fontRank);
 
-
             if (physicalFont != null) {
                 /* Store the handle, so that if a font is bad, we
                  * retrieve the substituted font.
                  */
                 initialisedFonts.put(fileNameKey, physicalFont.handle);
             } else {
-                initialisedFonts.put(fileNameKey,
-                                     getDefaultPhysicalFont().handle);
+                initialisedFonts.put(fileNameKey, FONT_HANDLE_NULL);
             }
         } else {
             Font2DHandle handle = initialisedFonts.get(fileNameKey);
             if (handle == null) {
                 /* Probably shouldn't happen, but just in case */
-                physicalFont = getDefaultPhysicalFont();
+                initialisedFonts.put(fileNameKey, FONT_HANDLE_NULL);
             } else {
                 physicalFont = (PhysicalFont)(handle.font2D);
             }
@@ -1080,15 +1080,20 @@
      */
     public PhysicalFont getDefaultPhysicalFont() {
         if (defaultPhysicalFont == null) {
-            /* findFont2D will load all fonts before giving up the search.
-             * If the JRE Lucida isn't found (eg because the JRE fonts
-             * directory is missing), it could find another version of Lucida
-             * from the host system. This is OK because at that point we are
-             * trying to gracefully handle/recover from a system
-             * misconfiguration and this is probably a reasonable substitution.
-             */
-            defaultPhysicalFont = (PhysicalFont)
-                findFont2D(getDefaultFontFaceName(), Font.PLAIN, NO_FALLBACK);
+            String defaultFontName = getDefaultFontFaceName();
+            // findFont2D will load all fonts
+            Font2D font2d = findFont2D(defaultFontName, Font.PLAIN, NO_FALLBACK);
+            if (font2d != null) {
+                if (font2d instanceof PhysicalFont) {
+                    defaultPhysicalFont = (PhysicalFont)font2d;
+                } else {
+                    if (FontUtilities.isLogging()) {
+                        FontUtilities.getLogger()
+                            .warning("Font returned by findFont2D for default font name " +
+                                     defaultFontName + " is not a physical font: " + font2d.getFontName(null));
+                    }
+                }
+            }
             if (defaultPhysicalFont == null) {
                 /* Because of the findFont2D call above, if we reach here, we
                  * know all fonts have already been loaded, just accept any
@@ -1096,12 +1101,8 @@
                  * and I don't know how to recover from there being absolutely
                  * no fonts anywhere on the system.
                  */
-                Iterator<PhysicalFont> i = physicalFonts.values().iterator();
-                if (i.hasNext()) {
-                    defaultPhysicalFont = i.next();
-                } else {
-                    throw new Error("Probable fatal error:No fonts found.");
-                }
+                defaultPhysicalFont = physicalFonts.values().stream().findFirst()
+                    .orElseThrow(()->new Error("Probable fatal error: No physical fonts found."));
             }
         }
         return defaultPhysicalFont;
--- a/src/java.desktop/share/classes/sun/font/TrueTypeFont.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/sun/font/TrueTypeFont.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,9 @@
 import java.nio.ShortBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.nio.channels.FileChannel;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -180,6 +183,15 @@
     private String localeFamilyName;
     private String localeFullName;
 
+    /*
+     * Used on Windows to validate the font selected by GDI for (sub-pixel
+     * antialiased) rendering. For 'standalone' fonts it's equal to the font
+     * file size, for collection (TTC, OTC) members it's the number of bytes in
+     * the collection file from the start of this font's offset table till the
+     * end of the file.
+     */
+    int fontDataSize;
+
     public TrueTypeFont(String platname, Object nativeNames, int fIndex,
                  boolean javaRasterizer)
         throws FontFormatException
@@ -312,15 +324,10 @@
                 FontUtilities.getLogger().info("open TTF: " + platName);
             }
             try {
-                RandomAccessFile raf = (RandomAccessFile)
-                java.security.AccessController.doPrivileged(
-                    new java.security.PrivilegedAction<Object>() {
-                        public Object run() {
-                            try {
-                                return new RandomAccessFile(platName, "r");
-                            } catch (FileNotFoundException ffne) {
-                            }
-                            return null;
+                RandomAccessFile raf = AccessController.doPrivileged(
+                    new PrivilegedExceptionAction<RandomAccessFile>() {
+                        public RandomAccessFile run() throws FileNotFoundException {
+                            return new RandomAccessFile(platName, "r");
                     }
                 });
                 disposerRecord.channel = raf.getChannel();
@@ -331,9 +338,13 @@
                         ((SunFontManager) fm).addToPool(this);
                     }
                 }
-            } catch (NullPointerException e) {
+            } catch (PrivilegedActionException e) {
                 close();
-                throw new FontFormatException(e.toString());
+                Throwable reason = e.getCause();
+                if (reason == null) {
+                    reason = e;
+                }
+                throw new FontFormatException(reason.toString());
             } catch (ClosedChannelException e) {
                 /* NIO I/O is interruptible, recurse to retry operation.
                  * The call to channel.size() above can throw this exception.
@@ -537,11 +548,13 @@
                 fontIndex = fIndex;
                 buffer = readBlock(TTCHEADERSIZE+4*fIndex, 4);
                 headerOffset = buffer.getInt();
+                fontDataSize = Math.max(0, fileSize - headerOffset);
                 break;
 
             case v1ttTag:
             case trueTag:
             case ottoTag:
+                fontDataSize = fileSize;
                 break;
 
             default:
--- a/src/java.desktop/share/classes/sun/java2d/SunGraphicsEnvironment.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/share/classes/sun/java2d/SunGraphicsEnvironment.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -364,7 +364,38 @@
         AffineTransform tx = gc.getDefaultTransform();
         x = Region.clipRound(x * tx.getScaleX());
         y = Region.clipRound(y * tx.getScaleY());
-
         return new Point((int) x, (int) y);
     }
+
+    /**
+     * Converts bounds from the user's space to the device space using
+     * appropriate device transformation.
+     *
+     * @param  bounds the rectangle in the user space
+     * @return the rectangle which uses device space(pixels)
+     */
+    public static Rectangle convertToDeviceSpace(Rectangle bounds) {
+        GraphicsConfiguration gc = getLocalGraphicsEnvironment()
+                .getDefaultScreenDevice().getDefaultConfiguration();
+        gc = getGraphicsConfigurationAtPoint(gc, bounds.x, bounds.y);
+        return convertToDeviceSpace(gc, bounds);
+    }
+
+    /**
+     * Converts bounds from the user's space to the device space using
+     * appropriate device transformation of the passed graphics configuration.
+     *
+     * @param  bounds the rectangle in the user space
+     * @return the rectangle which uses device space(pixels)
+     */
+    public static Rectangle convertToDeviceSpace(GraphicsConfiguration gc,
+                                                 Rectangle bounds) {
+        AffineTransform tx = gc.getDefaultTransform();
+        return new Rectangle(
+                Region.clipRound(bounds.x * tx.getScaleX()),
+                Region.clipRound(bounds.y * tx.getScaleY()),
+                Region.clipRound(bounds.width * tx.getScaleX()),
+                Region.clipRound(bounds.height * tx.getScaleY())
+        );
+    }
 }
--- a/src/java.desktop/unix/classes/sun/awt/X11/XComponentPeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/unix/classes/sun/awt/X11/XComponentPeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
+
 package sun.awt.X11;
 
 import java.awt.AWTEvent;
@@ -52,8 +53,6 @@
 import java.awt.event.MouseWheelEvent;
 import java.awt.event.PaintEvent;
 import java.awt.event.WindowEvent;
-import java.awt.image.ImageObserver;
-import java.awt.image.ImageProducer;
 import java.awt.image.VolatileImage;
 import java.awt.peer.ComponentPeer;
 import java.awt.peer.ContainerPeer;
@@ -61,14 +60,15 @@
 import java.util.Objects;
 import java.util.Set;
 
+import sun.awt.AWTAccessor;
 import sun.awt.AWTAccessor.ComponentAccessor;
-import sun.util.logging.PlatformLogger;
-import sun.awt.*;
+import sun.awt.SunToolkit;
+import sun.awt.X11GraphicsConfig;
 import sun.awt.event.IgnorePaintEvent;
 import sun.awt.image.SunVolatileImage;
-import sun.awt.image.ToolkitImage;
 import sun.java2d.BackBufferCapsProvider;
 import sun.java2d.pipe.Region;
+import sun.util.logging.PlatformLogger;
 
 
 public class XComponentPeer extends XWindow implements ComponentPeer, DropTargetPeer,
@@ -710,10 +710,6 @@
         }
     }
 
-    public Image createImage(ImageProducer producer) {
-        return new ToolkitImage(producer);
-    }
-
     public Image createImage(int width, int height) {
         return graphicsConfig.createAcceleratedImage(target, width, height);
     }
@@ -722,14 +718,6 @@
         return new SunVolatileImage(target, width, height);
     }
 
-    public boolean prepareImage(Image img, int w, int h, ImageObserver o) {
-        return Toolkit.getDefaultToolkit().prepareImage(img, w, h, o);
-    }
-
-    public int checkImage(Image img, int w, int h, ImageObserver o) {
-        return Toolkit.getDefaultToolkit().checkImage(img, w, h, o);
-    }
-
     public Insets getInsets() {
         return new Insets(0, 0, 0, 0);
     }
--- a/src/java.desktop/unix/classes/sun/awt/X11/XEmbedChildProxyPeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/unix/classes/sun/awt/X11/XEmbedChildProxyPeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,15 +25,34 @@
 
 package sun.awt.X11;
 
-import java.awt.*;
-import java.awt.event.*;
+import java.awt.AWTEvent;
+import java.awt.AWTException;
+import java.awt.BufferCapabilities;
+import java.awt.Color;
+import java.awt.Component;
+import java.awt.Container;
+import java.awt.Dimension;
+import java.awt.Font;
+import java.awt.FontMetrics;
+import java.awt.Graphics;
+import java.awt.GraphicsConfiguration;
+import java.awt.Image;
+import java.awt.Point;
+import java.awt.Rectangle;
+import java.awt.Toolkit;
+import java.awt.Window;
+import java.awt.event.ComponentEvent;
+import java.awt.event.FocusEvent;
+import java.awt.event.InputEvent;
+import java.awt.event.InvocationEvent;
+import java.awt.event.KeyEvent;
+import java.awt.event.PaintEvent;
 import java.awt.image.ColorModel;
-import java.awt.image.ImageObserver;
-import java.awt.image.ImageProducer;
 import java.awt.image.VolatileImage;
-import java.awt.peer.*;
+import java.awt.peer.ComponentPeer;
+import java.awt.peer.ContainerPeer;
+
 import sun.java2d.pipe.Region;
-import sun.awt.*;
 
 public class XEmbedChildProxyPeer implements ComponentPeer, XEventDispatcher{
     XEmbeddingContainer container;
@@ -248,11 +267,8 @@
         return true;
     }
 
-    public Image                createImage(ImageProducer producer) { return null; }
     public Image                createImage(int width, int height) { return null; }
     public VolatileImage        createVolatileImage(int width, int height) { return null; }
-    public boolean              prepareImage(Image img, int w, int h, ImageObserver o) { return false; }
-    public int                  checkImage(Image img, int w, int h, ImageObserver o) { return 0; }
     public GraphicsConfiguration getGraphicsConfiguration() { return null; }
     public boolean     handlesWheelScrolling() { return true; }
     public void createBuffers(int numBuffers, BufferCapabilities caps)
--- a/src/java.desktop/windows/classes/sun/awt/windows/WChoicePeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/windows/classes/sun/awt/windows/WChoicePeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,14 +22,19 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
+
 package sun.awt.windows;
 
-import java.awt.*;
-import java.awt.peer.*;
+import java.awt.Choice;
+import java.awt.Component;
+import java.awt.Dimension;
+import java.awt.FontMetrics;
+import java.awt.Window;
 import java.awt.event.ItemEvent;
+import java.awt.event.WindowAdapter;
 import java.awt.event.WindowEvent;
 import java.awt.event.WindowListener;
-import java.awt.event.WindowAdapter;
+import java.awt.peer.ChoicePeer;
 
 import sun.awt.AWTAccessor;
 import sun.awt.SunToolkit;
@@ -154,12 +159,5 @@
         });
     }
 
-    int getDropDownHeight() {
-        Choice c = (Choice)target;
-        FontMetrics fm = getFontMetrics(c.getFont());
-        int maxItems = Math.min(c.getItemCount(), 8);
-        return fm.getHeight() * maxItems;
-    }
-
     native void closeList();
 }
--- a/src/java.desktop/windows/classes/sun/awt/windows/WComponentPeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/windows/classes/sun/awt/windows/WComponentPeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,40 +24,53 @@
  */
 package sun.awt.windows;
 
-import java.awt.*;
-import java.awt.peer.*;
-import java.awt.image.VolatileImage;
-import sun.awt.RepaintArea;
-import sun.awt.image.SunVolatileImage;
-import sun.awt.image.ToolkitImage;
-import java.awt.image.BufferedImage;
-import java.awt.image.ImageProducer;
-import java.awt.image.ImageObserver;
-import java.awt.image.ColorModel;
-import java.awt.event.PaintEvent;
+import java.awt.AWTEvent;
+import java.awt.AWTException;
+import java.awt.BufferCapabilities;
+import java.awt.Color;
+import java.awt.Component;
+import java.awt.Container;
+import java.awt.Dimension;
+import java.awt.Font;
+import java.awt.FontMetrics;
+import java.awt.Graphics;
+import java.awt.GraphicsConfiguration;
+import java.awt.GraphicsDevice;
+import java.awt.Image;
+import java.awt.Point;
+import java.awt.Rectangle;
+import java.awt.SystemColor;
+import java.awt.Window;
+import java.awt.dnd.DropTarget;
+import java.awt.dnd.peer.DropTargetPeer;
+import java.awt.event.FocusEvent;
+import java.awt.event.InputEvent;
 import java.awt.event.InvocationEvent;
 import java.awt.event.KeyEvent;
-import java.awt.event.FocusEvent;
 import java.awt.event.MouseEvent;
 import java.awt.event.MouseWheelEvent;
-import java.awt.event.InputEvent;
+import java.awt.event.PaintEvent;
+import java.awt.geom.AffineTransform;
+import java.awt.image.BufferedImage;
+import java.awt.image.ColorModel;
+import java.awt.image.VolatileImage;
+import java.awt.peer.ComponentPeer;
+import java.awt.peer.ContainerPeer;
+
+import sun.awt.AWTAccessor;
+import sun.awt.PaintEventDispatcher;
+import sun.awt.RepaintArea;
+import sun.awt.SunToolkit;
 import sun.awt.Win32GraphicsConfig;
 import sun.awt.Win32GraphicsEnvironment;
+import sun.awt.event.IgnorePaintEvent;
+import sun.awt.image.SunVolatileImage;
 import sun.java2d.InvalidPipeException;
+import sun.java2d.ScreenUpdateManager;
 import sun.java2d.SurfaceData;
-import sun.java2d.ScreenUpdateManager;
 import sun.java2d.d3d.D3DSurfaceData;
 import sun.java2d.opengl.OGLSurfaceData;
 import sun.java2d.pipe.Region;
-import sun.awt.PaintEventDispatcher;
-import sun.awt.SunToolkit;
-import sun.awt.event.IgnorePaintEvent;
-
-import java.awt.dnd.DropTarget;
-import java.awt.dnd.peer.DropTargetPeer;
-import java.awt.geom.AffineTransform;
-import sun.awt.AWTAccessor;
-
 import sun.util.logging.PlatformLogger;
 
 public abstract class WComponentPeer extends WObjectPeer
@@ -751,11 +764,6 @@
     }
 
     @Override
-    public Image createImage(ImageProducer producer) {
-        return new ToolkitImage(producer);
-    }
-
-    @Override
     public Image createImage(int width, int height) {
         Win32GraphicsConfig gc =
             (Win32GraphicsConfig)getGraphicsConfiguration();
@@ -767,16 +775,6 @@
         return new SunVolatileImage((Component)target, width, height);
     }
 
-    @Override
-    public boolean prepareImage(Image img, int w, int h, ImageObserver o) {
-        return Toolkit.getDefaultToolkit().prepareImage(img, w, h, o);
-    }
-
-    @Override
-    public int checkImage(Image img, int w, int h, ImageObserver o) {
-        return Toolkit.getDefaultToolkit().checkImage(img, w, h, o);
-    }
-
     // Object overrides
 
     public String toString() {
--- a/src/java.desktop/windows/classes/sun/awt/windows/WFramePeer.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/windows/classes/sun/awt/windows/WFramePeer.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,15 +22,24 @@
  * or visit www.oracle.com if you need additional information or have any
  * questions.
  */
+
 package sun.awt.windows;
 
-import java.awt.*;
-import java.awt.peer.*;
+import java.awt.Component;
+import java.awt.Dimension;
+import java.awt.Frame;
+import java.awt.GraphicsConfiguration;
+import java.awt.MenuBar;
+import java.awt.Rectangle;
+import java.awt.peer.FramePeer;
+import java.security.AccessController;
+
 import sun.awt.AWTAccessor;
 import sun.awt.im.InputMethodManager;
-import java.security.AccessController;
 import sun.security.action.GetPropertyAction;
 
+import static sun.java2d.SunGraphicsEnvironment.convertToDeviceSpace;
+
 class WFramePeer extends WWindowPeer implements FramePeer {
 
     static {
@@ -65,49 +74,51 @@
             "sun.awt.keepWorkingSetOnMinimize")));
 
     @Override
-    public void setMaximizedBounds(Rectangle b) {
+    public final void setMaximizedBounds(Rectangle b) {
         if (b == null) {
             clearMaximizedBounds();
         } else {
-            Rectangle adjBounds = (Rectangle)b.clone();
-            adjustMaximizedBounds(adjBounds);
-            setMaximizedBounds(adjBounds.x, adjBounds.y, adjBounds.width, adjBounds.height);
+            b = adjustMaximizedBounds(b);
+            setMaximizedBounds(b.x, b.y, b.width, b.height);
         }
     }
 
     /**
      * The incoming bounds describe the maximized size and position of the
-     * window on the monitor that displays the window. But the window manager
-     * expects that the bounds are based on the size and position of the
-     * primary monitor, even if the window ultimately maximizes onto a
-     * secondary monitor. And the window manager adjusts these values to
-     * compensate for differences between the primary monitor and the monitor
-     * that displays the window.
+     * window in the virtual coordinate system. But the window manager expects
+     * that the bounds are based on the size of the primary monitor and
+     * position is based on the actual window monitor, even if the window
+     * ultimately maximizes onto a secondary monitor. And the window manager
+     * adjusts these values to compensate for differences between the primary
+     * monitor and the monitor that displays the window.
+     * <p>
      * The method translates the incoming bounds to the values acceptable
      * by the window manager. For more details, please refer to 6699851.
      */
-    private void adjustMaximizedBounds(Rectangle b) {
-        GraphicsConfiguration currentDevGC = getGraphicsConfiguration();
+    private Rectangle adjustMaximizedBounds(Rectangle bounds) {
+        // All calculations should be done in the device space
+        bounds = convertToDeviceSpace(bounds);
 
-        GraphicsDevice primaryDev = GraphicsEnvironment
-            .getLocalGraphicsEnvironment().getDefaultScreenDevice();
-        GraphicsConfiguration primaryDevGC = primaryDev.getDefaultConfiguration();
-
-        if (currentDevGC != null && currentDevGC != primaryDevGC) {
-            Rectangle currentDevBounds = currentDevGC.getBounds();
-            Rectangle primaryDevBounds = primaryDevGC.getBounds();
-
-            boolean isCurrentDevLarger =
-                ((currentDevBounds.width - primaryDevBounds.width > 0) ||
-                 (currentDevBounds.height - primaryDevBounds.height > 0));
-
-            // the window manager doesn't seem to compensate for differences when
-            // the primary monitor is larger than the monitor that display the window
-            if (isCurrentDevLarger) {
-                b.width -= (currentDevBounds.width - primaryDevBounds.width);
-                b.height -= (currentDevBounds.height - primaryDevBounds.height);
-            }
-        }
+        GraphicsConfiguration gc = getGraphicsConfiguration();
+        Rectangle currentDevBounds = convertToDeviceSpace(gc, gc.getBounds());
+        // Prepare data for WM_GETMINMAXINFO message.
+        // ptMaxPosition should be in coordinate system of the current monitor,
+        // not the main monitor, or monitor on which we maximize the window.
+        bounds.x -= currentDevBounds.x;
+        bounds.y -= currentDevBounds.y;
+        // ptMaxSize will be used as-is if the size is smaller than the main
+        // monitor. If the size is larger than the main monitor then the
+        // window manager adjusts the size, like this:
+        // result = bounds.w + (current.w - main.w); =>> wrong size
+        // We can try to compensate for this adjustment like this:
+        // result = bounds.w - (current.w - main.w);
+        // but this can result to the size smaller than the main screen, so no
+        // adjustment will be done by the window manager =>> wrong size.
+        // So we skip compensation here and cut the adjustment on
+        // WM_WINDOWPOSCHANGING event.
+        // Note that the result does not depend on the monitor on which we
+        // maximize the window.
+        return bounds;
     }
 
     @Override
--- a/src/java.desktop/windows/classes/sun/awt/windows/WToolkit.java	Fri Feb 07 20:40:59 2020 +0000
+++ b/src/java.desktop/windows/classes/sun/awt/windows/WToolkit.java	Wed Mar 04 12:01:01 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,49 +25,98 @@
 
 package sun.awt.windows;
 
-import java.awt.peer.TaskbarPeer;
-import java.awt.*;
-import java.awt.im.InputMethodHighlight;
-import java.awt.im.spi.InputMethodDescriptor;
-import java.awt.image.*;
-import java.awt.peer.*;
+import java.awt.AWTEvent;
+import java.awt.AWTException;
+import java.awt.Button;
+import java.awt.Canvas;
+import java.awt.Checkbox;
+import java.awt.CheckboxMenuItem;
+import java.awt.Choice;
+import java.awt.Component;
+import java.awt.Cursor;
+import java.awt.Desktop;
+import java.awt.Dialog;
+import java.awt.Dimension;
+import java.awt.EventQueue;
+import java.awt.FileDialog;
+import java.awt.Font;
+import java.awt.FontMetrics;
+import java.awt.Frame;
+import java.awt.GraphicsConfiguration;
+import java.awt.GraphicsDevice;
+import java.awt.GraphicsEnvironment;
+import java.awt.HeadlessException;
+import java.awt.Image;
+import java.awt.Insets;
+import java.awt.JobAttributes;
+import java.awt.Label;
+import java.awt.List;
+import java.awt.Menu;
+import java.awt.MenuBar;
+import java.awt.MenuItem;
+import java.awt.PageAttributes;
+import java.awt.Panel;
+import java.awt.Point;
+import java.awt.PopupMenu;
+import java.awt.PrintJob;
+import java.awt.RenderingHints;
+import java.awt.Robot;
+import java.awt.ScrollPane;
+import java.awt.Scrollbar;
+import java.awt.SystemTray;
+import java.awt.Taskbar;
+import java.awt.TextArea;
+import java.awt.TextComponent;
+import java.awt.TextField;
+import java.awt.Toolkit;
+import java.awt.TrayIcon;
+import java.awt.Window;
+import java.awt.datatransfer.Clipboard;
+import java.awt.dnd.DragGestureEvent;
+import java.awt.dnd.DragGestureListener;
+import java.awt.dnd.DragGestureRecognizer;
+import java.awt.dnd.DragSource;
+import java.awt.dnd.InvalidDnDOperationException;
+import java.awt.dnd.MouseDragGestureRecognizer;
+import java.awt.dnd.peer.DragSourceContextPeer;
 import java.awt.event.FocusEvent;
 import java.awt.event.KeyEvent;
 import java.awt.event.MouseEvent;
-import java.awt.datatransfer.Clipboard;
-import java.awt.TextComponent;
-import java.awt.TrayIcon;
+import java.awt.im.InputMethodHighlight;
+import java.awt.im.spi.InputMethodDescriptor;
+import java.awt.image.ColorModel;
+import java.awt.peer.ButtonPeer;
+import java.awt.peer.CanvasPeer;
+import java.awt.peer.CheckboxMenuItemPeer;
+import java.awt.peer.CheckboxPeer;
+import java.awt.peer.ChoicePeer;
+import java.awt.peer.DesktopPeer;
+import java.awt.peer.DialogPeer;
+import java.awt.peer.FileDialogPeer;
+import java.awt.peer.FontPeer;
+import java.awt.peer.FramePeer;
+import java.awt.peer.KeyboardFocusManagerPeer;
+import java.awt.peer.LabelPeer;
+import java.awt.peer.ListPeer;
+import java.awt.peer.MenuBarPeer;
+import java.awt.peer.MenuItemPeer;
+import java.awt.peer.MenuPeer;
+import java.awt.peer.MouseInfoPeer;
+import java.awt.peer.PanelPeer;
+import java.awt.peer.PopupMenuPeer;
+import java.awt.peer.RobotPeer;
+import java.awt.peer.ScrollPanePeer;
+import java.awt.peer.ScrollbarPeer;
+import java.awt.peer.SystemTrayPeer;
+import java.awt.peer.TaskbarPeer;
+import ja