changeset 52244:8716dd44bc37

Merge
author psadhukhan
date Thu, 04 Oct 2018 14:17:59 +0530
parents c70468fc7118 ef114f6afcf1
children 651c3558ae2b
files make/Jprt.gmk make/jprt.properties test/hotspot/jtreg/jprt.config test/jaxp/javax/xml/jaxp/libs/jaxp/library/JarUtils.java test/jdk/com/sun/jdi/CatchPatternTest.sh test/jdk/com/sun/jdi/ImmutableResourceTest.sh test/jdk/com/sun/jdi/JITDebug.sh test/jdk/com/sun/jdi/PrivateTransportTest.sh test/jdk/com/sun/jdi/connect/spi/JdiLoadedByCustomLoader.sh test/jdk/com/sun/jdi/redefine/RedefineSetUp.sh test/jdk/com/sun/jdi/redefineMethod/RedefineSetUp.sh test/jdk/jprt.config test/jdk/lib/testlibrary/JarUtils.java test/jdk/sun/security/tools/keytool/autotest.sh test/jdk/sun/security/tools/keytool/standard.sh
diffstat 466 files changed, 5366 insertions(+), 6192 deletions(-)
--- a/.hgtags	Thu Oct 04 13:01:23 2018 +0530
+++ b/.hgtags	Thu Oct 04 14:17:59 2018 +0530
@@ -514,3 +514,4 @@
 f0f5d23449d31f1b3580c8a73313918cafeaefd7 jdk-12+11
 15094d12a632f452a2064318a4e416d0c7a9ce0c jdk-12+12
 511a9946f83e3e3c7b9dbe1840367063fb39b4e1 jdk-12+13
+8897e41b327c0a5601c6ba2bba5d07f15a3ffc91 jdk-12+14
--- a/make/Jprt.gmk	Thu Oct 04 13:01:23 2018 +0530
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,107 +0,0 @@
-#
-# Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.  Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# This file contains targets and utilities needed by JPRT.
-
-# Cygpath is only defined when running on Cygwin
-ifneq ($(CYGPATH), )
-  # If we get JPRT_ARCHIVE_*BUNDLE externally, make sure they have /cygdrive
-  # style paths
-  ifdef JPRT_ARCHIVE_BUNDLE
-    override JPRT_ARCHIVE_BUNDLE := $(shell $(CYGPATH) -u $(JPRT_ARCHIVE_BUNDLE))
-  endif
-  ifdef JPRT_ARCHIVE_TEST_BUNDLE
-    override JPRT_ARCHIVE_TEST_BUNDLE := \
-        $(shell $(CYGPATH) -u $(JPRT_ARCHIVE_TEST_BUNDLE))
-  endif
-  ifdef JPRT_ARCHIVE_SYMBOLS_BUNDLE
-    override JPRT_ARCHIVE_SYMBOLS_BUNDLE := \
-        $(shell $(CYGPATH) -u $(JPRT_ARCHIVE_SYMBOLS_BUNDLE))
-  endif
-endif
-
-# When running in JPRT these will be provided. Need defaults so that this makefile
-# is valid anyway.
-ifndef JPRT_ARCHIVE_BUNDLE
-  JPRT_ARCHIVE_BUNDLE=/tmp/jprt_bundles/jdk-image.zip
-endif
-ifndef JPRT_ARCHIVE_TEST_BUNDLE
-  JPRT_ARCHIVE_TEST_BUNDLE=/tmp/jprt_bundles/test-image.zip
-endif
-ifndef JPRT_ARCHIVE_SYMBOLS_BUNDLE
-  JPRT_ARCHIVE_SYMBOLS_BUNDLE=/tmp/jprt_bundles/symbols-image.zip
-endif
-
-ifeq ($(SKIP_BOOT_CYCLE), false)
-  jprt_bundle: bootcycle-images
-endif
-
-################################################################################
-# JPRT specific bundling targets
-JPRT_TARGET ?= $(DEFAULT_MAKE_TARGET)
-ifeq ($(JPRT_TARGET), $(DEFAULT_MAKE_TARGET))
-  jprt_bundle: $(DEFAULT_MAKE_TARGET) $(JPRT_ARCHIVE_BUNDLE) \
-      $(JPRT_ARCHIVE_TEST_BUNDLE)
-
-  SRC_JDK_IMAGE_DIR := $(JDK_IMAGE_DIR)
-  SRC_TEST_IMAGE_DIR := $(TEST_IMAGE_DIR)
-
-  # This target must be called in the context of a SPEC file
-  $(JPRT_ARCHIVE_BUNDLE): product-images
-	$(call MakeDir, $(@D))
-	$(CD) $(SRC_JDK_IMAGE_DIR) && $(ZIPEXE) -y -q -r $@ .
-
-  $(JPRT_ARCHIVE_TEST_BUNDLE): test-image
-	$(call MakeDir, $(@D))
-	$(CD) $(SRC_TEST_IMAGE_DIR) && $(ZIPEXE) -y -q -r $@ .
-
-  ##############################################################################
-  # Optional symbols bundle
-  ifeq ($(GCOV_ENABLED), true)
-    jprt_bundle: $(JPRT_ARCHIVE_SYMBOLS_BUNDLE)
-
-    $(JPRT_ARCHIVE_SYMBOLS_BUNDLE): product-images
-	$(call MakeDir, $(@D))
-	$(CD) $(SYMBOLS_IMAGE_DIR) && $(ZIPEXE) -y -q -r $@ .
-
-  endif
-
-  ##############################################################################
-
-else
-  # Just fake the main bundle to satisfy JPRT
-  jprt_bundle: $(JPRT_TARGET)
-	@$(call TargetEnter)
-	$(MKDIR) -p $(OUTPUTDIR)/bundles
-	$(CD) $(TOPDIR) && $(TAR) cf - README | $(GZIP) > \
-	    $(JPRT_ARCHIVE_BUNDLE)
-	@$(call TargetExit)
-endif
-
-ALL_TARGETS += jprt_bundle
-
-################################################################################
-
-$(eval $(call IncludeCustomExtension, Jprt.gmk))
--- a/make/Main.gmk	Thu Oct 04 13:01:23 2018 +0530
+++ b/make/Main.gmk	Thu Oct 04 14:17:59 2018 +0530
@@ -1131,11 +1131,6 @@
 
 ################################################################################
 
-# Include JPRT targets
-include $(TOPDIR)/make/Jprt.gmk
-
-################################################################################
-
 # The following targets are intentionally not added to ALL_TARGETS since they
 # are internal only, to support Init.gmk.
 
--- a/make/autoconf/hotspot.m4	Thu Oct 04 13:01:23 2018 +0530
+++ b/make/autoconf/hotspot.m4	Thu Oct 04 14:17:59 2018 +0530
@@ -351,6 +351,11 @@
     AC_MSG_RESULT([no])
   fi
 
+  # Disable unsupported GCs for Zero
+  if HOTSPOT_CHECK_JVM_VARIANT(zero); then
+    DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES epsilongc g1gc zgc"
+  fi
+
   # Turn on additional features based on other parts of configure
   if test "x$INCLUDE_DTRACE" = "xtrue"; then
     JVM_FEATURES="$JVM_FEATURES dtrace"
--- a/make/conf/jib-profiles.js	Thu Oct 04 13:01:23 2018 +0530
+++ b/make/conf/jib-profiles.js	Thu Oct 04 14:17:59 2018 +0530
@@ -525,8 +525,7 @@
             profiles[maketestName].default_make_targets = [ "test-make" ];
         });
 
-    // Profiles for building the zero jvm variant. These are used for verification
-    // in JPRT.
+    // Profiles for building the zero jvm variant. These are used for verification.
     var zeroProfiles = {
         "linux-x64-zero": {
             target_os: "linux",
@@ -733,18 +732,8 @@
         });
     });
 
-    // Profiles used to run tests. Used in JPRT and Mach 5.
+    // Profiles used to run tests.
     var testOnlyProfiles = {
-        "run-test-jprt": {
-            target_os: input.build_os,
-            target_cpu: input.build_cpu,
-            dependencies: [ "jtreg", "gnumake", "boot_jdk", "devkit", "jib" ],
-            labels: "test",
-            environment: {
-                "JT_JAVA": common.boot_jdk_home
-            }
-        },
-
         "run-test": {
             target_os: input.build_os,
             target_cpu: input.build_cpu,
@@ -806,7 +795,6 @@
                 + "/Xcode.app/Contents/Developer/usr/bin"
         };
         profiles["run-test"] = concatObjects(profiles["run-test"], macosxRunTestExtra);
-        profiles["run-test-jprt"] = concatObjects(profiles["run-test-jprt"], macosxRunTestExtra);
         profiles["run-test-prebuilt"] = concatObjects(profiles["run-test-prebuilt"], macosxRunTestExtra);
     }
     // On windows we want the debug symbols available at test time
--- a/make/jprt.properties	Thu Oct 04 13:01:23 2018 +0530
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,503 +0,0 @@
-#
-# Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.  Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-##############
-#
-# Global settings
-#
-
-# Install test bundle for targets in jprt.test.bundle.targets set
-jprt.selective.test.bundle.installation=true
-
-# The current release name
-jprt.tools.default.release=jdk9
-
-# Allow concurrent changes to be merged in prior to pushing
-jprt.sync.push=true
-
-# Directories to be excluded from the source bundles
-jprt.bundle.exclude.src.dirs=build dist webrev ${jprt.bundle.exclude.src.dirs.extra}
-
-# Use configure when building
-jprt.build.use.configure=true
-jprt.build.use.jib=true
-jprt.test.use.jib=true
-jprt.jib.launcher=bin/jib.sh
-jprt.build.use.jib.publish=true
-
-# Clear out all the build needs as JIB handles this
-jprt.jdk9.build.needs=
-jprt.macosx.jdk9.build.needs=
-jprt.windows_i586.jdk9.build.needs=
-jprt.windows_x64.jdk9.build.needs=
-jprt.solaris.jdk9.build.needs=
-jprt.linux_i586.jdk9.build.needs=
-jprt.linux_x64.jdk9.build.needs=
-jprt.linux_armv6.jdk9.build.needs=
-jprt.linux_armvh.jdk9.build.needs=
-jprt.linux_arm64.jdk9.build.needs=
-jprt.linux_armsflt.jdk9.build.needs=
-jprt.linux_armvfpsflt.jdk9.build.needs=
-jprt.linux_armvfphflt.jdk9.build.needs=
-jprt.linux_armv6vfphflt.jdk9.build.needs=
-jprt.solaris.client.build.needs=
-jprt.linux.client.build.needs=
-jprt.solaris.jdk9.compiler=
-jprt.linux.jdk9.compiler=
-jprt.jdk9.test.needs=
-
-# Disable the need for preinstalled Visual Studio and Xcode
-jprt.windows.jdk9.compiler=
-jprt.windows.6.2.jdk9.compiler=
-jprt.windows.6.3.jdk9.compiler=
-jprt.windows.jdk9.target.attribute.compilerVS2013.appliesTo.builds=none
-jprt.macosx.jdk9.target.attribute.compilerXcode511.appliesTo.builds=none
-
-# Set up the run flavors (jvm variants)
-jprt.run.flavors=c2,default,${my.additional.run.flavors}
-
-# Setup jib profiles
-jprt.linux_i586.product.build.jib.profile=linux-x86
-jprt.linux_x64.product.build.jib.profile=linux-x64
-jprt.macosx_x64.product.build.jib.profile=macosx-x64
-jprt.solaris_sparcv9.product.build.jib.profile=solaris-sparcv9
-jprt.solaris_x64.product.build.jib.profile=solaris-x64
-jprt.windows_i586.product.build.jib.profile=windows-x86
-jprt.windows_x64.product.build.jib.profile=windows-x64
-
-jprt.linux_i586.fastdebug.build.jib.profile=linux-x86-debug
-jprt.linux_x64.fastdebug.build.jib.profile=linux-x64-debug
-jprt.macosx_x64.fastdebug.build.jib.profile=macosx-x64-debug
-jprt.solaris_sparcv9.fastdebug.build.jib.profile=solaris-sparcv9-debug
-jprt.solaris_x64.fastdebug.build.jib.profile=solaris-x64-debug
-jprt.windows_i586.fastdebug.build.jib.profile=windows-x86-debug
-jprt.windows_x64.fastdebug.build.jib.profile=windows-x64-debug
-
-jprt.linux_i586.slowdebug.build.jib.profile=linux-x86-slowdebug
-jprt.linux_x64.slowdebug.build.jib.profile=linux-x64-slowdebug
-jprt.macosx_x64.slowdebug.build.jib.profile=macosx-x64-slowdebug
-jprt.solaris_sparcv9.slowdebug.build.jib.profile=solaris-sparcv9-slowdebug
-jprt.solaris_x64.slowdebug.build.jib.profile=solaris-x64-slowdebug
-jprt.windows_i586.slowdebug.build.jib.profile=windows-x86-slowdebug
-jprt.windows_x64.slowdebug.build.jib.profile=windows-x64-slowdebug
-
-jprt.linux_i586.productOpen.build.jib.profile=linux-x86-open
-jprt.linux_x64.productOpen.build.jib.profile=linux-x64-open
-jprt.macosx_x64.productOpen.build.jib.profile=macosx-x64-open
-jprt.solaris_sparcv9.productOpen.build.jib.profile=solaris-sparcv9-open
-jprt.solaris_x64.productOpen.build.jib.profile=solaris-x64-open
-jprt.windows_i586.productOpen.build.jib.profile=windows-x86-open
-jprt.windows_x64.productOpen.build.jib.profile=windows-x64-open
-
-jprt.linux_i586.fastdebugOpen.build.jib.profile=linux-x86-open-debug
-jprt.linux_x64.fastdebugOpen.build.jib.profile=linux-x64-open-debug
-jprt.macosx_x64.fastdebugOpen.build.jib.profile=macosx-x64-open-debug
-jprt.solaris_sparcv9.fastdebugOpen.build.jib.profile=solaris-sparcv9-open-debug
-jprt.solaris_x64.fastdebugOpen.build.jib.profile=solaris-x64-open-debug
-jprt.windows_i586.fastdebugOpen.build.jib.profile=windows-x86-open-debug
-jprt.windows_x64.fastdebugOpen.build.jib.profile=windows-x64-open-debug
-
-jprt.linux_i586.productZero.build.jib.profile=linux-x86-zero
-jprt.linux_x64.productZero.build.jib.profile=linux-x64-zero
-
-jprt.linux_i586.fastdebugZero.build.jib.profile=linux-x86-zero-debug
-jprt.linux_x64.fastdebugZero.build.jib.profile=linux-x64-zero-debug
-
-jprt.test.jib.profile=run-test-jprt
-
-# Set make target to use for different build flavors
-jprt.build.flavor.fastdebugOpen.target=jprt_bundle
-jprt.build.flavor.fastdebug.target=jprt_bundle
-jprt.build.flavor.product.target=jprt_bundle
-jprt.build.flavor.productOpen.target=jprt_bundle
-jprt.build.flavor.optimized.target=jprt_bundle
-jprt.build.flavor.optimizedOpen.target=jprt_bundle
-jprt.build.flavor.slowdebug.target=jprt_bundle
-jprt.build.flavor.productZero.target=jprt_bundle
-jprt.build.flavor.fastdebugZero.target=jprt_bundle
-
-# Use these configure args to define debug level or provide specific
-# configuration details not covered by Jib profiles.
-jprt.slowdebug.build.configure.args=
-jprt.fastdebug.build.configure.args=--disable-precompiled-headers
-# Don't disable precompiled headers on windows. It's simply too slow.
-jprt.windows_i586.fastdebug.build.configure.args=
-jprt.windows_x64.fastdebug.build.configure.args=
-jprt.windows_i586.fastdebugOpen.build.configure.args=
-jprt.windows_x64.fastdebugOpen.build.configure.args=
-jprt.product.build.configure.args=
-jprt.optimized.build.configure.args=--with-debug-level=optimized
-jprt.slowdebugOpen.build.configure.args=${jprt.slowdebug.build.configure.args}
-jprt.fastdebugOpen.build.configure.args=${jprt.fastdebug.build.configure.args}
-jprt.productOpen.build.configure.args=${jprt.product.build.configure.args}
-jprt.optimizedOpen.build.configure.args=${jprt.product.build.configure.args}
-
-
-# hotspot testset has custom build flavors and build targets
-my.jprt.testsetHasCustomBuildFlavors.hotspot=true
-my.jprt.testsetHasCustomBuildTargets.hotspot=true
-my.jprt.testsetHasCustomBuildFlavors.buildinfra=true
-my.jprt.testsetHasCustomBuildTargets.buildinfra=true
-
-# determine if the specified testset has custom build flavors or build targets
-my.jprt.testsetHasCustomBuildFlavors=${my.jprt.testsetHasCustomBuildFlavors.${jprt.test.set}}
-my.jprt.testsetHasCustomBuildTargets=${my.jprt.testsetHasCustomBuildTargets.${jprt.test.set}}
-
-# Select build flavors and build targets based on the specified testset
-jprt.build.flavors=${my.jprt.testsetHasCustomBuildFlavors ? \
-    ${my.build.flavors.${jprt.test.set}} : ${my.build.flavors.default}}
-jprt.build.targets=${my.jprt.testsetHasCustomBuildTargets ? \
-    ${my.build.targets.${jprt.test.set}} : ${my.build.targets.default}}
-
-# Select test targets - jprt default for jprt.test.set is "default"
-jprt.test.targets=${my.test.targets.${jprt.test.set}}
-jprt.make.rule.test.targets=${my.make.rule.test.targets.${jprt.test.set}}
-
-# Not all test targets need the test image
-jprt.test.bundle.targets=\
-  ${my.make.rule.test.targets.svc}, \
-  ${my.make.rule.test.targets.hotspot.reg}, \
-  ${my.make.rule.test.targets.hotspot.gtest} \
-  ${my.make.rule.test.targets.nativesanity} \
-  ${my.test.target.set:TESTNAME=jdk_lang} \
-  ${my.test.target.set:TESTNAME=jdk_nio}
-
-# 7155453: Work-around to prevent popups on OSX from blocking test completion
-# but the work-around is added to all platforms to be consistent
-jprt.jbb.options=-Djava.awt.headless=true
-
-########
-#
-# Build options (generic)
-#
-
-# Configure args common to all builds
-# Also allows for additional, testset specific configure arguments to be set
-jprt.build.configure.args=						\
-    --with-output-sync=recurse						\
-    --with-jobs=$ALT_PARALLEL_COMPILE_JOBS				\
-    --with-version-opt=$JPRT_JOB_ID				 	\
-    ${my.additional.build.configure.args.${jprt.test.set}}		\
-    ${my.custom.build.configure.args}
-
-########
-#
-# Build targets and options (default/jdk)
-#
-
-# The default build flavors
-my.build.flavors.default=fastdebug,product
-
-# Standard list of jprt build targets for this source tree
-my.build.targets.default= \
-    solaris_sparcv9_5.11-{product|fastdebug}, \
-    solaris_x64_5.11-{product|fastdebug}, \
-    linux_i586_3.8-{product|fastdebug}, \
-    linux_x64_3.8-{product|fastdebug}, \
-    macosx_x64_10.9-{product|fastdebug}, \
-    windows_i586_6.3-{product|fastdebug}, \
-    windows_x64_6.3-{product|fastdebug}, \
-    ${my.additional.build.targets.default}
-
-# Test target list (no fastdebug & limited c2 testing)
-my.test.target.set=							\
-    solaris_sparcv9_5.11-product-c2-TESTNAME,				\
-    solaris_x64_5.11-product-c2-TESTNAME,				\
-    linux_i586_3.8-product-c2-TESTNAME, 				\
-    linux_x64_3.8-product-c2-TESTNAME,					\
-    macosx_x64_10.9-product-c2-TESTNAME,				\
-    windows_i586_6.3-product-c2-TESTNAME,				\
-    windows_x64_6.3-product-c2-TESTNAME
-
-# Default vm test targets (testset=default)
-my.test.targets.default=						\
-    ${my.test.target.set:TESTNAME=jvm98},				\
-    ${my.test.target.set:TESTNAME=scimark}
-
-# Default jdk test targets (testset=default)
-my.make.rule.test.targets.default=					\
-    ${my.test.target.set:TESTNAME=langtools_jtreg},			\
-    ${my.test.target.set:TESTNAME=jdk_lang},				\
-    ${my.test.target.set:TESTNAME=jdk_math},				\
-    ${my.test.target.set:TESTNAME=jdk_util}
-
-# Default vm test targets (testset=core)
-my.test.targets.core=
-
-# Core jdk test targets (testset=core)
-my.make.rule.test.targets.core=						\
-    ${my.test.target.set:TESTNAME=jdk_lang},				\
-    ${my.test.target.set:TESTNAME=jdk_math},				\
-    ${my.test.target.set:TESTNAME=jdk_util},				\
-    ${my.test.target.set:TESTNAME=jdk_io},				\
-    ${my.test.target.set:TESTNAME=jdk_net},				\
-    ${my.test.target.set:TESTNAME=jdk_nio},				\
-    ${my.test.target.set:TESTNAME=jdk_security1},			\
-    ${my.test.target.set:TESTNAME=jdk_security2},			\
-    ${my.test.target.set:TESTNAME=jdk_security3},			\
-    ${my.test.target.set:TESTNAME=jdk_security4},			\
-    ${my.test.target.set:TESTNAME=jdk_rmi},				\
-    ${my.test.target.set:TESTNAME=jdk_text},				\
-    ${my.test.target.set:TESTNAME=jdk_time},				\
-    ${my.test.target.set:TESTNAME=jdk_other},				\
-    ${my.test.target.set:TESTNAME=core_tools}
-
-# Svc vm test targets (testset=svc)
-my.test.targets.svc=
-
-# Core jdk test targets (testset=svc)
-my.make.rule.test.targets.svc=						\
-    ${my.test.target.set:TESTNAME=jdk_management},			\
-    ${my.test.target.set:TESTNAME=jdk_instrument},			\
-    ${my.test.target.set:TESTNAME=jdk_jmx},				\
-    ${my.test.target.set:TESTNAME=jdk_jdi},				\
-    ${my.test.target.set:TESTNAME=jdk_jfr},                             \
-    ${my.test.target.set:TESTNAME=svc_tools},                           \
-    ${my.make.rule.test.targets.svc.extra}
-
-# JAXP vm test targets (testset=jaxp)
-my.test.targets.jaxp=
-
-# JAXP test targets (testset=jaxp)
-my.make.rule.test.targets.jaxp=						\
-    ${my.test.target.set:TESTNAME=jaxp_all}
-
-# All vm test targets (testset=all)
-my.test.targets.all=							\
-    ${my.test.targets.default},						\
-    ${my.test.target.set:TESTNAME=runThese},				\
-    ${my.test.target.set:TESTNAME=jbb_default}
-
-# All jdk test targets (testset=all)
-my.make.rule.test.targets.all=						\
-    ${my.make.rule.test.targets.core},					\
-    ${my.make.rule.test.targets.svc},					\
-    ${my.test.target.set:TESTNAME=jdk_awt},				\
-    ${my.test.target.set:TESTNAME=jdk_beans},				\
-    ${my.test.target.set:TESTNAME=jdk_sound},				\
-    ${my.test.target.set:TESTNAME=jdk_swing}
-
-# PIT vm test targets (testset=pit)
-my.test.targets.pit=							\
-   ${my.test.targets.all}
-
-# PIT jdk test targets (testset=pit)
-my.make.rule.test.targets.pit=						\
-    ${my.test.target.set:TESTNAME=langtools_jtreg},			\
-    ${my.make.rule.test.targets.core},					\
-    ${my.make.rule.test.targets.svc}                                    \
-    ${my.make.rule.test.targets.jaxp}
-
-# JCK test targets in test/Makefile (no windows)
-my.test.target.set.jck=							\
-    solaris_sparcv9_5.11-product-c2-JCK7TESTRULE,			\
-    solaris_x64_5.11-product-c2-JCK7TESTRULE,				\
-    linux_i586_3.8-product-c2-JCK7TESTRULE,				\
-    linux_x64_3.8-product-c2-JCK7TESTRULE
-
-# JCK testset targets
-my.make.rule.test.targets.jck=						\
-    ${my.test.target.set.jck:JCK7TESTRULE=jck7devtools},		\
-    ${my.test.target.set.jck:JCK7TESTRULE=jck7runtime},			\
-    ${my.test.target.set.jck:JCK7TESTRULE=jck7compiler}
-
-
-#############
-#
-# Hotspot related settings (testset=hotspot)
-#
-
-# The hotspot build flavors
-my.build.flavors.hotspot=						\
-    fastdebugOpen,fastdebug,product,productOpen,optimized,optimizedOpen \
-    ${my.additional.build.flavors.hotspot}
-
-# Platforms built for hotspot push jobs
-my.build.targets.hotspot=						\
-    solaris_sparcv9_5.11-{product|fastdebug},				\
-    solaris_x64_5.11-{product|fastdebug},				\
-    linux_i586_3.8-{product|fastdebug},					\
-    linux_x64_3.8-{product|fastdebug},					\
-    macosx_x64_10.9-{product|fastdebug},				\
-    windows_i586_6.3-{product|fastdebug},				\
-    windows_x64_6.3-{product|fastdebug},				\
-    solaris_x64_5.11-{fastdebugOpen},					\
-    linux_x64_3.8-{productOpen},					\
-    ${my.additional.build.targets.hotspot}
-
-# Tests to run on the various platforms for hotspot push jobs
-my.test.targets.hotspot.solaris.sparcv9=				\
-    solaris_sparcv9_5.11-{product|fastdebug}-c2-jvm98,			\
-    solaris_sparcv9_5.11-{product|fastdebug}-c2-jvm98_nontiered,	\
-    solaris_sparcv9_5.11-{product|fastdebug}-c2-scimark,		\
-    solaris_sparcv9_5.11-product-c2-runThese8,
-
-my.test.targets.hotspot.solaris.x64=					\
-    solaris_x64_5.11-{product|fastdebug}-c2-jvm98,			\
-    solaris_x64_5.11-{product|fastdebug}-c2-jvm98_nontiered,		\
-    solaris_x64_5.11-{product|fastdebug}-c2-scimark,			\
-    solaris_x64_5.11-product-c2-runThese8,				\
-    solaris_x64_5.11-product-c2-runThese8_Xcomp_lang,			\
-    solaris_x64_5.11-product-c2-runThese8_Xcomp_vm,
-
-my.test.targets.hotspot.linux.i586=					\
-    linux_i586_3.8-{product|fastdebug}-c2-jvm98,			\
-    linux_i586_3.8-{product|fastdebug}-c2-jvm98_nontiered,		\
-    linux_i586_3.8-{product|fastdebug}-c2-scimark,			\
-    linux_i586_3.8-fastdebug-c2-runThese8_Xcomp_lang,			\
-    linux_i586_3.8-fastdebug-c2-runThese8_Xcomp_vm
-
-my.test.targets.hotspot.linux.x64=					\
-    linux_x64_3.8-{product|fastdebug}-c2-jvm98,				\
-    linux_x64_3.8-{product|fastdebug}-c2-jvm98_nontiered,		\
-    linux_x64_3.8-{product|fastdebug}-c2-scimark
-
-my.test.targets.hotspot.macosx.x64=					\
-    macosx_x64_10.9-{product|fastdebug}-c2-jvm98,			\
-    macosx_x64_10.9-{product|fastdebug}-c2-jvm98_nontiered,		\
-    macosx_x64_10.9-{product|fastdebug}-c2-scimark
-
-my.test.targets.hotspot.windows.i586=					\
-    windows_i586_6.3-{product|fastdebug}-c2-jvm98,			\
-    windows_i586_6.3-{product|fastdebug}-c2-jvm98_nontiered,		\
-    windows_i586_6.3-{product|fastdebug}-c2-scimark,			\
-    windows_i586_6.3-product-c2-runThese8,				\
-    windows_i586_6.3-product-c2-runThese8_Xcomp_lang,			\
-    windows_i586_6.3-product-c2-runThese8_Xcomp_vm,
-
-my.test.targets.hotspot.windows.x64=					\
-    windows_x64_6.3-{product|fastdebug}-c2-jvm98,			\
-    windows_x64_6.3-{product|fastdebug}-c2-jvm98_nontiered,		\
-    windows_x64_6.3-{product|fastdebug}-c2-scimark,			\
-    windows_x64_6.3-product-c2-runThese8,				\
-    windows_x64_6.3-product-c2-runThese8_Xcomp_lang,			\
-    windows_x64_6.3-product-c2-runThese8_Xcomp_vm,
-
-# Some basic "smoke" tests for OpenJDK builds
-my.test.targets.hotspot.open=						\
-    solaris_x64_5.11-{productOpen|fastdebugOpen}-c2-jvm98,		\
-    linux_x64_3.8-{productOpen|fastdebugOpen}-c2-jvm98
-
-# The complete list of test targets for jprt
-my.test.targets.hotspot=						\
-  ${my.test.targets.hotspot.open},					\
-  ${my.test.targets.hotspot.solaris.sparcv9},				\
-  ${my.test.targets.hotspot.solaris.x64},				\
-  ${my.test.targets.hotspot.linux.i586},				\
-  ${my.test.targets.hotspot.linux.x64},					\
-  ${my.test.targets.hotspot.macosx.x64},				\
-  ${my.test.targets.hotspot.windows.i586},				\
-  ${my.test.targets.hotspot.windows.x64},				\
-  ${my.test.targets.hotspot.solaris.sparcv9},				\
-  ${my.test.targets.hotspot.solaris.x64},				\
-  ${my.test.targets.hotspot.linux.x64},					\
-  ${my.test.targets.hotspot.windows.i586},				\
-  ${my.test.targets.hotspot.windows.x64},				\
-  ${my.additional.test.targets.hotspot}
-
-
-# Make file based test targets
-
-my.make.rule.test.targets.hotspot.gtest= \
-  linux_i586_3.8-*-default-hotspot_gtest, \
-  linux_x64_3.8-*-default-hotspot_gtest, \
-  macosx_x64_10.9-*-default-hotspot_gtest, \
-  solaris_sparcv9_5.11-*-default-hotspot_gtest, \
-  solaris_x64_5.11-*-default-hotspot_gtest, \
-  windows_i586_6.3-*-default-hotspot_gtest, \
-  windows_x64_6.3-*-default-hotspot_gtest, \
-  ${my.additional.make.rule.test.targets.hotspot.gtest}
-
-my.make.rule.test.targets.hotspot.reg.group=				\
-  solaris_sparcv9_5.11-fastdebug-c2-GROUP,				\
-  solaris_x64_5.11-fastdebug-c2-GROUP,					\
-  linux_i586_3.8-fastdebug-c2-GROUP,					\
-  linux_x64_3.8-fastdebug-c2-GROUP,					\
-  macosx_x64_10.9-fastdebug-c2-GROUP,					\
-  windows_i586_6.3-fastdebug-c2-GROUP,					\
-  windows_x64_6.3-fastdebug-c2-GROUP
-
-# Hotspot jtreg tests
-my.make.rule.test.targets.hotspot.reg=							\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_compiler_1},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_compiler_2},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_compiler_3},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_compiler_closed},	\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_gc_1},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_gc_2},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_gc_closed},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_gc_gcold},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_gc_gcbasher},	\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_runtime},		\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=hotspot_tier1_serviceability},	\
-  ${my.make.rule.test.targets.hotspot.reg.group:GROUP=jdk_svc_sanity},			\
-  solaris_sparcv9_5.11-product-c2-hotspot_tier1_gc_gcbasher,				\
-  solaris_x64_5.11-product-c2-hotspot_tier1_gc_gcbasher,					\
-  linux_i586_3.8-product-c2-hotspot_tier1_gc_gcbasher,					\
-  linux_x64_3.8-product-c2-hotspot_tier1_gc_gcbasher,					\
-  macosx_x64_10.9-product-c2-hotspot_tier1_gc_gcbasher,					\
-  windows_i586_6.3-product-c2-hotspot_tier1_gc_gcbasher,					\
-  windows_x64_6.3-product-c2-hotspot_tier1_gc_gcbasher,                                  \
-  ${my.additional.make.rule.test.targets.hotspot.reg}
-
-# Other Makefile based Hotspot tests
-my.make.rule.test.targets.hotspot.other=                                \
-  ${my.make.rule.test.targets.hotspot.gtest},                           \
-  ${my.additional.make.rule.test.targets.hotspot.other}
-
-# All the makefile based tests to run
-my.make.rule.test.targets.hotspot=                                      \
-  ${my.make.rule.test.targets.hotspot.reg}                              \
-  ${my.make.rule.test.targets.hotspot.other}
-
-# Native jdk and hotspot test targets (testset=nativesanity)
-my.make.rule.test.targets.nativesanity=					\
-    ${my.test.target.set:TESTNAME=jdk_native_sanity},			\
-    ${my.test.target.set:TESTNAME=hotspot_native_sanity}
-
-################################################################################
-# Testset buildinfra
-my.build.flavors.buildinfra = \
-    product,fastdebug,slowdebug,productZero,fastdebugZero \
-    ${my.additional.build.flavors.buildinfra}
-
-# Platforms built for hotspot push jobs
-my.build.targets.buildinfra = \
-    solaris_sparcv9_5.11-{product|fastdebug|slowdebug}, \
-    solaris_x64_5.11-{product|fastdebug|slowdebug}, \
-    linux_i586_3.8-{product|fastdebug|slowdebug|productZero|fastdebugZero}, \
-    linux_x64_3.8-{product|fastdebug|slowdebug|productZero|fastdebugZero}, \
-    macosx_x64_10.9-{product|fastdebug|slowdebug}, \
-    windows_i586_6.3-{product|fastdebug|slowdebug}, \
-    windows_x64_6.3-{product|fastdebug|slowdebug}, \
-    ${my.additional.build.targets.buildinfra}
-
-my.test.targets.buildinfra = \
-    ${my.test.targets.default}, \
-    ${my.test.targets.hotspot}
-
-my.make.rule.test.targets.buildinfra = \
-    ${my.make.rule.test.targets.default}, \
-    ${my.make.rule.test.targets.hotspot}
--- a/make/langtools/tools/propertiesparser/gen/ClassGenerator.java	Thu Oct 04 13:01:23 2018 +0530
+++ b/make/langtools/tools/propertiesparser/gen/ClassGenerator.java	Thu Oct 04 14:17:59 2018 +0530
@@ -25,6 +25,8 @@
 
 package propertiesparser.gen;
 
+import static java.util.stream.Collectors.toList;
+
 import propertiesparser.parser.Message;
 import propertiesparser.parser.MessageFile;
 import propertiesparser.parser.MessageInfo;
@@ -44,11 +46,12 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.TreeSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.Set;
-import java.util.Properties;
+import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
@@ -150,7 +153,11 @@
     public void generateFactory(MessageFile messageFile, File outDir) {
         Map<FactoryKind, List<Map.Entry<String, Message>>> groupedEntries =
                 messageFile.messages.entrySet().stream()
-                        .collect(Collectors.groupingBy(e -> FactoryKind.parseFrom(e.getKey().split("\\.")[1])));
+                        .collect(
+                                Collectors.groupingBy(
+                                        e -> FactoryKind.parseFrom(e.getKey().split("\\.")[1]),
+                                        TreeMap::new,
+                                        toList()));
         //generate nested classes
         List<String> nestedDecls = new ArrayList<>();
         Set<String> importedTypes = new TreeSet<>();
--- a/make/src/classes/build/tools/jfr/GenerateJfrFiles.java	Thu Oct 04 13:01:23 2018 +0530
+++ b/make/src/classes/build/tools/jfr/GenerateJfrFiles.java	Thu Oct 04 14:17:59 2018 +0530
@@ -477,6 +477,7 @@
             out.write("");
             out.write("#else // !INCLUDE_JFR");
             out.write("");
+            out.write("template <typename T>");
             out.write("class JfrEvent {");
             out.write(" public:");
             out.write("  JfrEvent() {}");
@@ -497,103 +498,83 @@
 
     private static void printTypes(Printer out, Metadata metadata, boolean empty) {
         for (TypeElement t : metadata.getStructs()) {
-            if (empty) {
-                out.write("");
-                printEmptyType(out, t);
-            } else {
-                printType(out, t);
-            }
+            printType(out, t, empty);
             out.write("");
         }
         for (EventElement e : metadata.getEvents()) {
-            if (empty) {
-                printEmptyEvent(out, e);
-            } else {
-                printEvent(out, e);
-            }
+            printEvent(out, e, empty);
             out.write("");
         }
     }
 
-    private static void printEmptyEvent(Printer out, EventElement event) {
-        out.write("class Event" + event.name + " : public JfrEvent");
-        out.write("{");
-        out.write(" public:");
-        out.write("  Event" + event.name + "(EventStartTime ignore=TIMED) {}");
-        if (event.startTime) {
-            StringJoiner sj = new StringJoiner(",\n    ");
-            for (FieldElement f : event.fields) {
-                sj.add(f.getParameterType());
-            }
-            out.write("  Event" + event.name + "(");
-            out.write("    " + sj.toString() + ") { }");
-        }
-        for (FieldElement f : event.fields) {
-            out.write("  void set_" + f.name + "(" + f.getParameterType() + ") { }");
-        }
-        out.write("};");
-    }
-
-    private static void printEmptyType(Printer out, TypeElement t) {
+    private static void printType(Printer out, TypeElement t, boolean empty) {
         out.write("struct JfrStruct" + t.name);
         out.write("{");
+        if (!empty) {
+          out.write(" private:");
+          for (FieldElement f : t.fields) {
+              printField(out, f);
+          }
+          out.write("");
+        }
         out.write(" public:");
         for (FieldElement f : t.fields) {
-            out.write("  void set_" + f.name + "(" + f.getParameterType() + ") { }");
-        }
-        out.write("};");
-    }
-
-    private static void printType(Printer out, TypeElement t) {
-        out.write("struct JfrStruct" + t.name);
-        out.write("{");
-        out.write(" private:");
-        for (FieldElement f : t.fields) {
-            printField(out, f);
+           printTypeSetter(out, f, empty);
         }
         out.write("");
-        out.write(" public:");
-        for (FieldElement f : t.fields) {
-            printTypeSetter(out, f);
+        if (!empty) {
+          printWriteData(out, t.fields);
         }
-        out.write("");
-        printWriteData(out, t.fields);
         out.write("};");
         out.write("");
     }
 
-    private static void printEvent(Printer out, EventElement event) {
+    private static void printEvent(Printer out, EventElement event, boolean empty) {
         out.write("class Event" + event.name + " : public JfrEvent<Event" + event.name + ">");
         out.write("{");
-        out.write(" private:");
-        for (FieldElement f : event.fields) {
-            printField(out, f);
+        if (!empty) {
+          out.write(" private:");
+          for (FieldElement f : event.fields) {
+              printField(out, f);
+          }
+          out.write("");
         }
-        out.write("");
         out.write(" public:");
-        out.write("  static const bool hasThread = " + event.thread + ";");
-        out.write("  static const bool hasStackTrace = " + event.stackTrace + ";");
-        out.write("  static const bool isInstant = " + !event.startTime + ";");
-        out.write("  static const bool hasCutoff = " + event.cutoff + ";");
-        out.write("  static const bool isRequestable = " + event.periodic + ";");
-        out.write("  static const JfrEventId eventId = Jfr" + event.name + "Event;");
-        out.write("");
-        out.write("  Event" + event.name + "(EventStartTime timing=TIMED) : JfrEvent<Event" + event.name + ">(timing) {}");
+        if (!empty) {
+          out.write("  static const bool hasThread = " + event.thread + ";");
+          out.write("  static const bool hasStackTrace = " + event.stackTrace + ";");
+          out.write("  static const bool isInstant = " + !event.startTime + ";");
+          out.write("  static const bool hasCutoff = " + event.cutoff + ";");
+          out.write("  static const bool isRequestable = " + event.periodic + ";");
+          out.write("  static const JfrEventId eventId = Jfr" + event.name + "Event;");
+          out.write("");
+        }
+        if (!empty) {
+          out.write("  Event" + event.name + "(EventStartTime timing=TIMED) : JfrEvent<Event" + event.name + ">(timing) {}");
+        } else {
+          out.write("  Event" + event.name + "(EventStartTime timing=TIMED) {}");
+        }
         out.write("");
         int index = 0;
         for (FieldElement f : event.fields) {
             out.write("  void set_" + f.name + "(" + f.getParameterType() + " " + f.getParameterName() + ") {");
-            out.write("    this->_" + f.name + " = " + f.getParameterName() + ";");
-            out.write("    DEBUG_ONLY(set_field_bit(" + index++ + "));");
+            if (!empty) {
+              out.write("    this->_" + f.name + " = " + f.getParameterName() + ";");
+              out.write("    DEBUG_ONLY(set_field_bit(" + index++ + "));");
+            }
             out.write("  }");
         }
         out.write("");
-        printWriteData(out, event.fields);
-        out.write("");
+        if (!empty) {
+          printWriteData(out, event.fields);
+          out.write("");
+        }
         out.write("  using JfrEvent<Event" + event.name + ">::commit; // else commit() is hidden by overloaded versions in this class");
-        printConstructor2(out, event);
-        printCommitMethod(out, event);
-        printVerify(out, event.fields);
+        printConstructor2(out, event, empty);
+        printCommitMethod(out, event, empty);
+        if (!empty) {
+          printVerify(out, event.fields);
+        }
         out.write("};");
     }
 
@@ -610,8 +591,12 @@
         out.write("  }");
     }
 
-    private static void printTypeSetter(Printer out, FieldElement field) {
-        out.write("  void set_" + field.name + "(" + field.getParameterType() + " new_value) { this->_" + field.name + " = new_value; }");
+    private static void printTypeSetter(Printer out, FieldElement field, boolean empty) {
+        if (!empty) {
+          out.write("  void set_" + field.name + "(" + field.getParameterType() + " new_value) { this->_" + field.name + " = new_value; }");
+        } else {
+          out.write("  void set_" + field.name + "(" + field.getParameterType() + " new_value) { }");
+        }
     }
 
     private static void printVerify(Printer out, List<FieldElement> fields) {
@@ -626,7 +611,7 @@
         out.write("#endif");
     }
 
-    private static void printCommitMethod(Printer out, EventElement event) {
+    private static void printCommitMethod(Printer out, EventElement event, boolean empty) {
         if (event.startTime) {
             StringJoiner sj = new StringJoiner(",\n              ");
             for (FieldElement f : event.fields) {
@@ -634,12 +619,14 @@
             }
             out.write("");
             out.write("  void commit(" + sj.toString() + ") {");
-            out.write("    if (should_commit()) {");
-            for (FieldElement f : event.fields) {
-                out.write("      set_" + f.name + "(" + f.name + ");");
+            if (!empty) {
+              out.write("    if (should_commit()) {");
+              for (FieldElement f : event.fields) {
+                  out.write("      set_" + f.name + "(" + f.name + ");");
+              }
+              out.write("      commit();");
+              out.write("    }");
             }
-            out.write("      commit();");
-            out.write("    }");
             out.write("  }");
         }
         out.write("");
@@ -652,22 +639,24 @@
             sj.add(f.getParameterType() + " " + f.name);
         }
         out.write("  static void commit(" + sj.toString() + ") {");
-        out.write("    Event" + event.name + " me(UNTIMED);");
-        out.write("");
-        out.write("    if (me.should_commit()) {");
-        if (event.startTime) {
-            out.write("      me.set_starttime(startTicks);");
-            out.write("      me.set_endtime(endTicks);");
+        if (!empty) {
+          out.write("    Event" + event.name + " me(UNTIMED);");
+          out.write("");
+          out.write("    if (me.should_commit()) {");
+          if (event.startTime) {
+              out.write("      me.set_starttime(startTicks);");
+              out.write("      me.set_endtime(endTicks);");
+          }
+          for (FieldElement f : event.fields) {
+              out.write("      me.set_" + f.name + "(" + f.name + ");");
+          }
+          out.write("      me.commit();");
+          out.write("    }");
         }
-        for (FieldElement f : event.fields) {
-            out.write("      me.set_" + f.name + "(" + f.name + ");");
-        }
-        out.write("      me.commit();");
-        out.write("    }");
         out.write("  }");
     }
 
-    private static void printConstructor2(Printer out, EventElement event) {
+    private static void printConstructor2(Printer out, EventElement event, boolean empty) {
         if (!event.startTime) {
             out.write("");
             out.write("");
@@ -679,12 +668,16 @@
             for (FieldElement f : event.fields) {
                 sj.add(f.getParameterType() + " " + f.name);
             }
-            out.write("    " + sj.toString() + ") : JfrEvent<Event" + event.name + ">(TIMED) {");
-            out.write("    if (should_commit()) {");
-            for (FieldElement f : event.fields) {
-                out.write("      set_" + f.name + "(" + f.name + ");");
+            if (!empty) {
+              out.write("    " + sj.toString() + ") : JfrEvent<Event" + event.name + ">(TIMED) {");
+              out.write("    if (should_commit()) {");
+              for (FieldElement f : event.fields) {
+                  out.write("      set_" + f.name + "(" + f.name + ");");
+              }
+              out.write("    }");
+            } else {
+              out.write("    " + sj.toString() + ") {");
             }
-            out.write("    }");
             out.write("  }");
         }
     }
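
Worth noting why the stub in the !INCLUDE_JFR branch gains "template <typename T>": as the printEvent() changes above show, generated events inherit via CRTP ("class EventX : public JfrEvent<EventX>"), so the no-op base emitted when JFR is compiled out must itself be a class template or the generated header would not parse. A minimal standalone sketch, assuming this reading; EventFoo is a hypothetical generated event name:

    // Sketch of the CRTP shape the generated code relies on. When JFR is
    // compiled out, the stub base must still be a template so that
    // "class EventFoo : public JfrEvent<EventFoo>" remains well-formed.
    template <typename T>
    class JfrEvent {
     public:
      JfrEvent() {}
      bool should_commit() const { return false; }  // events never fire
      void commit() {}
    };

    // Hypothetical stand-in for an event emitted by printEvent() above.
    class EventFoo : public JfrEvent<EventFoo> {
     public:
      void set_value(int /*new_value*/) {}  // setters are no-ops when JFR is off
    };

    int main() {
      EventFoo e;
      if (e.should_commit()) e.commit();  // compiles, does nothing
      return 0;
    }
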
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1709,6 +1709,7 @@
         default: ShouldNotReachHere();
         }
         break;
+      default:
         ShouldNotReachHere();
       }
     } else {
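
A plausible motivation for the "default:" additions scattered through the switch statements in this and the following files (c1_LIRGenerator, templateTable): switches over enums that handle only a subset of enumerators trigger -Wswitch-style warnings and silently fall through on unexpected values, and the HotSpot idiom is to make the impossible path trap. A minimal standalone sketch with hypothetical names:

    #include <cassert>

    enum ValueTag { intTag, longTag, floatTag, doubleTag, objectTag };

    void do_arithmetic_op(ValueTag tag) {
      switch (tag) {
        case intTag:    /* integer path */ break;
        case longTag:   /* long path */    break;
        case floatTag:
        case doubleTag: /* FPU path */     break;
        // Without this default, -Wswitch flags the unhandled objectTag and
        // an unexpected value would fall through silently; trapping here
        // mirrors HotSpot's ShouldNotReachHere().
        default:        assert(false && "should not reach here");
      }
    }
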
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -584,8 +584,8 @@
     case doubleTag:  do_ArithmeticOp_FPU(x);  return;
     case longTag:    do_ArithmeticOp_Long(x); return;
     case intTag:     do_ArithmeticOp_Int(x);  return;
+    default:         ShouldNotReachHere();    return;
   }
-  ShouldNotReachHere();
 }
 
 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
@@ -792,9 +792,13 @@
           __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
           break;
         }
+        default:
+          ShouldNotReachHere();
       }
       break;
     }
+    default:
+      ShouldNotReachHere();
   }
 }
 
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -195,9 +195,7 @@
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(false /* is_aot */);
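
This is the first of many hunks in the changeset that delete os::is_MP() guards (compiledIC, jniFastGetField, macroAssembler, sharedRuntime, templateTable, arm.ad). The common thread: code that previously skipped alignment verification, memory barriers, or address-dependency tricks on uniprocessor hosts now takes the multiprocessor-safe path unconditionally. A rough standalone sketch of the shape of the change, with hypothetical names:

    #include <atomic>

    std::atomic<int> _thread_state{0};

    // Before: the fence was wrapped in "if (os::is_MP())" and a plain store
    // sufficed on uniprocessor hosts. After: MP is simply assumed and the
    // barrier is always emitted.
    void set_thread_state_fenced(int new_state) {
      _thread_state.store(new_state, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // unconditional
    }

    int main() {
      set_thread_state_fenced(1);
      return 0;
    }
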
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1505,7 +1505,7 @@
 #ifndef PRODUCT
   {
     char buffer[64];
-    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
+    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
     block_comment(buffer);
   }
 #endif
@@ -1568,7 +1568,7 @@
 #ifndef PRODUCT
   {
     char buffer[64];
-    snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64);
+    snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
     block_comment(buffer);
   }
 #endif
@@ -1681,7 +1681,7 @@
 #ifndef PRODUCT
     {
       char buffer[64];
-      snprintf(buffer, sizeof(buffer), "0x%"PRIX32, imm32);
+      snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
       block_comment(buffer);
     }
 #endif
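
The three snprintf fixes above are a C++11 compatibility cleanup: a string literal immediately followed by an identifier is parsed as a user-defined literal, so "0x%"PRIX64 never expands the macro; the added space restores ordinary token concatenation. A minimal standalone sketch:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      std::uint64_t imm64 = 0xDEADBEEF;
      char buffer[64];
      std::snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);   // OK: macro expands
      // std::snprintf(buffer, sizeof(buffer), "0x%"PRIX64, imm64); // ill-formed in C++11 mode
      std::puts(buffer);
      return 0;
    }
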
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1950,24 +1950,20 @@
   //     didn't see any synchronization is progress, and escapes.
   __ mov(rscratch1, _thread_in_native_trans);
 
-  if(os::is_MP()) {
-    if (UseMembar) {
-      __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
-
-      // Force this write out before the read below
-      __ dmb(Assembler::ISH);
-    } else {
-      __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
-      __ stlrw(rscratch1, rscratch2);
-
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(rthread, r2);
-    }
+  if (UseMembar) {
+    __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
+
+    // Force this write out before the read below
+    __ dmb(Assembler::ISH);
   } else {
-    __ strw(rscratch1, Address(rthread, JavaThread::thread_state_offset()));
+    __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
+    __ stlrw(rscratch1, rscratch2);
+
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(rthread, r2);
   }
 
   // check for safepoint operation in progress and/or pending suspend requests
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1394,17 +1394,15 @@
   __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
   __ stlrw(rscratch1, rscratch2);
 
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ dmb(Assembler::ISH);
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(rthread, rscratch2);
-    }
+  if (UseMembar) {
+    // Force this write out before the read below
+    __ dmb(Assembler::ISH);
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(rthread, rscratch2);
   }
 
   // check for safepoint operation in progress and/or pending suspend requests
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -412,7 +412,7 @@
     // Stash null_sentinel address to get its value later
     __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr());
     __ ldr(tmp, Address(rarg));
-    __ cmp(result, tmp);
+    __ cmpoop(result, tmp);
     __ br(Assembler::NE, notNull);
     __ mov(result, 0);  // NULL object reference
     __ bind(notNull);
@@ -2329,6 +2329,7 @@
   switch (code) {
   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+  default: break;
   }
 
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
@@ -2953,6 +2954,7 @@
     case Bytecodes::_fast_dputfield: __ pop_d(); break;
     case Bytecodes::_fast_fputfield: __ pop_f(); break;
     case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
+    default: break;
     }
     __ bind(L2);
   }
--- a/src/hotspot/cpu/arm/arm.ad	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/arm/arm.ad	Thu Oct 04 14:17:59 2018 +0530
@@ -5316,8 +5316,7 @@
 // Prefetch instructions.
 // Must be safe to execute with invalid address (cannot fault).
 
-instruct prefetchAlloc_mp( memoryP mem ) %{
-  predicate(os::is_MP());
+instruct prefetchAlloc( memoryP mem ) %{
   match( PrefetchAllocation mem );
   ins_cost(MEMORY_REF_COST);
   size(4);
@@ -5333,23 +5332,6 @@
   ins_pipe(iload_mem);
 %}
 
-instruct prefetchAlloc_sp( memoryP mem ) %{
-  predicate(!os::is_MP());
-  match( PrefetchAllocation mem );
-  ins_cost(MEMORY_REF_COST);
-  size(4);
-
-  format %{ "PLD $mem\t! Prefetch allocation" %}
-  ins_encode %{
-#ifdef AARCH64
-    __ prfm(pstl1keep, $mem$$Address);
-#else
-    __ pld($mem$$Address);
-#endif
-  %}
-  ins_pipe(iload_mem);
-%}
-
 //----------Store Instructions-------------------------------------------------
 // Store Byte
 instruct storeB(memoryB mem, store_RegI src) %{
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -870,8 +870,8 @@
     case doubleTag:  do_ArithmeticOp_FPU(x);  return;
     case longTag:    do_ArithmeticOp_Long(x); return;
     case intTag:     do_ArithmeticOp_Int(x);  return;
+    default:         ShouldNotReachHere();    return;
   }
-  ShouldNotReachHere();
 }
 
 
--- a/src/hotspot/cpu/arm/compiledIC_arm.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -155,9 +155,7 @@
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
--- a/src/hotspot/cpu/arm/jniFastGetField_arm.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/arm/jniFastGetField_arm.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,13 +127,9 @@
   __ bic(R1, R1, JNIHandles::weak_tag_mask);
 #endif
 
-  if (os::is_MP()) {
-    // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
-    __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
-    __ ldr(Robj, Address(R1, Rtmp1));
-  } else {
-    __ ldr(Robj, Address(R1));
-  }
+  // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
+  __ andr(Rtmp1, Rsafept_cnt, (unsigned)1);
+  __ ldr(Robj, Address(R1, Rtmp1));
 
 #ifdef AARCH64
   __ add(Robj, Robj, AsmOperand(R2, lsr, 2));
@@ -198,25 +194,21 @@
       ShouldNotReachHere();
   }
 
-  if(os::is_MP()) {
-      // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
+  // Address dependency restricts memory access ordering. It's cheaper than explicit LoadLoad barrier
 #if defined(__ABI_HARD__) && !defined(AARCH64)
-    if (type == T_FLOAT || type == T_DOUBLE) {
-      __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
-      __ fmrrd(Rres, Rres_hi, D0);
-      __ eor(Rtmp2, Rres, Rres);
-      __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
-    } else
+  if (type == T_FLOAT || type == T_DOUBLE) {
+    __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
+    __ fmrrd(Rres, Rres_hi, D0);
+    __ eor(Rtmp2, Rres, Rres);
+    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
+  } else
 #endif // __ABI_HARD__ && !AARCH64
-    {
+  {
 #ifndef AARCH64
-      __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
+    __ ldr_literal(Rsafepoint_counter_addr, safepoint_counter_addr);
 #endif // !AARCH64
-      __ eor(Rtmp2, Rres, Rres);
-      __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
-    }
-  } else {
-    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr));
+    __ eor(Rtmp2, Rres, Rres);
+    __ ldr_s32(Rsafept_cnt2, Address(Rsafepoint_counter_addr, Rtmp2));
   }
   __ cmp(Rsafept_cnt2, Rsafept_cnt);
 #ifdef AARCH64
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1563,8 +1563,6 @@
 // Serializes memory.
 // tmp register is not used on AArch64, this parameter is provided solely for better compatibility with 32-bit ARM
 void MacroAssembler::membar(Membar_mask_bits order_constraint, Register tmp) {
-  if (!os::is_MP()) return;
-
   // TODO-AARCH64 investigate dsb vs dmb effects
   if (order_constraint == StoreStore) {
     dmb(DMB_st);
@@ -1585,7 +1583,6 @@
                             Register tmp,
                             bool preserve_flags,
                             Register load_tgt) {
-  if (!os::is_MP()) return;
 
   if (order_constraint == StoreStore) {
     dmb(DMB_st, tmp);
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2996,6 +2996,7 @@
   switch (code) {
   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+  default: break;
   }
 
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
@@ -3145,15 +3146,11 @@
   const Register Rindex   = R5_tmp;
   const Register Rflags   = R5_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
   jvmti_post_field_access(Rcache, Rindex, is_static, false);
   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
 
-  if (gen_volatile_check) {
-    __ mov(Rflagsav, Rflags);
-  }
+  __ mov(Rflagsav, Rflags);
 
   if (!is_static) pop_and_check_object(Robj);
 
@@ -3390,16 +3387,13 @@
 
   __ bind(Done);
 
-  if (gen_volatile_check) {
-    // Check for volatile field
-    Label notVolatile;
-    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
-
-    __ bind(notVolatile);
-  }
-
+  // Check for volatile field
+  Label notVolatile;
+  __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
+
+  __ bind(notVolatile);
 }
 
 void TemplateTable::getfield(int byte_no) {
@@ -3491,22 +3485,18 @@
   const Register Rindex   = R5_tmp;
   const Register Rflags   = R5_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   resolve_cache_and_index(byte_no, Rcache, Rindex, sizeof(u2));
   jvmti_post_field_mod(Rcache, Rindex, is_static);
   load_field_cp_cache_entry(Rcache, Rindex, Roffset, Rflags, Robj, is_static);
 
-  if (gen_volatile_check) {
-    // Check for volatile field
-    Label notVolatile;
-    __ mov(Rflagsav, Rflags);
-    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
-
-    __ bind(notVolatile);
-  }
+  // Check for volatile field
+  Label notVolatile;
+  __ mov(Rflagsav, Rflags);
+  __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
+
+  __ bind(notVolatile);
 
   Label Done, Lint, shouldNotReachHere;
   Label Ltable, Lbtos, Lztos, Lctos, Lstos, Litos, Lltos, Lftos, Ldtos, Latos;
@@ -3732,36 +3722,33 @@
 
   __ bind(Done);
 
-  if (gen_volatile_check) {
-    Label notVolatile;
-    if (is_static) {
-      // Just check for volatile. Memory barrier for static final field
-      // is handled by class initialization.
-      __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-      volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
-      __ bind(notVolatile);
-    } else {
-      // Check for volatile field and final field
-      Label skipMembar;
-
-      __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
-                       1 << ConstantPoolCacheEntry::is_final_shift);
-      __ b(skipMembar, eq);
-
-      __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-      // StoreLoad barrier after volatile field write
-      volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
-      __ b(skipMembar);
-
-      // StoreStore barrier after final field write
-      __ bind(notVolatile);
-      volatile_barrier(MacroAssembler::StoreStore, Rtemp);
-
-      __ bind(skipMembar);
-    }
+  Label notVolatile2;
+  if (is_static) {
+    // Just check for volatile. Memory barrier for static final field
+    // is handled by class initialization.
+    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
+    volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
+    __ bind(notVolatile2);
+  } else {
+    // Check for volatile field and final field
+    Label skipMembar;
+
+    __ tst(Rflagsav, 1 << ConstantPoolCacheEntry::is_volatile_shift |
+           1 << ConstantPoolCacheEntry::is_final_shift);
+    __ b(skipMembar, eq);
+
+    __ tbz(Rflagsav, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
+
+    // StoreLoad barrier after volatile field write
+    volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
+    __ b(skipMembar);
+
+    // StoreStore barrier after final field write
+    __ bind(notVolatile2);
+    volatile_barrier(MacroAssembler::StoreStore, Rtemp);
+
+    __ bind(skipMembar);
   }
-
 }
 
 void TemplateTable::putfield(int byte_no) {
@@ -3831,31 +3818,25 @@
   const Register Rflags  = Rtmp_save0; // R4/R19
   const Register Robj    = R5_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   // access constant pool cache
   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
 
   __ add(Rcache, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
 
-  if (gen_volatile_check) {
-    // load flags to test volatile
-    __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
-  }
+  // load flags to test volatile
+  __ ldr_u32(Rflags, Address(Rcache, base + ConstantPoolCacheEntry::flags_offset()));
 
   // replace index with field offset from cache entry
   __ ldr(Roffset, Address(Rcache, base + ConstantPoolCacheEntry::f2_offset()));
 
-  if (gen_volatile_check) {
-    // Check for volatile store
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-    // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explict barrier
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
-
-    __ bind(notVolatile);
-  }
+  // Check for volatile store
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+
+  // TODO-AARCH64 on AArch64, store-release instructions can be used to get rid of this explicit barrier
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
+
+  __ bind(notVolatile);
 
   // Get object from stack
   pop_and_check_object(Robj);
@@ -3902,28 +3883,25 @@
       ShouldNotReachHere();
   }
 
-  if (gen_volatile_check) {
-    Label notVolatile;
-    Label skipMembar;
-    __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
-                   1 << ConstantPoolCacheEntry::is_final_shift);
-    __ b(skipMembar, eq);
-
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-    // StoreLoad barrier after volatile field write
-    volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
-    __ b(skipMembar);
-
-    // StoreStore barrier after final field write
-    __ bind(notVolatile);
-    volatile_barrier(MacroAssembler::StoreStore, Rtemp);
-
-    __ bind(skipMembar);
-  }
+  Label notVolatile2;
+  Label skipMembar;
+  __ tst(Rflags, 1 << ConstantPoolCacheEntry::is_volatile_shift |
+         1 << ConstantPoolCacheEntry::is_final_shift);
+  __ b(skipMembar, eq);
+
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile2);
+
+  // StoreLoad barrier after volatile field write
+  volatile_barrier(MacroAssembler::StoreLoad, Rtemp);
+  __ b(skipMembar);
+
+  // StoreStore barrier after final field write
+  __ bind(notVolatile2);
+  volatile_barrier(MacroAssembler::StoreStore, Rtemp);
+
+  __ bind(skipMembar);
 }
 
-
 void TemplateTable::fast_accessfield(TosState state) {
   transition(atos, state);
 
@@ -3953,18 +3931,14 @@
   const Register Rindex  = R3_tmp;
   const Register Roffset = R3_tmp;
 
-  const bool gen_volatile_check = os::is_MP();
-
   // access constant pool cache
   __ get_cache_and_index_at_bcp(Rcache, Rindex, 1);
   // replace index with field offset from cache entry
   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
 
-  if (gen_volatile_check) {
-    // load flags to test volatile
-    __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  }
+  // load flags to test volatile
+  __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 
   __ verify_oop(Robj);
   __ null_check(Robj, Rtemp);
@@ -4007,16 +3981,14 @@
       ShouldNotReachHere();
   }
 
-  if (gen_volatile_check) {
-    // Check for volatile load
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-    // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explict barrier
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
-
-    __ bind(notVolatile);
-  }
+  // Check for volatile load
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+
+  // TODO-AARCH64 on AArch64, load-acquire instructions can be used to get rid of this explicit barrier
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
+
+  __ bind(notVolatile);
 }
 
 
@@ -4038,12 +4010,8 @@
   __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
   __ ldr(Roffset, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
 
-  const bool gen_volatile_check = os::is_MP();
-
-  if (gen_volatile_check) {
-    // load flags to test volatile
-    __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-  }
+  // load flags to test volatile
+  __ ldr_u32(Rflags, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
 
   // make sure exception is reported in correct bcp range (getfield is next instruction)
   __ add(Rbcp, Rbcp, 1);
@@ -4051,32 +4019,30 @@
   __ sub(Rbcp, Rbcp, 1);
 
 #ifdef AARCH64
-  if (gen_volatile_check) {
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-    __ add(Rtemp, Robj, Roffset);
-
-    if (state == itos) {
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+
+  __ add(Rtemp, Robj, Roffset);
+
+  if (state == itos) {
+    __ ldar_w(R0_tos, Rtemp);
+  } else if (state == atos) {
+    if (UseCompressedOops) {
       __ ldar_w(R0_tos, Rtemp);
-    } else if (state == atos) {
-      if (UseCompressedOops) {
-        __ ldar_w(R0_tos, Rtemp);
-        __ decode_heap_oop(R0_tos);
-      } else {
-        __ ldar(R0_tos, Rtemp);
-      }
-      __ verify_oop(R0_tos);
-    } else if (state == ftos) {
-      __ ldar_w(R0_tos, Rtemp);
-      __ fmov_sw(S0_tos, R0_tos);
+      __ decode_heap_oop(R0_tos);
     } else {
-      ShouldNotReachHere();
+      __ ldar(R0_tos, Rtemp);
     }
-    __ b(done);
-
-    __ bind(notVolatile);
+    __ verify_oop(R0_tos);
+  } else if (state == ftos) {
+    __ ldar_w(R0_tos, Rtemp);
+    __ fmov_sw(S0_tos, R0_tos);
+  } else {
+    ShouldNotReachHere();
   }
+  __ b(done);
+
+  __ bind(notVolatile);
 #endif // AARCH64
 
   if (state == itos) {
@@ -4099,15 +4065,13 @@
   }
 
 #ifndef AARCH64
-  if (gen_volatile_check) {
-    // Check for volatile load
-    Label notVolatile;
-    __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
-
-    volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
-
-    __ bind(notVolatile);
-  }
+  // Check for volatile load
+  Label notVolatile;
+  __ tbz(Rflags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+
+  volatile_barrier(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
+
+  __ bind(notVolatile);
 #endif // !AARCH64
 
   __ bind(done);
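
The getfield/putfield epilogues above now always emit the flags test and
barrier selection. Restated at the C level as a sketch (the shift values are
illustrative, and in the real code this logic is emitted as tbz/tst/b
instruction sequences):

    #include <cstdio>

    // Illustrative bit positions; the real values live in ConstantPoolCacheEntry.
    const unsigned is_volatile_shift = 21;
    const unsigned is_final_shift    = 20;

    enum Barrier { StoreStore, StoreLoad };
    static void volatile_barrier(Barrier b) { std::printf("barrier %d\n", b); }

    // After a field write: volatile stores need StoreLoad, final stores need
    // StoreStore, and everything else falls through with no barrier.
    void post_store_barrier(unsigned flags) {
      if ((flags & ((1u << is_volatile_shift) | (1u << is_final_shift))) == 0) {
        return;                       // skipMembar
      }
      if (flags & (1u << is_volatile_shift)) {
        volatile_barrier(StoreLoad);  // StoreLoad barrier after volatile field write
      } else {
        volatile_barrier(StoreStore); // StoreStore barrier after final field write
      }
    }

    int main() { post_store_barrier(1u << is_volatile_shift); return 0; }
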
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -535,8 +535,8 @@
     case doubleTag: do_ArithmeticOp_FPU(x);  return;
     case longTag:   do_ArithmeticOp_Long(x); return;
     case intTag:    do_ArithmeticOp_Int(x);  return;
+    default: ShouldNotReachHere();
   }
-  ShouldNotReachHere();
 }
 
 
@@ -735,39 +735,39 @@
         break;
       } // else fallthru
     }
+    case vmIntrinsics::_dsin:   // fall through
+    case vmIntrinsics::_dcos:   // fall through
+    case vmIntrinsics::_dtan:   // fall through
+    case vmIntrinsics::_dlog:   // fall through
     case vmIntrinsics::_dlog10: // fall through
-    case vmIntrinsics::_dlog: // fall through
-    case vmIntrinsics::_dsin: // fall through
-    case vmIntrinsics::_dtan: // fall through
-    case vmIntrinsics::_dcos: // fall through
     case vmIntrinsics::_dexp: {
       assert(x->number_of_arguments() == 1, "wrong type");
 
       address runtime_entry = NULL;
       switch (x->id()) {
-      case vmIntrinsics::_dsqrt:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
-        break;
-      case vmIntrinsics::_dsin:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
-        break;
-      case vmIntrinsics::_dcos:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
-        break;
-      case vmIntrinsics::_dtan:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
-        break;
-      case vmIntrinsics::_dlog:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
-        break;
-      case vmIntrinsics::_dlog10:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
-        break;
-      case vmIntrinsics::_dexp:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
-        break;
-      default:
-        ShouldNotReachHere();
+        case vmIntrinsics::_dsqrt:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
+          break;
+        case vmIntrinsics::_dsin:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
+          break;
+        case vmIntrinsics::_dcos:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
+          break;
+        case vmIntrinsics::_dtan:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
+          break;
+        case vmIntrinsics::_dlog:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
+          break;
+        case vmIntrinsics::_dlog10:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
+          break;
+        case vmIntrinsics::_dexp:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
+          break;
+        default:
+          ShouldNotReachHere();
       }
 
       LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
@@ -781,6 +781,8 @@
       set_result(x, result);
       break;
     }
+    default:
+      break;
   }
 }
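
The reshuffle above also replaces a call placed after the switch with an
explicit default arm. The motivation is presumably compiler hygiene: with
-Wswitch-style warnings, a switch over an enum should either mention every
enumerator or carry a default. A sketch with illustrative enumerators:

    #include <cassert>

    enum ValueTag { intTag, longTag, floatTag, doubleTag, addressTag };

    void do_arithmetic_op(ValueTag tag) {
      switch (tag) {
        case doubleTag: /* do_ArithmeticOp_FPU(x)  */ return;
        case longTag:   /* do_ArithmeticOp_Long(x) */ return;
        case intTag:    /* do_ArithmeticOp_Int(x)  */ return;
        default:        assert(false && "ShouldNotReachHere"); // was after the switch
      }
    }

    int main() { do_arithmetic_op(intTag); return 0; }
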
 
--- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -215,9 +215,7 @@
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
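
verify() now checks call-site alignment unconditionally. A sketch of the
resulting shape (NativeCall here is a stand-in, not the real class body):

    #include <cassert>
    #include <cstdint>

    struct NativeCall {
      const unsigned char* _addr;
      void verify() { /* would check the opcode bytes at _addr */ }
      void verify_alignment() {
        // The patchable site is expected to be naturally aligned; this check
        // formerly ran only when os::is_MP() returned true.
        assert((reinterpret_cast<uintptr_t>(_addr) & 0x3) == 0 && "misaligned call site");
      }
    };

    void verify_static_call(NativeCall* call) {
      call->verify();
      call->verify_alignment();
    }

    int main() { NativeCall c{nullptr}; verify_static_call(&c); return 0; }
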
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2430,17 +2430,15 @@
   {
     Label no_block, sync;
 
-    if (os::is_MP()) {
-      if (UseMembar) {
-        // Force this write out before the read below.
-        __ fence();
-      } else {
-        // Write serialization page so VM thread can do a pseudo remote membar.
-        // We use the current thread pointer to calculate a thread specific
-        // offset to write to within the page. This minimizes bus traffic
-        // due to cache line collision.
-        __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
-      }
+    if (UseMembar) {
+      // Force this write out before the read below.
+      __ fence();
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
     }
 
     Register sync_state_addr = r_temp_4;
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2236,8 +2236,10 @@
 
   Bytecodes::Code code = bytecode();
   switch (code) {
-  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
-  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+    case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
+    case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+    default:
+      break;
   }
 
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
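
For reference, the normalization these switches perform: the _nofast variants
suppress the interpreter's fast-bytecode rewriting but resolve like the plain
bytecodes, and the new default arm leaves every other bytecode untouched. A
sketch with illustrative enum values:

    enum Code { _getfield, _putfield, _nofast_getfield, _nofast_putfield };

    Code normalize(Code code) {
      switch (code) {
        case _nofast_getfield: return _getfield;
        case _nofast_putfield: return _putfield;
        default:               return code;  // all other bytecodes pass through
      }
    }

    int main() { return normalize(_nofast_getfield) == _getfield ? 0 : 1; }
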
--- a/src/hotspot/cpu/s390/assembler_s390.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/s390/assembler_s390.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -121,14 +121,14 @@
     case bcondNotOrdered  : inverse_cc = bcondOrdered;     break;  // 14
     case bcondOrdered     : inverse_cc = bcondNotOrdered;  break;  //  1
 
-    case bcondEqual                      : inverse_cc = (branch_condition)(bcondNotEqual + bcondNotOrdered);  break; //  8
-    case bcondNotEqual + bcondNotOrdered : inverse_cc = bcondEqual;  break;                                          //  7
+    case bcondEqual                : inverse_cc = bcondNotEqualOrNotOrdered; break;  //  8
+    case bcondNotEqualOrNotOrdered : inverse_cc = bcondEqual;                break;  //  7
 
-    case bcondLow      + bcondNotOrdered : inverse_cc = (branch_condition)(bcondHigh + bcondEqual);      break;      //  5
-    case bcondNotLow                     : inverse_cc = (branch_condition)(bcondLow  + bcondNotOrdered); break;      // 10
+    case bcondLowOrNotOrdered      : inverse_cc = bcondNotLow;               break;  //  5
+    case bcondNotLow               : inverse_cc = bcondLowOrNotOrdered;      break;  // 10
 
-    case bcondHigh                       : inverse_cc = (branch_condition)(bcondLow  + bcondNotOrdered + bcondEqual); break;  //  2
-    case bcondNotHigh  + bcondNotOrdered : inverse_cc = bcondHigh; break;                                                     // 13
+    case bcondHigh                 : inverse_cc = bcondNotHighOrNotOrdered;  break;  //  2
+    case bcondNotHighOrNotOrdered  : inverse_cc = bcondHigh;                 break;  // 13
 
     default :
       fprintf(stderr, "inverse_float_condition(%d)\n", (int)cc);
--- a/src/hotspot/cpu/s390/assembler_s390.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1442,8 +1442,11 @@
     bcondNotPositive =  bcondNotHigh,
     bcondNotOrdered  =  1,  // float comparisons
     bcondOrdered     = 14,  // float comparisons
-    bcondLowOrNotOrdered  =  bcondLow|bcondNotOrdered,  // float comparisons
-    bcondHighOrNotOrdered =  bcondHigh|bcondNotOrdered, // float comparisons
+    bcondLowOrNotOrdered  =  bcondLow  | bcondNotOrdered,  // float comparisons
+    bcondHighOrNotOrdered =  bcondHigh | bcondNotOrdered,  // float comparisons
+    bcondNotLowOrNotOrdered   =  bcondNotLow   | bcondNotOrdered,  // float comparisons
+    bcondNotHighOrNotOrdered  =  bcondNotHigh  | bcondNotOrdered,  // float comparisons
+    bcondNotEqualOrNotOrdered =  bcondNotEqual | bcondNotOrdered,  // float comparisons
     // unsigned arithmetic calculation instructions
     // Mask bit#0 is not used by these instructions.
     // There is no indication of overflow for these instr.
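
The trailing comments in these two hunks pin down the numeric values, and they
explain the pairs in inverse_float_condition(): a condition and its inverse
partition the four CC mask bits, so each pair XORs to 15. A small check using
the values from the diff (bcondLow's value, 4, is inferred from
bcondLowOrNotOrdered == 5):

    #include <cassert>

    enum branch_condition {
      bcondNotOrdered = 1, bcondHigh = 2, bcondLow = 4, bcondEqual = 8,
      bcondOrdered              = 14,
      bcondNotEqualOrNotOrdered = bcondLow  | bcondHigh  | bcondNotOrdered, //  7
      bcondLowOrNotOrdered      = bcondLow  | bcondNotOrdered,              //  5
      bcondNotLow               = bcondHigh | bcondEqual,                   // 10
      bcondNotHighOrNotOrdered  = bcondLow  | bcondEqual | bcondNotOrdered  // 13
    };

    int main() {
      assert((bcondEqual           ^ bcondNotEqualOrNotOrdered) == 15);
      assert((bcondLowOrNotOrdered ^ bcondNotLow)               == 15);
      assert((bcondHigh            ^ bcondNotHighOrNotOrdered)  == 15);
      assert((bcondNotOrdered      ^ bcondOrdered)              == 15);
      return 0;
    }
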
--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -529,8 +529,9 @@
     case doubleTag: do_ArithmeticOp_FPU(x);  return;
     case longTag:   do_ArithmeticOp_Long(x); return;
     case intTag:    do_ArithmeticOp_Int(x);  return;
+    default:
+      ShouldNotReachHere();
   }
-  ShouldNotReachHere();
 }
 
 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
@@ -634,47 +635,49 @@
       LIR_Opr dst = rlock_result(x);
 
       switch (x->id()) {
-      case vmIntrinsics::_dsqrt: {
-        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
-        break;
-      }
-      case vmIntrinsics::_dabs: {
-        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
-        break;
-      }
+        case vmIntrinsics::_dsqrt: {
+          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
+          break;
+        }
+        case vmIntrinsics::_dabs: {
+          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
+          break;
+        }
+        default:
+          ShouldNotReachHere();
       }
       break;
     }
+    case vmIntrinsics::_dsin:   // fall through
+    case vmIntrinsics::_dcos:   // fall through
+    case vmIntrinsics::_dtan:   // fall through
+    case vmIntrinsics::_dlog:   // fall through
     case vmIntrinsics::_dlog10: // fall through
-    case vmIntrinsics::_dlog: // fall through
-    case vmIntrinsics::_dsin: // fall through
-    case vmIntrinsics::_dtan: // fall through
-    case vmIntrinsics::_dcos: // fall through
     case vmIntrinsics::_dexp: {
       assert(x->number_of_arguments() == 1, "wrong type");
 
       address runtime_entry = NULL;
       switch (x->id()) {
-      case vmIntrinsics::_dsin:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
-        break;
-      case vmIntrinsics::_dcos:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
-        break;
-      case vmIntrinsics::_dtan:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
-        break;
-      case vmIntrinsics::_dlog:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
-        break;
-      case vmIntrinsics::_dlog10:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
-        break;
-      case vmIntrinsics::_dexp:
-        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
-        break;
-      default:
-        ShouldNotReachHere();
+        case vmIntrinsics::_dsin:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
+          break;
+        case vmIntrinsics::_dcos:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
+          break;
+        case vmIntrinsics::_dtan:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
+          break;
+        case vmIntrinsics::_dlog:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
+          break;
+        case vmIntrinsics::_dlog10:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
+          break;
+        case vmIntrinsics::_dexp:
+          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
+          break;
+        default:
+          ShouldNotReachHere();
       }
 
       LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
@@ -688,6 +691,8 @@
       set_result(x, result);
       break;
     }
+    default:
+      break;
   }
 }
 
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -145,9 +145,7 @@
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -593,7 +593,6 @@
   static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }
 
   static bool call_far_patchable_requires_alignment_nop(address pc) {
-    if (!os::is_MP()) return false;
     int size = call_far_patchable_size();
     return ((intptr_t)(pc + size) & 0x03L) != 0;
   }
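
The predicate above now runs unconditionally; what it computes is simply
whether the end of the patchable call sequence lands on a 4-byte boundary,
and an alignment nop is inserted when it does not. Sketch, with
call_far_patchable_size() stubbed by a placeholder value:

    #include <cstdint>

    static int call_far_patchable_size() { return 12; }  // placeholder, not the real size

    static bool call_far_patchable_requires_alignment_nop(const char* pc) {
      int size = call_far_patchable_size();
      return (reinterpret_cast<intptr_t>(pc + size) & 0x03L) != 0;
    }

    int main() {
      const char* pc = reinterpret_cast<const char*>(0x1000);
      return call_far_patchable_requires_alignment_nop(pc) ? 1 : 0;
    }
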
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -587,6 +587,9 @@
     case T_DOUBLE:
       __ freg2mem_opt(Z_FRET, memaddr);
       break;
+    default:
+      ShouldNotReachHere();
+      break;
   }
 }
 
@@ -616,6 +619,9 @@
     case T_DOUBLE:
       __ mem2freg_opt(Z_FRET, memaddr);
       break;
+    default:
+      ShouldNotReachHere();
+      break;
   }
 }
 
@@ -2155,18 +2161,17 @@
 
     save_native_result(masm, ret_type, workspace_slot_offset); // Make Z_R2 available as work reg.
 
-    if (os::is_MP()) {
-      if (UseMembar) {
-        // Force this write out before the read below.
-        __ z_fence();
-      } else {
-        // Write serialization page so VM thread can do a pseudo remote membar.
-        // We use the current thread pointer to calculate a thread specific
-        // offset to write to within the page. This minimizes bus traffic
-        // due to cache line collision.
-        __ serialize_memory(Z_thread, Z_R1, Z_R2);
-      }
+    if (UseMembar) {
+      // Force this write out before the read below.
+      __ z_fence();
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(Z_thread, Z_R1, Z_R2);
     }
+
     __ safepoint_poll(sync, Z_R1);
 
     __ load_and_test_int(Z_R0, Address(Z_thread, JavaThread::suspend_flags_offset()));
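
The same UseMembar-versus-serialization-page choice recurs in each platform's
native wrapper. Its C-level shape, as a sketch (the atomics and constants are
illustrative stand-ins for the real thread-state machinery):

    #include <atomic>

    std::atomic<int>  thread_state{0};
    std::atomic<long> serialization_page_word{0};  // stands in for the VM's page
    const int  _thread_in_native_trans = 4;        // illustrative constant
    const bool UseMembar = true;

    void transition_from_native() {
      thread_state.store(_thread_in_native_trans, std::memory_order_relaxed);
      if (UseMembar) {
        // Force the state store out before the safepoint-state read below.
        std::atomic_thread_fence(std::memory_order_seq_cst);
      } else {
        // Pseudo remote membar: write the serialization page; the VM thread can
        // mprotect that page to force every thread through a serialization point.
        serialization_page_word.store(1, std::memory_order_relaxed);
      }
      // ... read the safepoint / suspend state here ...
    }

    int main() { transition_from_native(); return 0; }
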
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2417,6 +2417,8 @@
   switch (code) {
     case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
     case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
+    default:
+      break;
   }
 
   {
@@ -3213,6 +3215,8 @@
     case Bytecodes::_fast_lputfield:
       __ pop_l(Z_tos);
       break;
+    default:
+      break;
   }
 
   __ bind(exit);
--- a/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/sparc/compiledIC_sparc.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -142,9 +142,7 @@
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
   // Verify stub.
   address stub = find_stub(/*is_aot*/ false);
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2786,7 +2786,7 @@
    delayed()->
    st_ptr(G0, Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 
-   if (os::is_MP()) { membar(StoreLoad); }
+   membar(StoreLoad);
    // Check that _succ is (or remains) non-zero
    ld_ptr(Address(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), Rscratch);
    andcc(Rscratch, Rscratch, G0);
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -614,17 +614,12 @@
 // Returns whether membar generates anything; obviously this code should
 // mirror membar below.
 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
-  if (!os::is_MP())
-    return false;  // Not needed on single CPU
   const Membar_mask_bits effective_mask =
       Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   return (effective_mask != 0);
 }
 
 inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
-  // Uniprocessors do not need memory barriers
-  if (!os::is_MP())
-    return;
   // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
   // 8.4.4.3, a.31 and a.50.
   // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
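
membar_has_effect() above encodes the TSO property the comment describes:
LoadLoad, LoadStore and StoreStore orderings are already guaranteed by the
hardware, so only a StoreLoad request emits anything. As a sketch (bit values
illustrative):

    enum Membar_mask_bits { LoadLoad = 1, StoreLoad = 2, LoadStore = 4, StoreStore = 8 };

    bool membar_has_effect(int const7a) {
      // Mask off the orderings TSO already provides; only StoreLoad survives.
      int effective = const7a & ~(LoadLoad | LoadStore | StoreStore);
      return effective != 0;
    }

    int main() {
      return (!membar_has_effect(StoreStore) && membar_has_effect(StoreLoad)) ? 0 : 1;
    }
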
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2371,17 +2371,16 @@
     //     didn't see any synchronization in progress, and escapes.
     __ set(_thread_in_native_trans, G3_scratch);
     __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
-    if(os::is_MP()) {
-      if (UseMembar) {
-        // Force this write out before the read below
-        __ membar(Assembler::StoreLoad);
-      } else {
-        // Write serialization page so VM thread can do a pseudo remote membar.
-        // We use the current thread pointer to calculate a thread specific
-        // offset to write to within the page. This minimizes bus traffic
-        // due to cache line collision.
-        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
-      }
+
+    if (UseMembar) {
+      // Force this write out before the read below
+      __ membar(Assembler::StoreLoad);
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
     }
 
     Label L;
--- a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1373,17 +1373,16 @@
     //     didn't see any synchronization in progress, and escapes.
     __ set(_thread_in_native_trans, G3_scratch);
     __ st(G3_scratch, thread_state);
-    if (os::is_MP()) {
-      if (UseMembar) {
-        // Force this write out before the read below
-        __ membar(Assembler::StoreLoad);
-      } else {
-        // Write serialization page so VM thread can do a pseudo remote membar.
-        // We use the current thread pointer to calculate a thread specific
-        // offset to write to within the page. This minimizes bus traffic
-        // due to cache line collision.
-        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
-      }
+
+    if (UseMembar) {
+      // Force this write out before the read below
+      __ membar(Assembler::StoreLoad);
+    } else {
+      // Write serialization page so VM thread can do a pseudo remote membar.
+      // We use the current thread pointer to calculate a thread specific
+      // offset to write to within the page. This minimizes bus traffic
+      // due to cache line collision.
+      __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
     }
 
     Label L;
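
Besides the os::is_MP() and is_vector_masking() removals, the x86 hunks that
follow mostly flip the no_mask_reg constructor argument of InstructionAttr
from false to true on instructions that take no EVEX opmask register. The
constructor shape as visible in the diff (the class body below is a simplified
stand-in; the flag's downstream effect on EVEX prefix emission is internal to
the encoder):

    // Simplified stand-in mirroring the argument order seen in the hunks:
    // (vector_len, rex_vex_w, legacy_mode, no_reg_mask, uses_vl).
    class InstructionAttr {
     public:
      InstructionAttr(int vector_len, bool rex_vex_w, bool legacy_mode,
                      bool no_reg_mask, bool uses_vl)
        : _vector_len(vector_len), _rex_vex_w(rex_vex_w),
          _legacy_mode(legacy_mode), _no_reg_mask(no_reg_mask),
          _uses_vl(uses_vl) {}
      bool no_reg_mask() const { return _no_reg_mask; }  // true: no k-register in use
     private:
      int  _vector_len;
      bool _rex_vex_w, _legacy_mode, _no_reg_mask, _uses_vl;
    };

    int main() {
      InstructionAttr attributes(/* AVX_128bit */ 0, /* rex_w */ false,
                                 /* legacy_mode */ false, /* no_mask_reg */ true,
                                 /* uses_vl */ true);
      return attributes.no_reg_mask() ? 0 : 1;
    }
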
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -972,7 +972,6 @@
     return ip;
 
   case 0xF0:                    // Lock
-    assert(os::is_MP(), "only on MP");
     goto again_after_prefix;
 
   case 0xF3:                    // For SSE
@@ -1773,7 +1772,7 @@
 
 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xE6);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -1781,7 +1780,7 @@
 
 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5B);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -1889,7 +1888,7 @@
 void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
-  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xE6);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2199,7 +2198,7 @@
 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
-  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x28);
@@ -2209,7 +2208,7 @@
 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x28);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2234,7 +2233,7 @@
 void Assembler::movddup(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse3(), ""));
   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
-  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
   emit_int8(0x12);
@@ -2465,7 +2464,7 @@
 
 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2474,7 +2473,7 @@
 void Assembler::movdqa(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2484,7 +2483,7 @@
 void Assembler::movdqu(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2493,7 +2492,7 @@
 
 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2502,7 +2501,7 @@
 void Assembler::movdqu(Address dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   attributes.reset_is_clear_context();
   simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -2513,7 +2512,7 @@
 // Move Unaligned 256bit Vector
 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
   assert(UseAVX > 0, "");
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -2522,7 +2521,7 @@
 void Assembler::vmovdqu(XMMRegister dst, Address src) {
   assert(UseAVX > 0, "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2532,7 +2531,7 @@
 void Assembler::vmovdqu(Address dst, XMMRegister src) {
   assert(UseAVX > 0, "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   attributes.reset_is_clear_context();
   // swap src<->dst for encoding
@@ -2580,7 +2579,6 @@
 
 void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len) {
   assert(VM_Version::supports_avx512vlbw(), "");
-  assert(is_vector_masking(), "");    // For stub code use only
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -2604,7 +2602,6 @@
 }
 
 void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
-  assert(is_vector_masking(), "");
   assert(VM_Version::supports_avx512vlbw(), "");
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
@@ -2645,7 +2642,7 @@
 
 void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2655,7 +2652,7 @@
 void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false , /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   attributes.set_is_evex_instruction();
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -2667,7 +2664,7 @@
   assert(VM_Version::supports_evex(), "");
   assert(src != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
@@ -2678,7 +2675,7 @@
 
 void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6F);
@@ -2688,7 +2685,7 @@
 void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   attributes.set_is_evex_instruction();
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
@@ -2700,7 +2697,7 @@
   assert(VM_Version::supports_evex(), "");
   assert(src != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
@@ -3416,7 +3413,7 @@
 
 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
   assert(VM_Version::supports_avx2(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x00);
   emit_int8(0xC0 | encode);
@@ -3425,7 +3422,7 @@
 
 void Assembler::vperm2i128(XMMRegister dst,  XMMRegister nds, XMMRegister src, int imm8) {
   assert(VM_Version::supports_avx2(), "");
-  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x46);
   emit_int8(0xC0 | encode);
@@ -3434,7 +3431,7 @@
 
 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x06);
   emit_int8(0xC0 | encode);
@@ -3464,7 +3461,7 @@
 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
   assert(VM_Version::supports_sse4_2(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x61);
   emit_operand(dst, src);
@@ -3473,7 +3470,7 @@
 
 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
   assert(VM_Version::supports_sse4_2(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x61);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3483,7 +3480,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse2(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x74);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3492,7 +3489,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x74);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3521,7 +3518,6 @@
 }
 
 void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
-  assert(is_vector_masking(), "");
   assert(VM_Version::supports_avx512vlbw(), "");
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
@@ -3546,7 +3542,6 @@
 }
 
 void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
-  assert(is_vector_masking(), "");
   assert(VM_Version::supports_avx512vlbw(), "");
   InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
   attributes.reset_is_clear_context();
@@ -3585,7 +3580,6 @@
 
 void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx512vlbw(), "");
-  assert(is_vector_masking(), "");    // For stub code use only
   InstructionMark im(this);
   InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_reg_mask */ false, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -3600,7 +3594,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse2(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x75);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3609,7 +3603,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x75);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3640,7 +3634,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse2(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x76);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3649,7 +3643,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x76);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3658,7 +3652,7 @@
 // In this context, kdst is written with the mask used to process the equal components
 void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   attributes.reset_is_clear_context();
   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -3669,7 +3663,7 @@
 void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
@@ -3682,7 +3676,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse4_1(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x29);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3691,7 +3685,7 @@
 // In this context, the dst vector contains the components that are equal; non-equal components are zeroed in dst
 void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x29);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3700,7 +3694,7 @@
 // In this context, kdst is written with the mask used to process the equal components
 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -3712,7 +3706,7 @@
 void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
@@ -3724,7 +3718,7 @@
 
 void Assembler::pmovmskb(Register dst, XMMRegister src) {
   assert(VM_Version::supports_sse2(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD7);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3732,7 +3726,7 @@
 
 void Assembler::vpmovmskb(Register dst, XMMRegister src) {
   assert(VM_Version::supports_avx2(), "");
-  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD7);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -3913,7 +3907,6 @@
 
 
 void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
-  assert(is_vector_masking(), "");
   assert(VM_Version::supports_avx512vlbw(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
@@ -3938,7 +3931,6 @@
 }
 
 void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
-  assert(is_vector_masking(), "");
   assert(VM_Version::supports_avx512vlbw(), "");
   assert(src != xnoreg, "sanity");
   InstructionMark im(this);
@@ -4001,7 +3993,7 @@
 
 void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_vpopcntdq(), "must support vpopcntdq feature");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x55);
@@ -4111,7 +4103,7 @@
   assert(isByte(mode), "invalid value");
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x70);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4123,7 +4115,7 @@
          vector_len == AVX_256bit? VM_Version::supports_avx2() :
          0, "");
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x70);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4135,7 +4127,7 @@
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x70);
@@ -4201,7 +4193,7 @@
   assert(VM_Version::supports_sse4_1(), "");
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x17);
   emit_operand(dst, src);
@@ -4209,7 +4201,7 @@
 
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse4_1(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x17);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4218,7 +4210,7 @@
 void Assembler::vptest(XMMRegister dst, Address src) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   assert(dst != xnoreg, "sanity");
   // swap src<->dst for encoding
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -4228,7 +4220,7 @@
 
 void Assembler::vptest(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x17);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4238,7 +4230,7 @@
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x60);
@@ -4247,7 +4239,7 @@
 
 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x60);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4257,7 +4249,7 @@
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x62);
@@ -4266,7 +4258,7 @@
 
 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x62);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4274,7 +4266,7 @@
 
 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x6C);
@@ -4323,7 +4315,7 @@
 
 void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x53);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4331,7 +4323,7 @@
 
 void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
   emit_int8(0x53);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4455,7 +4447,7 @@
 
 void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
   assert(VM_Version::supports_ssse3(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8((unsigned char)0x0F);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4466,7 +4458,7 @@
   assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
          vector_len == AVX_256bit? VM_Version::supports_avx2() :
          0, "");
-  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8((unsigned char)0x0F);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -4475,7 +4467,7 @@
 
 void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x3);
@@ -4628,7 +4620,7 @@
   if (UseAVX > 0 ) {
     assert(VM_Version::supports_avx(), "");
     InstructionMark im(this);
-    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
+    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
     vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
     emit_int8((unsigned char)0xAE);
     emit_operand(as_Register(3), dst);
@@ -5102,7 +5094,7 @@
 
 void Assembler::addpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x58);
@@ -5112,7 +5104,7 @@
 void Assembler::addpd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5123,7 +5115,7 @@
 
 void Assembler::addps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x58);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5131,7 +5123,7 @@
 
 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x58);
@@ -5140,7 +5132,7 @@
 
 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x58);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5149,7 +5141,7 @@
 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5160,7 +5152,7 @@
 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x58);
@@ -5169,7 +5161,7 @@
 
 void Assembler::subpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
@@ -5178,7 +5170,7 @@
 
 void Assembler::subps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5186,7 +5178,7 @@
 
 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
@@ -5195,7 +5187,7 @@
 
 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5204,7 +5196,7 @@
 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5215,7 +5207,7 @@
 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5C);
@@ -5224,7 +5216,7 @@
 
 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
@@ -5234,7 +5226,7 @@
 void Assembler::mulpd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5244,7 +5236,7 @@
 
 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5252,7 +5244,7 @@
 
 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
@@ -5261,7 +5253,7 @@
 
 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5270,7 +5262,7 @@
 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5281,7 +5273,7 @@
 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x59);
@@ -5290,7 +5282,7 @@
 
 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
   assert(VM_Version::supports_fma(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8((unsigned char)0xB8);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5298,7 +5290,7 @@
 
 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
   assert(VM_Version::supports_fma(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8((unsigned char)0xB8);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5307,7 +5299,7 @@
 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
   assert(VM_Version::supports_fma(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8((unsigned char)0xB8);
@@ -5317,7 +5309,7 @@
 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
   assert(VM_Version::supports_fma(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8((unsigned char)0xB8);
@@ -5326,7 +5318,7 @@
 
 void Assembler::divpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5E);
@@ -5335,7 +5327,7 @@
 
 void Assembler::divps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5E);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5343,7 +5335,7 @@
 
 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5E);
@@ -5352,7 +5344,7 @@
 
 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5E);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5361,7 +5353,7 @@
 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5372,7 +5364,7 @@
 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x5E);
@@ -5381,7 +5373,7 @@
 
 void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x51);
@@ -5391,7 +5383,7 @@
 void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5401,7 +5393,7 @@
 
 void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x51);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5410,7 +5402,7 @@
 void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x51);
@@ -5419,7 +5411,7 @@
 
 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x54);
@@ -5428,7 +5420,7 @@
 
 void Assembler::andps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x54);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5437,7 +5429,7 @@
 void Assembler::andps(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x54);
@@ -5447,7 +5439,7 @@
 void Assembler::andpd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5457,7 +5449,7 @@
 
 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x54);
@@ -5466,7 +5458,7 @@
 
 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x54);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5475,7 +5467,7 @@
 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5486,7 +5478,7 @@
 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x54);
@@ -5495,7 +5487,7 @@
 
 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x15);
@@ -5504,7 +5496,7 @@
 
 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x14);
@@ -5513,7 +5505,7 @@
 
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x57);
@@ -5522,7 +5514,7 @@
 
 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x57);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5531,7 +5523,7 @@
 void Assembler::xorpd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5542,7 +5534,7 @@
 void Assembler::xorps(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x57);
@@ -5551,7 +5543,7 @@
 
 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x57);
@@ -5560,7 +5552,7 @@
 
 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x57);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5569,7 +5561,7 @@
 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5580,7 +5572,7 @@
 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8(0x57);
@@ -5600,7 +5592,7 @@
 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx() && (vector_len == 0) ||
          VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x02);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5624,7 +5616,7 @@
 
 void Assembler::paddd(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFE);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5633,7 +5625,7 @@
 void Assembler::paddd(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFE);
   emit_operand(dst, src);
@@ -5641,7 +5633,7 @@
 
 void Assembler::paddq(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD4);
@@ -5658,7 +5650,7 @@
 
 void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse3(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x02);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5682,7 +5674,7 @@
 
 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFE);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5690,7 +5682,7 @@
 
 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD4);
@@ -5720,7 +5712,7 @@
 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFE);
@@ -5730,7 +5722,7 @@
 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5755,7 +5747,7 @@
 }
 
 void Assembler::psubd(XMMRegister dst, XMMRegister src) {
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFA);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5763,7 +5755,7 @@
 
 void Assembler::psubq(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFB);
@@ -5788,7 +5780,7 @@
 
 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFA);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5796,7 +5788,7 @@
 
 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFB);
@@ -5826,7 +5818,7 @@
 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xFA);
@@ -5836,7 +5828,7 @@
 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -5854,7 +5846,7 @@
 
 void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
   assert(VM_Version::supports_sse4_1(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x40);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5870,7 +5862,7 @@
 
 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x40);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5878,7 +5870,7 @@
 
 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 2, "requires some form of EVEX");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x40);
@@ -5898,7 +5890,7 @@
 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x40);
@@ -5908,7 +5900,7 @@
 void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 2, "requires some form of EVEX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_is_evex_instruction();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -5929,7 +5921,7 @@
 
 void Assembler::pslld(XMMRegister dst, int shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   // XMM6 is for /6 encoding: 66 0F 72 /6 ib
   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x72);
@@ -5939,7 +5931,7 @@
 
 void Assembler::psllq(XMMRegister dst, int shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   // XMM6 is for /6 encoding: 66 0F 73 /6 ib
   int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x73);
@@ -5957,7 +5949,7 @@
 
 void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xF2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -5965,7 +5957,7 @@
 
 void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xF3);
@@ -5985,7 +5977,7 @@
 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   // XMM6 is for /6 encoding: 66 0F 72 /6 ib
   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x72);
@@ -5995,7 +5987,7 @@
 
 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   // XMM6 is for /6 encoding: 66 0F 73 /6 ib
   int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -6014,7 +6006,7 @@
 
 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xF2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6022,7 +6014,7 @@
 
 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xF3);
@@ -6042,7 +6034,7 @@
 
 void Assembler::psrld(XMMRegister dst, int shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   // XMM2 is for /2 encoding: 66 0F 72 /2 ib
   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x72);
@@ -6054,7 +6046,7 @@
   // Do not confuse it with psrldq SSE2 instruction which
   // shifts 128 bit value in xmm register by number of bytes.
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   // XMM2 is for /2 encoding: 66 0F 73 /2 ib
   int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
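
[Note] The comment above distinguishes psrlq from psrldq; a worked example using the matching C intrinsics may help (values are illustrative):

    #include <emmintrin.h>  // SSE2
    __m128i v = _mm_set_epi64x(0x1122334455667788LL, (long long)0x99AABBCCDDEEFF00ULL);
    __m128i a = _mm_srli_epi64(v, 8);   // psrlq: each 64-bit lane shifted right 8 BITS
                                        // a: high 0x0011223344556677, low 0x0099AABBCCDDEEFF
    __m128i b = _mm_srli_si128(v, 8);   // psrldq: whole 128-bit register shifted right 8 BYTES
                                        // b: high 0x0000000000000000, low 0x1122334455667788
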
@@ -6073,7 +6065,7 @@
 
 void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6081,7 +6073,7 @@
 
 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD3);
@@ -6100,7 +6092,7 @@
 
 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   // XMM2 is for /2 encoding: 66 0F 72 /2 ib
   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x72);
@@ -6110,7 +6102,7 @@
 
 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   // XMM2 is for /2 encoding: 66 0F 73 /2 ib
   int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -6129,7 +6121,7 @@
 
 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6137,7 +6129,7 @@
 
 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xD3);
@@ -6146,7 +6138,7 @@
 
 void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx512bw(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x10);
@@ -6155,7 +6147,7 @@
 
 void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx512bw(), "");
-  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  // (comment fixed to match the sibling evpsrlvw above: the flag is the VEX.W bit here)
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x12);
@@ -6175,7 +6167,7 @@
 
 void Assembler::psrad(XMMRegister dst, int shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   // XMM4 is for /4 encoding: 66 0F 72 /4 ib
   int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x72);
@@ -6193,7 +6185,7 @@
 
 void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xE2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6211,7 +6203,7 @@
 
 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   // XMM4 is for /4 encoding: 66 0F 72 /4 ib
   int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8(0x72);
@@ -6229,7 +6221,7 @@
 
 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xE2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6239,7 +6231,7 @@
 // logical operations packed integers
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xDB);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6247,7 +6239,7 @@
 
 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xDB);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6256,7 +6248,7 @@
 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xDB);
@@ -6265,7 +6257,7 @@
 
 void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xDB);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6274,7 +6266,7 @@
 
 void Assembler::pandn(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xDF);
@@ -6283,7 +6275,7 @@
 
 void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xDF);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6292,7 +6284,7 @@
 
 void Assembler::por(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xEB);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6300,7 +6292,7 @@
 
 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xEB);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6309,7 +6301,7 @@
 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xEB);
@@ -6318,7 +6310,7 @@
 
 void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xEB);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6327,7 +6319,7 @@
 
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xEF);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6335,7 +6327,7 @@
 
 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xEF);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6344,7 +6336,7 @@
 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
   assert(UseAVX > 0, "requires some form of AVX");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xEF);
@@ -6378,7 +6370,7 @@
 void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx2(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x38);
   emit_int8((unsigned char)(0xC0 | encode));
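
[Note] For the 128-bit insert family emitted here, imm8 selects which 128-bit lane of the destination is replaced. A small intrinsics illustration (AVX2; values are arbitrary):

    #include <immintrin.h>
    __m256i acc   = _mm256_setzero_si256();
    __m128i chunk = _mm_set1_epi32(7);
    // vinserti128 with imm8 == 1 replaces the upper 128-bit lane of acc with chunk.
    __m256i r     = _mm256_inserti128_si256(acc, chunk, 1);
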
@@ -6392,7 +6384,7 @@
   assert(dst != xnoreg, "sanity");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x38);
@@ -6405,7 +6397,7 @@
 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x38);
@@ -6422,7 +6414,7 @@
   assert(dst != xnoreg, "sanity");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   attributes.set_is_evex_instruction();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -6438,7 +6430,7 @@
 void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x3A);
@@ -6454,7 +6446,7 @@
 void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x18);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6468,7 +6460,7 @@
   assert(dst != xnoreg, "sanity");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x18);
@@ -6481,7 +6473,7 @@
 void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx2(), "");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x18);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6497,7 +6489,7 @@
   assert(dst != xnoreg, "sanity");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x18);
@@ -6512,7 +6504,7 @@
 void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x1A);
@@ -6527,7 +6519,7 @@
   assert(dst != xnoreg, "sanity");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_is_evex_instruction();
   vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -6544,7 +6536,7 @@
 void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx2(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x39);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6558,7 +6550,7 @@
   assert(src != xnoreg, "sanity");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   attributes.reset_is_clear_context();
   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -6572,7 +6564,7 @@
 void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x39);
@@ -6589,7 +6581,7 @@
   assert(src != xnoreg, "sanity");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
@@ -6606,7 +6598,7 @@
 void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx512dq(), "");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x39);
@@ -6621,7 +6613,7 @@
 void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x3B);
@@ -6636,7 +6628,7 @@
   assert(src != xnoreg, "sanity");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
@@ -6652,7 +6644,7 @@
 void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x19);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6666,7 +6658,7 @@
   assert(src != xnoreg, "sanity");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   attributes.reset_is_clear_context();
   vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -6680,7 +6672,7 @@
 void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x19);
@@ -6697,7 +6689,7 @@
   assert(src != xnoreg, "sanity");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
@@ -6714,7 +6706,7 @@
 void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_avx512dq(), "");
   assert(imm8 <= 0x03, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x19);
@@ -6729,7 +6721,7 @@
 void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
   assert(VM_Version::supports_evex(), "");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x1B);
@@ -6744,7 +6736,7 @@
   assert(src != xnoreg, "sanity");
   assert(imm8 <= 0x01, "imm8: %u", imm8);
   InstructionMark im(this);
-  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
   attributes.reset_is_clear_context();
   attributes.set_is_evex_instruction();
@@ -6803,7 +6795,7 @@
 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
 void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(UseAVX >= 2, "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x58);
   emit_int8((unsigned char)(0xC0 | encode));
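
[Note] The "programmed locations" phrasing in the broadcast comments just means every 32-bit lane of the destination receives the scalar from the source's low dword. A brief intrinsics sketch (AVX2):

    #include <immintrin.h>
    __m128i x = _mm_cvtsi32_si128(42);        // low dword = 42
    __m256i y = _mm256_broadcastd_epi32(x);   // vpbroadcastd: all eight 32-bit lanes = 42
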
@@ -6813,7 +6805,7 @@
   assert(VM_Version::supports_avx2(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   // swap src<->dst for encoding
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -6824,7 +6816,7 @@
 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
 void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx2(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x59);
@@ -6835,7 +6827,7 @@
   assert(VM_Version::supports_avx2(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   // swap src<->dst for encoding
@@ -6846,7 +6838,7 @@
 void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(vector_len != Assembler::AVX_128bit, "");
   assert(VM_Version::supports_avx512dq(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x5A);
@@ -6858,7 +6850,7 @@
   assert(VM_Version::supports_avx512dq(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
   // swap src<->dst for encoding
@@ -6872,7 +6864,7 @@
 // duplicate single precision data from src into programmed locations in dest : requires AVX512VL
 void Assembler::vpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x18);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -6882,7 +6874,7 @@
   assert(VM_Version::supports_avx(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
   // swap src<->dst for encoding
   vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -6893,7 +6885,7 @@
 // duplicate double precision data from src into programmed locations in dest : requires AVX512VL
 void Assembler::vpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_avx(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_rex_vex_w_reverted();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x19);
@@ -6904,7 +6896,7 @@
   assert(VM_Version::supports_avx(), "");
   assert(dst != xnoreg, "sanity");
   InstructionMark im(this);
-  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
   attributes.set_rex_vex_w_reverted();
   // swap src<->dst for encoding
@@ -6939,7 +6931,7 @@
 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
 void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x7C);
@@ -6949,7 +6941,7 @@
 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
 void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
   attributes.set_is_evex_instruction();
   int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
   emit_int8(0x7C);
@@ -6974,7 +6966,7 @@
 // Carry-Less Multiplication Quadword
 void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
   assert(VM_Version::supports_clmul(), "");
-  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x44);
   emit_int8((unsigned char)(0xC0 | encode));
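
[Note] The mask argument of pclmulqdq is the imm8 that picks which 64-bit half of each operand enters the carry-less multiply: bit 0 selects the half of the first operand, bit 4 the half of the second. A sketch with the matching C intrinsic (polynomial values are illustrative):

    #include <wmmintrin.h>
    __m128i a  = _mm_set_epi64x(0, 0x87);    // example polynomial operands
    __m128i b  = _mm_set_epi64x(0, 0x101);
    __m128i lo = _mm_clmulepi64_si128(a, b, 0x00);  // a[63:0]   * b[63:0]
    __m128i hi = _mm_clmulepi64_si128(a, b, 0x11);  // a[127:64] * b[127:64]
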
@@ -6984,7 +6976,7 @@
 // Carry-Less Multiplication Quadword
 void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
   assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
-  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8(0x44);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -7575,7 +7567,7 @@
   emit_int8(byte3);
 
   // P2: byte 4 as zL'Lbv'aaa
-  // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
+  // kregs are implemented in the low 3 bits as aaa
   int byte4 = (_attributes->is_no_reg_mask()) ?
               0 :
               _attributes->get_embedded_opmask_register_specifier();
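
[Note] For reference, the sketch below shows how P2 packs its fields, following the bit layout named in the comment above (z at bit 7, L'L at bits 6:5, b at bit 4, V' at bit 3, aaa at bits 2:0). Field polarity and sourcing are left to the surrounding code; the helper is illustrative only:

    #include <stdint.h>
    // Illustrative packing of EVEX byte 4 ("P2"); not the exact HotSpot code path.
    static uint8_t pack_evex_p2(bool z, int ll, bool b, bool v_prime, int aaa) {
      return (uint8_t)((z ? 0x80 : 0x00) | ((ll & 0x3) << 5) | (b ? 0x10 : 0x00) |
                       (v_prime ? 0x08 : 0x00) | (aaa & 0x7));
    }
    // With the patched default, an unmasked instruction passes aaa == 0 (k0).
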
@@ -7726,7 +7718,7 @@
 void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   assert(!VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xC2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -7736,7 +7728,7 @@
 void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   assert(!VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8((unsigned char)0x4B);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -7747,7 +7739,7 @@
 void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   assert(!VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
   emit_int8((unsigned char)0xC2);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -7757,7 +7749,7 @@
 void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
   assert(VM_Version::supports_avx(), "");
   assert(!VM_Version::supports_evex(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8((unsigned char)0x4A);
   emit_int8((unsigned char)(0xC0 | encode));
@@ -7767,7 +7759,7 @@
 
 void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
   assert(VM_Version::supports_avx2(), "");
-  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ true);
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
   int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
   emit_int8((unsigned char)0x02);
   emit_int8((unsigned char)(0xC0 | encode));
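
[Note] cmppd/cmpps produce an all-ones or all-zeros mask per lane, which blendvpd/blendvps then consume to select lanes. A brief intrinsics sketch of the pairing (AVX; values are illustrative):

    #include <immintrin.h>
    __m256d x = _mm256_set1_pd(1.0), y = _mm256_set1_pd(2.0);
    __m256d m = _mm256_cmp_pd(x, y, _CMP_LT_OQ);   // vcmppd: all-ones lanes where x < y
    __m256d r = _mm256_blendv_pd(y, x, m);         // vblendvpd: x where mask set, else y
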
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -871,11 +871,6 @@
   void clear_managed(void) { _is_managed = false; }
   bool is_managed(void) { return _is_managed; }
 
-  // Following functions are for stub code use only
-  void set_vector_masking(void) { _vector_masking = true; }
-  void clear_vector_masking(void) { _vector_masking = false; }
-  bool is_vector_masking(void) { return _vector_masking; }
-
   void lea(Register dst, Address src);
 
   void mov(Register dst, Register src);
@@ -1350,40 +1345,38 @@
 
   // Serializes memory and blows flags
   void membar(Membar_mask_bits order_constraint) {
-    if (os::is_MP()) {
-      // We only have to handle StoreLoad
-      if (order_constraint & StoreLoad) {
-        // All usable chips support "locked" instructions which suffice
-        // as barriers, and are much faster than the alternative of
-        // using cpuid instruction. We use here a locked add [esp-C],0.
-        // This is conveniently otherwise a no-op except for blowing
-        // flags, and introducing a false dependency on target memory
-        // location. We can't do anything with flags, but we can avoid
-        // memory dependencies in the current method by locked-adding
-        // somewhere else on the stack. Doing [esp+C] will collide with
-        // something on stack in current method, hence we go for [esp-C].
-        // It is convenient since it is almost always in data cache, for
-        // any small C.  We need to step back from SP to avoid data
-        // dependencies with other things on below SP (callee-saves, for
-        // example). Without a clear way to figure out the minimal safe
-        // distance from SP, it makes sense to step back the complete
-        // cache line, as this will also avoid possible second-order effects
-        // with locked ops against the cache line. Our choice of offset
-        // is bounded by x86 operand encoding, which should stay within
-        // [-128; +127] to have the 8-byte displacement encoding.
-        //
-        // Any change to this code may need to revisit other places in
-        // the code where this idiom is used, in particular the
-        // orderAccess code.
-
-        int offset = -VM_Version::L1_line_size();
-        if (offset < -128) {
-          offset = -128;
-        }
-
-        lock();
-        addl(Address(rsp, offset), 0);// Assert the lock# signal here
+    // We only have to handle StoreLoad
+    if (order_constraint & StoreLoad) {
+      // All usable chips support "locked" instructions which suffice
+      // as barriers, and are much faster than the alternative of
+      // using cpuid instruction. We use here a locked add [esp-C],0.
+      // This is conveniently otherwise a no-op except for blowing
+      // flags, and introducing a false dependency on target memory
+      // location. We can't do anything with flags, but we can avoid
+      // memory dependencies in the current method by locked-adding
+      // somewhere else on the stack. Doing [esp+C] will collide with
+      // something on stack in current method, hence we go for [esp-C].
+      // It is convenient since it is almost always in data cache, for
+      // any small C.  We need to step back from SP to avoid data
+      // dependencies with other things on below SP (callee-saves, for
+      // example). Without a clear way to figure out the minimal safe
+      // distance from SP, it makes sense to step back the complete
+      // cache line, as this will also avoid possible second-order effects
+      // with locked ops against the cache line. Our choice of offset
+      // is bounded by x86 operand encoding, which should stay within
+      // [-128; +127] to have the 8-byte displacement encoding.
+      //
+      // Any change to this code may need to revisit other places in
+      // the code where this idiom is used, in particular the
+      // orderAccess code.
+
+      int offset = -VM_Version::L1_line_size();
+      if (offset < -128) {
+        offset = -128;
       }
+
+      lock();
+      addl(Address(rsp, offset), 0); // Assert the lock# signal here
     }
   }
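
[Note] With the os::is_MP() guard gone, the StoreLoad handling is unconditional; on x86's TSO memory model StoreLoad is also the only ordering that needs an instruction at all. A call-site sketch using the real Membar_mask_bits names (the fence lowers to a lock addl against [rsp - L1 line size], clamped to -128 as above):

    // Request a full fence; only the StoreLoad bit costs an instruction on x86.
    __ membar(Assembler::Membar_mask_bits(Assembler::LoadLoad  | Assembler::LoadStore |
                                          Assembler::StoreLoad | Assembler::StoreStore));
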
 
@@ -2210,7 +2203,7 @@
     int vector_len,     // The length of vector to be applied in encoding - for both AVX and EVEX
     bool rex_vex_w,     // Width of data: if 32-bits or less, false, else if 64-bit or specially defined, true
     bool legacy_mode,   // Details if either this instruction is conditionally encoded to AVX or earlier if true else possibly EVEX
-    bool no_reg_mask,   // when true, k0 is used when EVEX encoding is chosen, else k1 is used under the same condition
+    bool no_reg_mask,   // when true, k0 is used when EVEX encoding is chosen, else embedded_opmask_register_specifier is used
     bool uses_vl)       // This instruction may have legacy constraints based on vector length for EVEX
     :
       _avx_vector_len(vector_len),
@@ -2225,7 +2218,7 @@
       _evex_encoding(0),
       _is_clear_context(true),
       _is_extended_context(false),
-      _embedded_opmask_register_specifier(1), // hard code k1, it will be initialized for now
+      _embedded_opmask_register_specifier(0), // hard code k0
       _current_assembler(NULL) {
     if (UseAVX < 3) _legacy_mode = true;
   }
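
As background for the two changes above: in the EVEX prefix the opmask register is named by the 3-bit `aaa` field in the low bits of the P2 byte, and `aaa == 0` selects k0, which architecturally means "no masking". A hedged sketch of just that field (helper name hypothetical; the other P2 bits are left untouched):

```cpp
#include <cstdint>

// Hypothetical helper: patch the 3-bit 'aaa' opmask-selector field (bits
// 2:0 of the EVEX P2 byte). Writing 0 selects k0, i.e. unmasked operation,
// which is why _embedded_opmask_register_specifier now defaults to 0.
static inline uint8_t evex_p2_with_opmask(uint8_t p2, int opmask_reg /* 0..7 */) {
  return (uint8_t)((p2 & ~0x07) | (opmask_reg & 0x07));
}
```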
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1906,9 +1906,7 @@
     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
     Register addr = op->addr()->as_register();
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
 
   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
@@ -1928,24 +1926,18 @@
         __ encode_heap_oop(cmpval);
         __ mov(rscratch1, newval);
         __ encode_heap_oop(rscratch1);
-        if (os::is_MP()) {
-          __ lock();
-        }
+        __ lock();
         // cmpval (rax) is implicitly used by this instruction
         __ cmpxchgl(rscratch1, Address(addr, 0));
       } else
 #endif
       {
-        if (os::is_MP()) {
-          __ lock();
-        }
+        __ lock();
         __ cmpxchgptr(newval, Address(addr, 0));
       }
     } else {
       assert(op->code() == lir_cas_int, "lir_cas_int expected");
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ cmpxchgl(newval, Address(addr, 0));
     }
 #ifdef _LP64
@@ -1958,9 +1950,7 @@
     assert(cmpval != newval, "cmp and new values must be in different registers");
     assert(cmpval != addr, "cmp and addr must be in different registers");
     assert(newval != addr, "new value and addr must be in different registers");
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     __ cmpxchgq(newval, Address(addr, 0));
 #endif // _LP64
   } else {
@@ -2403,8 +2393,9 @@
           if (UseAVX > 2 && !VM_Version::supports_avx512vl()) {
             assert(tmp->is_valid(), "need temporary");
             __ vpandn(dest->as_xmm_double_reg(), tmp->as_xmm_double_reg(), value->as_xmm_double_reg(), 2);
-          } else {
+          } else
 #endif
+          {
             if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
               __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
             }
@@ -2803,28 +2794,26 @@
 
 
 void LIR_Assembler::align_call(LIR_Code code) {
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    int offset = __ offset();
-    switch (code) {
-      case lir_static_call:
-      case lir_optvirtual_call:
-      case lir_dynamic_call:
-        offset += NativeCall::displacement_offset;
-        break;
-      case lir_icvirtual_call:
-        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
-      break;
-      case lir_virtual_call:  // currently, sparc-specific for niagara
-      default: ShouldNotReachHere();
-    }
-    __ align(BytesPerWord, offset);
+  // make sure that the displacement word of the call ends up word aligned
+  int offset = __ offset();
+  switch (code) {
+  case lir_static_call:
+  case lir_optvirtual_call:
+  case lir_dynamic_call:
+    offset += NativeCall::displacement_offset;
+    break;
+  case lir_icvirtual_call:
+    offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
+    break;
+  case lir_virtual_call:  // currently, sparc-specific for niagara
+  default: ShouldNotReachHere();
   }
+  __ align(BytesPerWord, offset);
 }
 
 
 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
-  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rtype));
   add_call_info(code_offset(), op->info());
@@ -2834,8 +2823,7 @@
 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   __ ic_call(op->addr());
   add_call_info(code_offset(), op->info());
-  assert(!os::is_MP() ||
-         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
 }
 
@@ -2855,14 +2843,13 @@
   }
 
   int start = __ offset();
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
-  }
+
+  // make sure that the displacement word of the call ends up word aligned
+  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
   __ mov_metadata(rbx, (Metadata*)NULL);
   // must be set to -1 at code generation time
-  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
+  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));
 
@@ -3991,9 +3978,7 @@
 
   if (data->type() == T_INT) {
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
     } else {
       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
@@ -4016,9 +4001,7 @@
 #ifdef _LP64
     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
     } else {
       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
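
All of the lock-cmpxchg sequences in this file share one contract: rax holds the expected value, the destination is compared and conditionally replaced, and ZF reports success. A rough portable restatement, for orientation only (not HotSpot code):

```cpp
#include <atomic>
#include <cstdint>

// Rough portable equivalent of the lock-cmpxchg sequences emitted above;
// on x86-64 compare_exchange_strong compiles down to lock cmpxchg with
// the expected value in rax and success reported via ZF.
bool cas_word(std::atomic<intptr_t>& cell, intptr_t expected, intptr_t desired) {
  return cell.compare_exchange_strong(expected, desired);
}
```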
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -65,7 +65,7 @@
   // test if object header is still the same (i.e. unlocked), and if so, store the
   // displaced header address in the object header - if it is not the same, get the
   // object header instead
-  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
+  MacroAssembler::lock(); // must be immediately before cmpxchg!
   cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
   // if the object header was the same, we're done
   if (PrintBiasedLockingStatistics) {
@@ -126,7 +126,7 @@
   // test if object header is pointing to the displaced header, and if so, restore
   // the displaced header in the object - if the object header is not pointing to
   // the displaced header, get the object header instead
-  if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
+  MacroAssembler::lock(); // must be immediately before cmpxchg!
   cmpxchgptr(hdr, Address(obj, hdr_offset));
   // if the object header was not pointing to the displaced header,
   // we do unlocking via runtime call
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -198,9 +198,7 @@
 void CompiledDirectStaticCall::verify() {
   // Verify call.
   _call->verify();
-  if (os::is_MP()) {
-    _call->verify_alignment();
-  }
+  _call->verify_alignment();
 
 #ifdef ASSERT
   CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1191,7 +1191,7 @@
     assert(lock_offset == 0,
            "displaced header must be first word in BasicObjectLock");
 
-    if (os::is_MP()) lock();
+    lock();
     cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     if (PrintBiasedLockingStatistics) {
       cond_inc32(Assembler::zero,
@@ -1288,7 +1288,7 @@
     jcc(Assembler::zero, done);
 
     // Atomic swap back the old header
-    if (os::is_MP()) lock();
+    lock();
     cmpxchgptr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 
     // zero for simple unlock of a stack-lock case
--- a/src/hotspot/cpu/x86/jniFastGetField_x86_32.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/jniFastGetField_x86_32.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -75,15 +75,11 @@
   __ mov32 (rcx, counter);
   __ testb (rcx, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ mov(rax, rcx);
-    __ andptr(rax, 1);                         // rax, must end up 0
-    __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
-                                              // obj, notice rax, is 0.
-                                              // rdx is data dependent on rcx.
-  } else {
-    __ movptr (rdx, Address(rsp, 2*wordSize));  // obj
-  }
+  __ mov(rax, rcx);
+  __ andptr(rax, 1);                         // rax must end up 0
+  __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
+                                            // obj; notice rax is 0.
+                                            // rdx is data dependent on rcx.
   __ movptr(rax, Address(rsp, 3*wordSize));  // jfieldID
 
   __ clear_jweak_tag(rdx);
@@ -103,17 +99,13 @@
   }
 
   Address ca1;
-  if (os::is_MP()) {
-    __ lea(rdx, counter);
-    __ xorptr(rdx, rax);
-    __ xorptr(rdx, rax);
-    __ cmp32(rcx, Address(rdx, 0));
-    // ca1 is the same as ca because
-    // rax, ^ counter_addr ^ rax, = address
-    // ca1 is data dependent on rax,.
-  } else {
-    __ cmp32(rcx, counter);
-  }
+  __ lea(rdx, counter);
+  __ xorptr(rdx, rax);
+  __ xorptr(rdx, rax);
+  __ cmp32(rcx, Address(rdx, 0));
+  // ca1 is the same as ca because
+  // rax ^ counter_addr ^ rax == address
+  // ca1 is data dependent on rax.
   __ jcc (Assembler::notEqual, slow);
 
 #ifndef _WINDOWS
@@ -131,7 +123,8 @@
     case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
     case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
     case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
-    case T_INT:     slow_case_addr = jni_GetIntField_addr();
+    case T_INT:     slow_case_addr = jni_GetIntField_addr();     break;
+    default:        ShouldNotReachHere();
   }
   // tail call
   __ jump (ExternalAddress(slow_case_addr));
@@ -195,15 +188,11 @@
   __ mov32 (rcx, counter);
   __ testb (rcx, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ mov(rax, rcx);
-    __ andptr(rax, 1);                         // rax, must end up 0
-    __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
-                                              // obj, notice rax, is 0.
-                                              // rdx is data dependent on rcx.
-  } else {
-    __ movptr(rdx, Address(rsp, 3*wordSize));  // obj
-  }
+  __ mov(rax, rcx);
+  __ andptr(rax, 1);                         // rax must end up 0
+  __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
+                                            // obj; notice rax is 0.
+                                            // rdx is data dependent on rcx.
   __ movptr(rsi, Address(rsp, 4*wordSize));  // jfieldID
 
   __ clear_jweak_tag(rdx);
@@ -219,19 +208,15 @@
   __ movl(rdx, Address(rdx, rsi, Address::times_1, 4));
 #endif // _LP64
 
-  if (os::is_MP()) {
-    __ lea(rsi, counter);
-    __ xorptr(rsi, rdx);
-    __ xorptr(rsi, rax);
-    __ xorptr(rsi, rdx);
-    __ xorptr(rsi, rax);
-    __ cmp32(rcx, Address(rsi, 0));
-    // ca1 is the same as ca because
-    // rax, ^ rdx ^ counter_addr ^ rax, ^ rdx = address
-    // ca1 is data dependent on both rax, and rdx.
-  } else {
-    __ cmp32(rcx, counter);
-  }
+  __ lea(rsi, counter);
+  __ xorptr(rsi, rdx);
+  __ xorptr(rsi, rax);
+  __ xorptr(rsi, rdx);
+  __ xorptr(rsi, rax);
+  __ cmp32(rcx, Address(rsi, 0));
+  // ca1 is the same as ca because
+  // rax ^ rdx ^ counter_addr ^ rax ^ rdx == address
+  // ca1 is data dependent on both rax and rdx.
   __ jcc (Assembler::notEqual, slow);
 
   __ pop (rsi);
@@ -287,15 +272,11 @@
   __ mov32 (rcx, counter);
   __ testb (rcx, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ mov(rax, rcx);
-    __ andptr(rax, 1);                         // rax, must end up 0
-    __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
-                                              // obj, notice rax, is 0.
-                                              // rdx is data dependent on rcx.
-  } else {
-    __ movptr(rdx, Address(rsp, 2*wordSize)); // obj
-  }
+  __ mov(rax, rcx);
+  __ andptr(rax, 1);                         // rax must end up 0
+  __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
+                                            // obj; notice rax is 0.
+                                            // rdx is data dependent on rcx.
   __ movptr(rax, Address(rsp, 3*wordSize));  // jfieldID
 
   __ clear_jweak_tag(rdx);
@@ -317,20 +298,16 @@
   }
 
   Address ca1;
-  if (os::is_MP()) {
-    __ fst_s (Address(rsp, -4));
-    __ lea(rdx, counter);
-    __ movl (rax, Address(rsp, -4));
-    // garbage hi-order bits on 64bit are harmless.
-    __ xorptr(rdx, rax);
-    __ xorptr(rdx, rax);
-    __ cmp32(rcx, Address(rdx, 0));
-                                          // rax, ^ counter_addr ^ rax, = address
-                                          // ca1 is data dependent on the field
-                                          // access.
-  } else {
-    __ cmp32(rcx, counter);
-  }
+  __ fst_s (Address(rsp, -4));
+  __ lea(rdx, counter);
+  __ movl (rax, Address(rsp, -4));
+  // garbage hi-order bits on 64bit are harmless.
+  __ xorptr(rdx, rax);
+  __ xorptr(rdx, rax);
+  __ cmp32(rcx, Address(rdx, 0));
+  // rax ^ counter_addr ^ rax == address
+  // ca1 is data dependent on the field
+  // access.
   __ jcc (Assembler::notEqual, slow_with_pop);
 
 #ifndef _WINDOWS
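
The fast paths in this file follow a seqlock-style read protocol: give up if the counter is odd (a writer is active), load the field speculatively, then confirm the counter has not changed. A minimal sketch of that protocol (names hypothetical; the stubs enforce ordering with data dependencies rather than fences):

```cpp
#include <atomic>

// Hypothetical seqlock-style reader mirroring the fast path above. An odd
// counter means a write is in progress; a counter that changed after the
// read invalidates the speculatively loaded value. (The non-atomic field
// read is fine as a sketch; the real stubs order it via data dependencies.)
template <typename T>
bool try_fast_read(const std::atomic<unsigned>& counter, const T& field, T& out) {
  unsigned c = counter.load(std::memory_order_acquire);
  if (c & 1) return false;                              // writer active
  out = field;                                          // speculative load
  std::atomic_thread_fence(std::memory_order_acquire);
  return counter.load(std::memory_order_relaxed) == c;  // still valid?
}
```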
--- a/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -77,12 +77,11 @@
   __ mov   (robj, c_rarg1);
   __ testb (rcounter, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ xorptr(robj, rcounter);
-    __ xorptr(robj, rcounter);                   // obj, since
-                                                // robj ^ rcounter ^ rcounter == robj
-                                                // robj is data dependent on rcounter.
-  }
+
+  __ xorptr(robj, rcounter);
+  __ xorptr(robj, rcounter);  // obj, since
+                              // robj ^ rcounter ^ rcounter == robj
+                              // robj is data dependent on rcounter.
 
   __ mov   (roffset, c_rarg2);
   __ shrptr(roffset, 2);                         // offset
@@ -104,15 +103,12 @@
     default:        ShouldNotReachHere();
   }
 
-  if (os::is_MP()) {
-    __ lea(rcounter_addr, counter);
-    // ca is data dependent on rax.
-    __ xorptr(rcounter_addr, rax);
-    __ xorptr(rcounter_addr, rax);
-    __ cmpl (rcounter, Address(rcounter_addr, 0));
-  } else {
-    __ cmp32 (rcounter, counter);
-  }
+  // create data dependency on rax
+  __ lea(rcounter_addr, counter);
+  __ xorptr(rcounter_addr, rax);
+  __ xorptr(rcounter_addr, rax);
+  __ cmpl (rcounter, Address(rcounter_addr, 0));
+
   __ jcc (Assembler::notEqual, slow);
 
   __ ret (0);
@@ -181,12 +177,11 @@
   __ mov   (robj, c_rarg1);
   __ testb (rcounter, 1);
   __ jcc (Assembler::notZero, slow);
-  if (os::is_MP()) {
-    __ xorptr(robj, rcounter);
-    __ xorptr(robj, rcounter);                   // obj, since
-                                                // robj ^ rcounter ^ rcounter == robj
-                                                // robj is data dependent on rcounter.
-  }
+
+  __ xorptr(robj, rcounter);
+  __ xorptr(robj, rcounter);  // obj, since
+                              // robj ^ rcounter ^ rcounter == robj
+                              // robj is data dependent on rcounter.
 
   // Both robj and rtmp are clobbered by try_resolve_jobject_in_native.
   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
@@ -204,16 +199,12 @@
     default:        ShouldNotReachHere();
   }
 
-  if (os::is_MP()) {
-    __ lea(rcounter_addr, counter);
-    __ movdq (rax, xmm0);
-    // counter address is data dependent on xmm0.
-    __ xorptr(rcounter_addr, rax);
-    __ xorptr(rcounter_addr, rax);
-    __ cmpl (rcounter, Address(rcounter_addr, 0));
-  } else {
-    __ cmp32 (rcounter, counter);
-  }
+  __ lea(rcounter_addr, counter);
+  __ movdq (rax, xmm0);
+  // counter address is data dependent on xmm0.
+  __ xorptr(rcounter_addr, rax);
+  __ xorptr(rcounter_addr, rax);
+  __ cmpl (rcounter, Address(rcounter_addr, 0));
   __ jcc (Assembler::notEqual, slow);
 
   __ ret (0);
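
The double-xor in the 64-bit stubs is an artificial data dependency: the value is unchanged (robj ^ rcounter ^ rcounter == robj), but the address now cannot be computed until the counter load completes, so the hardware cannot hoist the field load above it. In C the trick looks like this (a compiler would fold it away; the stubs emit the two xors as literal instructions):

```cpp
#include <cstdint>

// Sketch of the double-xor dependency trick: the result equals addr, but
// it is data-dependent on counter_value, so a subsequent load through it
// cannot be reordered ahead of the counter read by the CPU.
static inline uintptr_t carry_dependency(uintptr_t addr, uintptr_t counter_value) {
  addr ^= counter_value;
  addr ^= counter_value;   // addr ^ c ^ c == addr
  return addr;
}
```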
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1030,8 +1030,7 @@
 }
 
 void MacroAssembler::atomic_incl(Address counter_addr) {
-  if (os::is_MP())
-    lock();
+  lock();
   incrementl(counter_addr);
 }
 
@@ -1046,8 +1045,7 @@
 
 #ifdef _LP64
 void MacroAssembler::atomic_incq(Address counter_addr) {
-  if (os::is_MP())
-    lock();
+  lock();
   incrementq(counter_addr);
 }
 
@@ -1213,9 +1211,7 @@
   get_thread(tmp_reg);
   orptr(tmp_reg, swap_reg);
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
@@ -1248,9 +1244,7 @@
   orptr(tmp_reg, swap_reg);
   movptr(swap_reg, saved_mark_addr);
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // If the biasing toward our thread failed, then another thread
   // succeeded in biasing it toward itself and we need to revoke that
@@ -1278,9 +1272,7 @@
   // bits in this situation. Should attempt to preserve them.
   NOT_LP64( movptr(swap_reg, saved_mark_addr); )
   load_prototype_header(tmp_reg, obj_reg);
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
@@ -1376,9 +1368,7 @@
   if (method_data != NULL) {
     // set rtm_state to "no rtm" in MDO
     mov_metadata(tmpReg, method_data);
-    if (os::is_MP()) {
-      lock();
-    }
+    lock();
     orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
   }
   jmpb(L_done);
@@ -1392,9 +1382,7 @@
   if (method_data != NULL) {
     // set rtm_state to "always rtm" in MDO
     mov_metadata(tmpReg, method_data);
-    if (os::is_MP()) {
-      lock();
-    }
+    lock();
     orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
   }
   bind(L_done);
@@ -1605,9 +1593,7 @@
   get_thread(scrReg);
   Register threadReg = scrReg;
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
 
   if (RTMRetryCount > 0) {
@@ -1767,9 +1753,7 @@
   // Attempt stack-locking ...
   orptr (tmpReg, markOopDesc::unlocked_value);
   movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes()));      // Updates tmpReg
   if (counters != NULL) {
     cond_inc32(Assembler::equal,
@@ -1826,9 +1810,7 @@
   // we later store "Self" into m->Owner.  Transiently storing a stack address
   // (rsp or the address of the box) into  m->owner is harmless.
   // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
   // If we weren't able to swing _owner from NULL to the BasicLock
@@ -1851,9 +1833,7 @@
   movq(scrReg, tmpReg);
   xorq(tmpReg, tmpReg);
 
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
   // Without cast to int32_t movptr will destroy r10 which is typically obj.
@@ -2000,9 +1980,7 @@
   // The "box" value on the stack is stable, so we can reload
   // and be assured we observe the same value as above.
   movptr(tmpReg, Address(boxReg, 0));
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
   // Intention fall-thru into DONE_LABEL
 
@@ -2036,16 +2014,16 @@
 
   xorptr(boxReg, boxReg);
   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
-  if (os::is_MP()) {
-    // Memory barrier/fence
-    // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
-    // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
-    // This is faster on Nehalem and AMD Shanghai/Barcelona.
-    // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
-    // We might also restructure (ST Owner=0;barrier;LD _Succ) to
-    // (mov box,0; xchgq box, &m->Owner; LD _succ) .
-    lock(); addl(Address(rsp, 0), 0);
-  }
+
+  // Memory barrier/fence
+  // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
+  // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
+  // This is faster on Nehalem and AMD Shanghai/Barcelona.
+  // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
+  // We might also restructure (ST Owner=0;barrier;LD _Succ) to
+  // (mov box,0; xchgq box, &m->Owner; LD _succ) .
+  lock(); addl(Address(rsp, 0), 0);
+
   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
   jccb  (Assembler::notZero, LSuccess);
 
@@ -2063,7 +2041,7 @@
 
   // box is really RAX -- the following CMPXCHG depends on that binding
   // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
-  if (os::is_MP()) { lock(); }
+  lock();
   cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   // There's no successor so we tried to regrab the lock.
   // If that didn't work, then another thread grabbed the
@@ -2081,7 +2059,7 @@
 
   bind  (Stacked);
   movptr(tmpReg, Address (boxReg, 0));      // re-fetch
-  if (os::is_MP()) { lock(); }
+  lock();
   cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
 
 #endif
@@ -2633,13 +2611,11 @@
 
 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
   if (reachable(adr)) {
-    if (os::is_MP())
-      lock();
+    lock();
     cmpxchgptr(reg, as_Address(adr));
   } else {
     lea(rscratch1, adr);
-    if (os::is_MP())
-      lock();
+    lock();
     cmpxchgptr(reg, Address(rscratch1, 0));
   }
 }
@@ -3266,7 +3242,9 @@
   }
 }
 
+#ifdef COMPILER2
 void MacroAssembler::setvectmask(Register dst, Register src) {
+  guarantee(PostLoopMultiversioning, "must be");
   Assembler::movl(dst, 1);
   Assembler::shlxl(dst, dst, src);
   Assembler::decl(dst);
@@ -3275,8 +3253,10 @@
 }
 
 void MacroAssembler::restorevectmask() {
+  guarantee(PostLoopMultiversioning, "must be");
   Assembler::knotwl(k1, k0);
 }
+#endif // COMPILER2
 
 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
   if (reachable(src)) {
@@ -5026,12 +5006,15 @@
   // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
   vzeroupper();
   // Reset k1 to 0xffff.
-  if (VM_Version::supports_evex()) {
+
+#ifdef COMPILER2
+  if (PostLoopMultiversioning && VM_Version::supports_evex()) {
     push(rcx);
     movl(rcx, 0xffff);
     kmovwl(k1, rcx);
     pop(rcx);
   }
+#endif // COMPILER2
 
 #ifndef _LP64
   // Either restore the x87 floating-point control word after returning
@@ -6681,8 +6664,6 @@
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
 
-    set_vector_masking();  // opening of the stub context for programming mask registers
-
     Label test_64_loop, test_tail;
     Register tmp3_aliased = len;
 
@@ -6711,15 +6692,12 @@
     testl(tmp1, -1);
     jcc(Assembler::zero, FALSE_LABEL);
 
-    // Save k1
-    kmovql(k3, k1);
-
     // ~(~0 << len) applied up to two times (for 32-bit scenario)
 #ifdef _LP64
     mov64(tmp3_aliased, 0xFFFFFFFFFFFFFFFF);
     shlxq(tmp3_aliased, tmp3_aliased, tmp1);
     notq(tmp3_aliased);
-    kmovql(k1, tmp3_aliased);
+    kmovql(k3, tmp3_aliased);
 #else
     Label k_init;
     jmp(k_init);
@@ -6728,7 +6706,7 @@
     // data required to compose 64 1's to the instruction stream
     // We emit a 64-byte-wide series of elements 0..63 which is later
     // used as compare targets, with the tail count contained in tmp1.
-    // Result would be a k1 register having tmp1 consecutive number or 1
+    // Result would be a k register with tmp1 consecutive 1-bits,
     // counting from least significant bit.
     address tmp = pc();
     emit_int64(0x0706050403020100);
@@ -6744,18 +6722,14 @@
     lea(len, InternalAddress(tmp));
     // create mask to test for negative byte inside a vector
     evpbroadcastb(vec1, tmp1, Assembler::AVX_512bit);
-    evpcmpgtb(k1, vec1, Address(len, 0), Assembler::AVX_512bit);
+    evpcmpgtb(k3, vec1, Address(len, 0), Assembler::AVX_512bit);
 
 #endif
-    evpcmpgtb(k2, k1, vec2, Address(ary1, 0), Assembler::AVX_512bit);
-    ktestq(k2, k1);
-    // Restore k1
-    kmovql(k1, k3);
+    evpcmpgtb(k2, k3, vec2, Address(ary1, 0), Assembler::AVX_512bit);
+    ktestq(k2, k3);
     jcc(Assembler::notZero, TRUE_LABEL);
 
     jmp(FALSE_LABEL);
-
-    clear_vector_masking();   // closing of the stub context for programming mask registers
   } else {
     movl(result, len); // copy
 
@@ -7197,10 +7171,6 @@
     {
       assert( UseSSE >= 2, "supported cpu only" );
       Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
-      if (UseAVX > 2) {
-        movl(rtmp, 0xffff);
-        kmovwl(k1, rtmp);
-      }
       movdl(xtmp, value);
       if (UseAVX > 2 && UseUnalignedLoadStores) {
         // Fill 64-byte chunks
@@ -7945,7 +7915,6 @@
       VM_Version::supports_avx512vlbw()) {
     Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
 
-    set_vector_masking();  // opening of the stub context for programming mask registers
     cmpq(length, 64);
     jcc(Assembler::less, VECTOR32_TAIL);
     movq(tmp1, length);
@@ -7968,19 +7937,15 @@
 
     //bind(VECTOR64_TAIL);
     // AVX512 code to compare vectors of up to 63 bytes.
-    // Save k1
-    kmovql(k3, k1);
     mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
     shlxq(tmp2, tmp2, tmp1);
     notq(tmp2);
-    kmovql(k1, tmp2);
-
-    evmovdqub(rymm0, k1, Address(obja, result), Assembler::AVX_512bit);
-    evpcmpeqb(k7, k1, rymm0, Address(objb, result), Assembler::AVX_512bit);
-
-    ktestql(k7, k1);
-    // Restore k1
-    kmovql(k1, k3);
+    kmovql(k3, tmp2);
+
+    evmovdqub(rymm0, k3, Address(obja, result), Assembler::AVX_512bit);
+    evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit);
+
+    ktestql(k7, k3);
     jcc(Assembler::below, SAME_TILL_END);     // not mismatch
 
     bind(VECTOR64_NOT_EQUAL);
@@ -7991,7 +7956,6 @@
     shrq(result);
     jmp(DONE);
     bind(VECTOR32_TAIL);
-    clear_vector_masking();   // closing of the stub context for programming mask registers
   }
 
   cmpq(length, 8);
@@ -8752,11 +8716,6 @@
   // For EVEX with VL and BW, provide a standard mask; VL = 128 will guide the
   // merge context for the registers used, where all instructions below use
   // 128-bit mode. On EVEX without VL and BW, these instructions will all be AVX.
-  if (VM_Version::supports_avx512vlbw()) {
-    movl(tmp, 0xffff);
-    kmovwl(k1, tmp);
-  }
-
   lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
   notl(crc); // ~crc
   cmpl(len, 16);
@@ -9418,9 +9377,7 @@
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
 
-    set_vector_masking();  // opening of the stub context for programming mask registers
-
-    Label copy_32_loop, copy_loop_tail, restore_k1_return_zero, below_threshold;
+    Label copy_32_loop, copy_loop_tail, below_threshold;
 
     // alignment
     Label post_alignment;
@@ -9434,9 +9391,6 @@
     movl(result, 0x00FF);
     evpbroadcastw(tmp2Reg, result, Assembler::AVX_512bit);
 
-    // Save k1
-    kmovql(k3, k1);
-
     testl(len, -64);
     jcc(Assembler::zero, post_alignment);
 
@@ -9453,14 +9407,14 @@
     movl(result, 0xFFFFFFFF);
     shlxl(result, result, tmp5);
     notl(result);
-    kmovdl(k1, result);
-
-    evmovdquw(tmp1Reg, k1, Address(src, 0), Assembler::AVX_512bit);
-    evpcmpuw(k2, k1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
-    ktestd(k2, k1);
-    jcc(Assembler::carryClear, restore_k1_return_zero);
-
-    evpmovwb(Address(dst, 0), k1, tmp1Reg, Assembler::AVX_512bit);
+    kmovdl(k3, result);
+
+    evmovdquw(tmp1Reg, k3, Address(src, 0), Assembler::AVX_512bit);
+    evpcmpuw(k2, k3, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
+    ktestd(k2, k3);
+    jcc(Assembler::carryClear, return_zero);
+
+    evpmovwb(Address(dst, 0), k3, tmp1Reg, Assembler::AVX_512bit);
 
     addptr(src, tmp5);
     addptr(src, tmp5);
@@ -9483,7 +9437,7 @@
     evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit);
     evpcmpuw(k2, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
     kortestdl(k2, k2);
-    jcc(Assembler::carryClear, restore_k1_return_zero);
+    jcc(Assembler::carryClear, return_zero);
 
     // All elements in current processed chunk are valid candidates for
     // compression. Write a truncated byte elements to the memory.
@@ -9494,8 +9448,6 @@
     bind(copy_loop_tail);
     // bail out when there is nothing to be done
     testl(tmp5, 0xFFFFFFFF);
-    // Restore k1
-    kmovql(k1, k3);
     jcc(Assembler::zero, return_length);
 
     movl(len, tmp5);
@@ -9505,25 +9457,16 @@
     shlxl(result, result, len);
     notl(result);
 
-    kmovdl(k1, result);
-
-    evmovdquw(tmp1Reg, k1, Address(src, 0), Assembler::AVX_512bit);
-    evpcmpuw(k2, k1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
-    ktestd(k2, k1);
-    jcc(Assembler::carryClear, restore_k1_return_zero);
-
-    evpmovwb(Address(dst, 0), k1, tmp1Reg, Assembler::AVX_512bit);
-    // Restore k1
-    kmovql(k1, k3);
+    kmovdl(k3, result);
+
+    evmovdquw(tmp1Reg, k3, Address(src, 0), Assembler::AVX_512bit);
+    evpcmpuw(k2, k3, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit);
+    ktestd(k2, k3);
+    jcc(Assembler::carryClear, return_zero);
+
+    evpmovwb(Address(dst, 0), k3, tmp1Reg, Assembler::AVX_512bit);
     jmp(return_length);
 
-    bind(restore_k1_return_zero);
-    // Restore k1
-    kmovql(k1, k3);
-    jmp(return_zero);
-
-    clear_vector_masking();   // closing of the stub context for programming mask registers
-
     bind(below_threshold);
   }
 
@@ -9637,8 +9580,6 @@
     VM_Version::supports_avx512vlbw() &&
     VM_Version::supports_bmi2()) {
 
-    set_vector_masking();  // opening of the stub context for programming mask registers
-
     Label copy_32_loop, copy_tail;
     Register tmp3_aliased = len;
 
@@ -9670,22 +9611,15 @@
     testl(tmp2, -1); // we don't destroy the contents of tmp2 here
     jcc(Assembler::zero, done);
 
-    // Save k1
-    kmovql(k2, k1);
-
     // ~(~0 << length), where length is the # of remaining elements to process
     movl(tmp3_aliased, -1);
     shlxl(tmp3_aliased, tmp3_aliased, tmp2);
     notl(tmp3_aliased);
-    kmovdl(k1, tmp3_aliased);
-    evpmovzxbw(tmp1, k1, Address(src, 0), Assembler::AVX_512bit);
-    evmovdquw(Address(dst, 0), k1, tmp1, Assembler::AVX_512bit);
-
-    // Restore k1
-    kmovql(k1, k2);
+    kmovdl(k2, tmp3_aliased);
+    evpmovzxbw(tmp1, k2, Address(src, 0), Assembler::AVX_512bit);
+    evmovdquw(Address(dst, 0), k2, tmp1, Assembler::AVX_512bit);
+
     jmp(done);
-
-    clear_vector_masking();   // closing of the stub context for programming mask registers
   }
   if (UseSSE42Intrinsics) {
     Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail;
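
A recurring building block in the hunks above is the tail mask ~(~0 << n): n trailing 1-bits loaded into a k register (now k3/k2 instead of k1) so that a 512-bit masked op touches only the n remaining elements. For reference:

```cpp
#include <cstdint>

// The tail-mask computation used by the intrinsics above: n trailing
// 1-bits, so a masked 512-bit load/store/compare processes exactly the
// n tail elements. Mirrors the mov64/shlxq/notq/kmovql sequence.
// (Shift by 64 is undefined; the stubs only reach here with n < 64.)
static inline uint64_t tail_mask(unsigned n) {
  return ~(~0ULL << n);
}
```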
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -156,9 +156,11 @@
   void incrementq(Register reg, int value = 1);
   void incrementq(Address dst, int value = 1);
 
+#ifdef COMPILER2
   // special instructions for EVEX
   void setvectmask(Register dst, Register src);
   void restorevectmask();
+#endif
 
   // Support optimal SSE move instructions.
   void movflt(XMMRegister dst, XMMRegister src) {
--- a/src/hotspot/cpu/x86/nativeInst_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/nativeInst_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -202,9 +202,7 @@
   assert (instr_addr != NULL, "illegal address for code patching");
 
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
-  if (os::is_MP()) {
-    guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
-  }
+  guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");
 
   // First patch dummy jmp in place
   unsigned char patch[4];
@@ -262,67 +260,14 @@
   assert(Patching_lock->is_locked() ||
          SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   // Both C1 and C2 should now be generating code which aligns the patched address
-  // to be within a single cache line except that C1 does not do the alignment on
-  // uniprocessor systems.
+  // to be within a single cache line.
   bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                     ((uintptr_t)displacement_address() + 3) / cache_line_size;
 
-  guarantee(!os::is_MP() || is_aligned, "destination must be aligned");
+  guarantee(is_aligned, "destination must be aligned");
 
-  if (is_aligned) {
-    // Simple case:  The destination lies within a single cache line.
-    set_destination(dest);
-  } else if ((uintptr_t)instruction_address() / cache_line_size ==
-             ((uintptr_t)instruction_address()+1) / cache_line_size) {
-    // Tricky case:  The instruction prefix lies within a single cache line.
-    intptr_t disp = dest - return_address();
-#ifdef AMD64
-    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
-#endif // AMD64
-
-    int call_opcode = instruction_address()[0];
-
-    // First patch dummy jump in place:
-    {
-      u_char patch_jump[2];
-      patch_jump[0] = 0xEB;       // jmp rel8
-      patch_jump[1] = 0xFE;       // jmp to self
-
-      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
-      *(short*)instruction_address() = *(short*)patch_jump;
-    }
-    // Invalidate.  Opteron requires a flush after every write.
-    wrote(0);
-
-    // (Note: We assume any reader which has already started to read
-    // the unpatched call will completely read the whole unpatched call
-    // without seeing the next writes we are about to make.)
-
-    // Next, patch the last three bytes:
-    u_char patch_disp[5];
-    patch_disp[0] = call_opcode;
-    *(int32_t*)&patch_disp[1] = (int32_t)disp;
-    assert(sizeof(patch_disp)==instruction_size, "sanity check");
-    for (int i = sizeof(short); i < instruction_size; i++)
-      instruction_address()[i] = patch_disp[i];
-
-    // Invalidate.  Opteron requires a flush after every write.
-    wrote(sizeof(short));
-
-    // (Note: We assume that any reader which reads the opcode we are
-    // about to repatch will also read the writes we just made.)
-
-    // Finally, overwrite the jump:
-    *(short*)instruction_address() = *(short*)patch_disp;
-    // Invalidate.  Opteron requires a flush after every write.
-    wrote(0);
-
-    debug_only(verify());
-    guarantee(destination() == dest, "patch succeeded");
-  } else {
-    // Impossible:  One or the other must be atomically writable.
-    ShouldNotReachHere();
-  }
+  // The destination lies within a single cache line.
+  set_destination(dest);
 }
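
The guarantee above encodes why the patch is MT-safe: all four displacement bytes must sit in one cache line so a single store updates them atomically with respect to concurrent instruction fetch. The containment test is just integer division:

```cpp
#include <cstdint>

// Sketch of the alignment check guarded above: a 4-byte displacement is
// patchable with one atomic store only if it does not straddle a cache
// line, i.e. its first and last byte map to the same line index.
static inline bool fits_in_one_cache_line(uintptr_t disp_addr, uintptr_t line_size) {
  return (disp_addr / line_size) == ((disp_addr + 3) / line_size);
}
```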
 
 
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2007,12 +2007,9 @@
     // Save (object->mark() | 1) into BasicLock's displaced header
     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
 
-    if (os::is_MP()) {
-      __ lock();
-    }
-
     // src -> dest iff dest == rax, else rax, <- dest
     // *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
+    __ lock();
     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     __ jcc(Assembler::equal, lock_done);
 
@@ -2091,19 +2088,17 @@
   //     didn't see any synchronization in progress, and escapes.
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
 
-  if(os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::Membar_mask_bits(
-           Assembler::LoadLoad | Assembler::LoadStore |
-           Assembler::StoreLoad | Assembler::StoreStore));
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(thread, rcx);
-    }
+  if (UseMembar) {
+    // Force this write out before the read below
+    __ membar(Assembler::Membar_mask_bits(
+              Assembler::LoadLoad | Assembler::LoadStore |
+              Assembler::StoreLoad | Assembler::StoreStore));
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(thread, rcx);
   }
 
   if (AlwaysRestoreFPU) {
@@ -2199,12 +2194,9 @@
     __ lea(rax, Address(rbp, lock_slot_rbp_offset));
 
     // Atomic swap old header if oop still contains the stack lock
-    if (os::is_MP()) {
-    __ lock();
-    }
-
     // src -> dest iff dest == rax, else rax, <- dest
     // *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
+    __ lock();
     __ cmpxchgptr(rbx, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     __ jcc(Assembler::notEqual, slow_path_unlock);
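
The UseMembar arm above needs a full four-way barrier between the thread-state store and the subsequent safepoint check; in portable terms that is a sequentially consistent fence (the serialization-page arm has no portable analogue). A rough sketch:

```cpp
#include <atomic>

// Rough portable analogue of the UseMembar path: publish the new thread
// state, then fence so the following safepoint-state load cannot be
// reordered before it. The StoreLoad component is the expensive part.
void publish_state_then_fence(std::atomic<int>& thread_state, int new_state) {
  thread_state.store(new_state, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // full membar
}
```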
 
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -2464,11 +2464,8 @@
     // Save (object->mark() | 1) into BasicLock's displaced header
     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
 
-    if (os::is_MP()) {
-      __ lock();
-    }
-
     // src -> dest iff dest == rax else rax <- dest
+    __ lock();
     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     __ jcc(Assembler::equal, lock_done);
 
@@ -2558,19 +2555,17 @@
   //     didn't see any synchronization in progress, and escapes.
   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
 
-  if(os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::Membar_mask_bits(
-           Assembler::LoadLoad | Assembler::LoadStore |
-           Assembler::StoreLoad | Assembler::StoreStore));
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(r15_thread, rcx);
-    }
+  if (UseMembar) {
+    // Force this write out before the read below
+    __ membar(Assembler::Membar_mask_bits(
+                Assembler::LoadLoad | Assembler::LoadStore |
+                Assembler::StoreLoad | Assembler::StoreStore));
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(r15_thread, rcx);
   }
 
   Label after_transition;
@@ -2661,9 +2656,7 @@
     __ movptr(old_hdr, Address(rax, 0));
 
     // Atomic swap old header if oop still contains the stack lock
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     __ jcc(Assembler::notEqual, slow_path_unlock);
 
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -87,8 +87,8 @@
     case T_INT:     inc_counter_np(SharedRuntime::_jint_array_copy_ctr); return;
     case T_LONG:    inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); return;
     case T_OBJECT:  inc_counter_np(SharedRuntime::_oop_array_copy_ctr); return;
+    default:        ShouldNotReachHere();
     }
-    ShouldNotReachHere();
 #endif //PRODUCT
   }
 
@@ -153,12 +153,6 @@
     __ movptr(saved_rsi, rsi);
     __ movptr(saved_rbx, rbx);
 
-    // provide initial value for required masks
-    if (UseAVX > 2) {
-      __ movl(rbx, 0xffff);
-      __ kmovwl(k1, rbx);
-    }
-
     // save and initialize %mxcsr
     if (sse_save) {
       Label skip_ldmx;
@@ -679,12 +673,7 @@
   void xmm_copy_forward(Register from, Register to_from, Register qword_count) {
     assert( UseSSE >= 2, "supported cpu only" );
     Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
-    if (UseAVX > 2) {
-      __ push(rbx);
-      __ movl(rbx, 0xffff);
-      __ kmovwl(k1, rbx);
-      __ pop(rbx);
-    }
+
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
     __ align(OptoLoopAlignment);
@@ -2115,14 +2104,6 @@
 
     __ enter();   // required for proper stackwalking of RuntimeStub frame
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rdx, 0xffff);
-      __ kmovdl(k1, rdx);
-    }
-
     __ movptr(from, from_param);
     __ movptr(key, key_param);
 
@@ -2222,14 +2203,6 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rdx, 0xffff);
-      __ kmovdl(k1, rdx);
-    }
-
     __ movptr(from, from_param);
     __ movptr(key, key_param);
 
@@ -2356,14 +2329,6 @@
     __ enter(); // required for proper stackwalking of RuntimeStub frame
     handleSOERegisters(true /*saving*/);
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rdx, 0xffff);
-      __ kmovdl(k1, rdx);
-    }
-
     // load registers from incoming parameters
     const Address  from_param(rbp, 8+0);
     const Address  to_param  (rbp, 8+4);
@@ -2532,14 +2497,6 @@
     __ enter(); // required for proper stackwalking of RuntimeStub frame
     handleSOERegisters(true /*saving*/);
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rdx, 0xffff);
-      __ kmovdl(k1, rdx);
-    }
-
     // load registers from incoming parameters
     const Address  from_param(rbp, 8+0);
     const Address  to_param  (rbp, 8+4);
@@ -2693,14 +2650,6 @@
     __ enter(); // required for proper stackwalking of RuntimeStub frame
     handleSOERegisters(true /*saving*/); // save rbx, rsi, rdi
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rdx, 0xffff);
-      __ kmovdl(k1, rdx);
-    }
-
     // load registers from incoming parameters
     const Address  from_param(rbp, 8+0);
     const Address  to_param  (rbp, 8+4);
@@ -3154,14 +3103,6 @@
     __ enter();
     handleSOERegisters(true);  // Save registers
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rdx, 0xffff);
-      __ kmovdl(k1, rdx);
-    }
-
     __ movptr(state, state_param);
     __ movptr(subkeyH, subkeyH_param);
     __ movptr(data, data_param);
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -254,10 +254,7 @@
     __ movptr(r13_save, r13);
     __ movptr(r14_save, r14);
     __ movptr(r15_save, r15);
-    if (UseAVX > 2) {
-      __ movl(rbx, 0xffff);
-      __ kmovwl(k1, rbx);
-    }
+
 #ifdef _WIN64
     int last_reg = 15;
     if (UseAVX > 2) {
@@ -610,7 +607,7 @@
     address start = __ pc();
 
     __ movl(rax, c_rarg2);
-   if ( os::is_MP() ) __ lock();
+    __ lock();
     __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
     __ ret(0);
 
@@ -636,7 +633,7 @@
     address start = __ pc();
 
     __ movsbq(rax, c_rarg2);
-   if ( os::is_MP() ) __ lock();
+    __ lock();
     __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
     __ ret(0);
 
@@ -662,7 +659,7 @@
     address start = __ pc();
 
     __ movq(rax, c_rarg2);
-   if ( os::is_MP() ) __ lock();
+    __ lock();
     __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
     __ ret(0);
 
@@ -683,7 +680,7 @@
     address start = __ pc();
 
     __ movl(rax, c_rarg0);
-   if ( os::is_MP() ) __ lock();
+    __ lock();
     __ xaddl(Address(c_rarg1, 0), c_rarg0);
     __ addl(rax, c_rarg0);
     __ ret(0);
@@ -705,7 +702,7 @@
     address start = __ pc();
 
     __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
-   if ( os::is_MP() ) __ lock();
+    __ lock();
     __ xaddptr(Address(c_rarg1, 0), c_rarg0);
     __ addptr(rax, c_rarg0);
     __ ret(0);
@@ -1257,10 +1254,6 @@
     __ align(OptoLoopAlignment);
     if (UseUnalignedLoadStores) {
       Label L_end;
-      if (UseAVX > 2) {
-        __ movl(to, 0xffff);
-        __ kmovwl(k1, to);
-      }
       // Copy 64-bytes per iteration
       __ BIND(L_loop);
       if (UseAVX > 2) {
@@ -1341,10 +1334,6 @@
     __ align(OptoLoopAlignment);
     if (UseUnalignedLoadStores) {
       Label L_end;
-      if (UseAVX > 2) {
-        __ movl(to, 0xffff);
-        __ kmovwl(k1, to);
-      }
       // Copy 64-bytes per iteration
       __ BIND(L_loop);
       if (UseAVX > 2) {
@@ -3005,14 +2994,6 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rax, 0xffff);
-      __ kmovql(k1, rax);
-    }
-
     // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
     __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
 
@@ -3107,14 +3088,6 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rax, 0xffff);
-      __ kmovql(k1, rax);
-    }
-
     // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
     __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
 
@@ -3227,14 +3200,6 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rax, 0xffff);
-      __ kmovql(k1, rax);
-    }
-
 #ifdef _WIN64
     // on win64, fill len_reg from stack position
     __ movl(len_reg, len_mem);
@@ -3428,14 +3393,6 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rax, 0xffff);
-      __ kmovql(k1, rax);
-    }
-
 #ifdef _WIN64
     // on win64, fill len_reg from stack position
     __ movl(len_reg, len_mem);
@@ -3902,14 +3859,6 @@
 
     __ enter(); // required for proper stackwalking of RuntimeStub frame
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-        __ movl(rax, 0xffff);
-        __ kmovql(k1, rax);
-    }
-
 #ifdef _WIN64
     // allocate spill slots for r13, r14
     enum {
@@ -4484,14 +4433,6 @@
 
     __ enter();
 
-    // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge
-    // context for the registers used, where all instructions below are using 128-bit mode
-    // On EVEX without VL and BW, these instructions will all be AVX.
-    if (VM_Version::supports_avx512vlbw()) {
-      __ movl(rax, 0xffff);
-      __ kmovql(k1, rax);
-    }
-
     __ movdqu(xmm_temp10, ExternalAddress(StubRoutines::x86::ghash_long_swap_mask_addr()));
 
     __ movdqu(xmm_temp0, Address(state, 0));
@@ -4761,7 +4702,6 @@
     __ push(r13);
     __ push(r14);
     __ push(r15);
-    __ push(rbx);
 
     // arguments
     const Register source = c_rarg0; // Source Array
@@ -4790,8 +4730,6 @@
     __ cmpl(length, 0);
     __ jcc(Assembler::lessEqual, L_exit);
 
-    // Save k1 value in rbx
-    __ kmovql(rbx, k1);
     __ lea(r11, ExternalAddress(StubRoutines::x86::base64_charset_addr()));
     // check if base64 charset(isURL=0) or base64 url charset(isURL=1) needs to be loaded
     __ cmpl(isURL, 0);
@@ -4802,7 +4740,7 @@
     __ BIND(L_processdata);
     __ movdqu(xmm16, ExternalAddress(StubRoutines::x86::base64_gather_mask_addr()));
     // Set 64 bits of K register.
-    __ evpcmpeqb(k1, xmm16, xmm16, Assembler::AVX_512bit);
+    __ evpcmpeqb(k3, xmm16, xmm16, Assembler::AVX_512bit);
     __ evmovdquq(xmm12, ExternalAddress(StubRoutines::x86::base64_bswap_mask_addr()), Assembler::AVX_256bit, r13);
     __ evmovdquq(xmm13, ExternalAddress(StubRoutines::x86::base64_right_shift_mask_addr()), Assembler::AVX_512bit, r13);
     __ evmovdquq(xmm14, ExternalAddress(StubRoutines::x86::base64_left_shift_mask_addr()), Assembler::AVX_512bit, r13);
@@ -4881,17 +4819,17 @@
     __ vextracti64x4(xmm4, xmm5, 1);
     __ vpmovzxwd(xmm7, xmm4, Assembler::AVX_512bit);
 
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm4, k2, Address(r11, xmm0, Address::times_4, 0), Assembler::AVX_512bit);
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm5, k2, Address(r11, xmm1, Address::times_4, 0), Assembler::AVX_512bit);
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm8, k2, Address(r11, xmm2, Address::times_4, 0), Assembler::AVX_512bit);
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm9, k2, Address(r11, xmm3, Address::times_4, 0), Assembler::AVX_512bit);
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm10, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm11, k2, Address(r11, xmm7, Address::times_4, 0), Assembler::AVX_512bit);
 
     //Down convert dword to byte. Final output is 16*6 = 96 bytes long
@@ -4927,9 +4865,9 @@
     __ vpmovzxwd(xmm6, xmm9, Assembler::AVX_512bit);
     __ vextracti64x4(xmm9, xmm1, 1);
     __ vpmovzxwd(xmm5, xmm9,  Assembler::AVX_512bit);
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm8, k2, Address(r11, xmm6, Address::times_4, 0), Assembler::AVX_512bit);
-    __ kmovql(k2, k1);
+    __ kmovql(k2, k3);
     __ evpgatherdd(xmm10, k2, Address(r11, xmm5, Address::times_4, 0), Assembler::AVX_512bit);
     __ evpmovdb(Address(dest, dp, Address::times_1, 0), xmm8, Assembler::AVX_512bit);
     __ evpmovdb(Address(dest, dp, Address::times_1, 16), xmm10, Assembler::AVX_512bit);
@@ -4985,9 +4923,6 @@
     __ addq(source, 3);
     __ jmp(L_process3);
     __ BIND(L_exit);
-    // restore k1 register value
-    __ kmovql(k1, rbx);
-    __ pop(rbx);
     __ pop(r15);
     __ pop(r14);
     __ pop(r13);
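
The atomic add stubs above follow one pattern: lock xadd leaves the old value in the incoming register, and the trailing add reconstructs old + increment, so the stub returns the new value. Stated portably:

```cpp
#include <atomic>
#include <cstdint>

// Portable statement of the atomic-add stubs' contract: fetch_add yields
// the old value (as lock xadd does), and adding the increment back gives
// the new value, which is what the stubs return in rax.
int32_t atomic_add_new_value(std::atomic<int32_t>& cell, int32_t inc) {
  return cell.fetch_add(inc) + inc;
}
```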
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1090,19 +1090,17 @@
   __ movl(Address(thread, JavaThread::thread_state_offset()),
           _thread_in_native_trans);
 
-  if (os::is_MP()) {
-    if (UseMembar) {
-      // Force this write out before the read below
-      __ membar(Assembler::Membar_mask_bits(
-           Assembler::LoadLoad | Assembler::LoadStore |
-           Assembler::StoreLoad | Assembler::StoreStore));
-    } else {
-      // Write serialization page so VM thread can do a pseudo remote membar.
-      // We use the current thread pointer to calculate a thread specific
-      // offset to write to within the page. This minimizes bus traffic
-      // due to cache line collision.
-      __ serialize_memory(thread, rcx);
-    }
+  if (UseMembar) {
+    // Force this write out before the read below
+    __ membar(Assembler::Membar_mask_bits(
+                Assembler::LoadLoad | Assembler::LoadStore |
+                Assembler::StoreLoad | Assembler::StoreStore));
+  } else {
+    // Write serialization page so VM thread can do a pseudo remote membar.
+    // We use the current thread pointer to calculate a thread specific
+    // offset to write to within the page. This minimizes bus traffic
+    // due to cache line collision.
+    __ serialize_memory(thread, rcx);
   }
 
 #ifndef _LP64
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -448,7 +448,7 @@
     Label notNull;
     ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
     __ movptr(tmp, null_sentinel);
-    __ cmpptr(tmp, result);
+    __ cmpoop(tmp, result);
     __ jccb(Assembler::notEqual, notNull);
     __ xorptr(result, result);  // NULL object reference
     __ bind(notNull);
@@ -2714,7 +2714,6 @@
 
 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
   // Helper function to insert an is-volatile test and memory barrier
-  if(!os::is_MP()) return;    // Not needed on single CPU
   __ membar(order_constraint);
 }
 
@@ -3493,13 +3492,12 @@
   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
   // replace index with field offset from cache entry
   // [jk] not needed currently
-  // if (os::is_MP()) {
-  //   __ movl(rdx, Address(rcx, rbx, Address::times_8,
-  //                        in_bytes(ConstantPoolCache::base_offset() +
-  //                                 ConstantPoolCacheEntry::flags_offset())));
-  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
-  //   __ andl(rdx, 0x1);
-  // }
+  // __ movl(rdx, Address(rcx, rbx, Address::times_8,
+  //                      in_bytes(ConstantPoolCache::base_offset() +
+  //                               ConstantPoolCacheEntry::flags_offset())));
+  // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
+  // __ andl(rdx, 0x1);
+  //
   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
                          in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
@@ -3544,13 +3542,11 @@
     ShouldNotReachHere();
   }
   // [jk] not needed currently
-  // if (os::is_MP()) {
   //   Label notVolatile;
   //   __ testl(rdx, rdx);
   //   __ jcc(Assembler::zero, notVolatile);
   //   __ membar(Assembler::LoadLoad);
   //   __ bind(notVolatile);
-  //};
 }
 
 void TemplateTable::fast_xaccess(TosState state) {
@@ -3585,17 +3581,15 @@
   }
 
   // [jk] not needed currently
-  // if (os::is_MP()) {
-  //   Label notVolatile;
-  //   __ movl(rdx, Address(rcx, rdx, Address::times_8,
-  //                        in_bytes(ConstantPoolCache::base_offset() +
-  //                                 ConstantPoolCacheEntry::flags_offset())));
-  //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
-  //   __ testl(rdx, 0x1);
-  //   __ jcc(Assembler::zero, notVolatile);
-  //   __ membar(Assembler::LoadLoad);
-  //   __ bind(notVolatile);
-  // }
+  // Label notVolatile;
+  // __ movl(rdx, Address(rcx, rdx, Address::times_8,
+  //                      in_bytes(ConstantPoolCache::base_offset() +
+  //                               ConstantPoolCacheEntry::flags_offset())));
+  // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
+  // __ testl(rdx, 0x1);
+  // __ jcc(Assembler::zero, notVolatile);
+  // __ membar(Assembler::LoadLoad);
+  // __ bind(notVolatile);
 
   __ decrement(rbcp);
 }
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -401,8 +401,6 @@
       // load value into all 64 bytes of zmm7 register
       __ movl(rcx, VM_Version::ymm_test_value());
       __ movdl(xmm0, rcx);
-      __ movl(rcx, 0xffff);
-      __ kmovwl(k1, rcx);
       __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
       __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
 #ifdef _LP64
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -608,10 +608,10 @@
 
   static bool os_supports_avx_vectors() {
     bool retVal = false;
+    int nreg = 2 LP64_ONLY(+2);
     if (supports_evex()) {
       // Verify that OS save/restore all bits of EVEX registers
       // during signal processing.
-      int nreg = 2 LP64_ONLY(+2);
       retVal = true;
       for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
         if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
@@ -622,7 +622,6 @@
     } else if (supports_avx()) {
       // Verify that OS save/restore all bits of AVX registers
       // during signal processing.
-      int nreg = 2 LP64_ONLY(+2);
       retVal = true;
       for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
         if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
@@ -634,7 +633,6 @@
       if (retVal == false) {
         // Verify that OS save/restore all bits of EVEX registers
         // during signal processing.
-        int nreg = 2 LP64_ONLY(+2);
         retVal = true;
         for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
           if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
--- a/src/hotspot/cpu/x86/x86.ad	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/x86.ad	Thu Oct 04 14:17:59 2018 +0530
@@ -2804,11 +2804,7 @@
 
   format %{
     $$template
-    if (os::is_MP()) {
-      $$emit$$"pause\t! membar_onspinwait"
-    } else {
-      $$emit$$"MEMBAR-onspinwait ! (empty encoding)"
-    }
+    $$emit$$"pause\t! membar_onspinwait"
   %}
   ins_encode %{
     __ pause();
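
membar_onspinwait now formats and encodes the pause instruction
unconditionally. The same hint is available from C++; a small sketch
(assumes x86 and <immintrin.h>; spin_until is illustrative):

    #include <atomic>
    #include <immintrin.h>

    // Poll a flag with the pause hint that membar_onspinwait lowers to;
    // pause cheapens tight spin loops on x86.
    void spin_until(const std::atomic<bool>& flag) {
      while (!flag.load(std::memory_order_acquire)) {
        _mm_pause();   // "pause\t! membar_onspinwait"
      }
    }
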
--- a/src/hotspot/cpu/x86/x86_32.ad	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/x86_32.ad	Thu Oct 04 14:17:59 2018 +0530
@@ -2087,8 +2087,7 @@
   %}
 
   enc_class lock_prefix( ) %{
-    if( os::is_MP() )
-      emit_opcode(cbuf,0xF0);         // [Lock]
+    emit_opcode(cbuf,0xF0);         // [Lock]
   %}
 
   // Cmp-xchg long value.
@@ -2102,8 +2101,7 @@
     emit_opcode(cbuf,0x87);
     emit_opcode(cbuf,0xD9);
     // [Lock]
-    if( os::is_MP() )
-      emit_opcode(cbuf,0xF0);
+    emit_opcode(cbuf,0xF0);
     // CMPXCHG8 [Eptr]
     emit_opcode(cbuf,0x0F);
     emit_opcode(cbuf,0xC7);
@@ -2115,8 +2113,7 @@
 
   enc_class enc_cmpxchg(eSIRegP mem_ptr) %{
     // [Lock]
-    if( os::is_MP() )
-      emit_opcode(cbuf,0xF0);
+    emit_opcode(cbuf,0xF0);
 
     // CMPXCHG [Eptr]
     emit_opcode(cbuf,0x0F);
@@ -2126,8 +2123,7 @@
 
   enc_class enc_cmpxchgb(eSIRegP mem_ptr) %{
     // [Lock]
-    if( os::is_MP() )
-      emit_opcode(cbuf,0xF0);
+    emit_opcode(cbuf,0xF0);
 
     // CMPXCHGB [Eptr]
     emit_opcode(cbuf,0x0F);
@@ -2137,8 +2133,7 @@
 
   enc_class enc_cmpxchgw(eSIRegP mem_ptr) %{
     // [Lock]
-    if( os::is_MP() )
-      emit_opcode(cbuf,0xF0);
+    emit_opcode(cbuf,0xF0);
 
     // 16-bit mode
     emit_opcode(cbuf, 0x66);
@@ -6764,11 +6759,7 @@
 
   format %{
     $$template
-    if (os::is_MP()) {
-      $$emit$$"LOCK ADDL [ESP + #0], 0\t! membar_volatile"
-    } else {
-      $$emit$$"MEMBAR-volatile ! (empty encoding)"
-    }
+    $$emit$$"LOCK ADDL [ESP + #0], 0\t! membar_volatile"
   %}
   ins_encode %{
     __ membar(Assembler::StoreLoad);
@@ -7373,8 +7364,7 @@
     //       rcx as the high order word of the new value to store but
     //       our register encoding uses rbx.
     __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
-    if( os::is_MP() )
-      __ lock();
+    __ lock();
     __ cmpxchg8($mem$$Address);
     __ xchgl(as_Register(EBX_enc), as_Register(ECX_enc));
   %}
@@ -7499,7 +7489,7 @@
   effect(KILL cr);
   format %{ "ADDB  [$mem],$add" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ addb($mem$$Address, $add$$constant);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7511,7 +7501,7 @@
   effect(KILL cr);
   format %{ "XADDB  [$mem],$newval" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ xaddb($mem$$Address, $newval$$Register);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7523,7 +7513,7 @@
   effect(KILL cr);
   format %{ "ADDS  [$mem],$add" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ addw($mem$$Address, $add$$constant);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7534,7 +7524,7 @@
   effect(KILL cr);
   format %{ "XADDS  [$mem],$newval" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ xaddw($mem$$Address, $newval$$Register);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7546,7 +7536,7 @@
   effect(KILL cr);
   format %{ "ADDL  [$mem],$add" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ addl($mem$$Address, $add$$constant);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7557,7 +7547,7 @@
   effect(KILL cr);
   format %{ "XADDL  [$mem],$newval" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ xaddl($mem$$Address, $newval$$Register);
   %}
   ins_pipe( pipe_cmpxchg );
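
Every lock prefix in this file is now emitted unconditionally, which
matches what compilers already do for atomics on x86 regardless of
processor count. A minimal sketch in standard C++:

    #include <atomic>

    // fetch_add compiles to "lock xadd" on x86 whether or not the
    // machine is multiprocessor, like the encodings above.
    int bump(std::atomic<int>& counter) {
      return counter.fetch_add(1, std::memory_order_seq_cst);
    }
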
--- a/src/hotspot/cpu/x86/x86_64.ad	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu Oct 04 14:17:59 2018 +0530
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -2341,9 +2341,7 @@
 
   enc_class lock_prefix()
   %{
-    if (os::is_MP()) {
-      emit_opcode(cbuf, 0xF0); // lock
-    }
+    emit_opcode(cbuf, 0xF0); // lock
   %}
 
   enc_class REX_mem(memory mem)
@@ -6601,11 +6599,7 @@
 
   format %{
     $$template
-    if (os::is_MP()) {
-      $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
-    } else {
-      $$emit$$"MEMBAR-volatile ! (empty encoding)"
-    }
+    $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
   %}
   ins_encode %{
     __ membar(Assembler::StoreLoad);
@@ -7801,7 +7795,7 @@
   effect(KILL cr);
   format %{ "ADDB  [$mem],$add" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ addb($mem$$Address, $add$$constant);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7812,7 +7806,7 @@
   effect(KILL cr);
   format %{ "XADDB  [$mem],$newval" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ xaddb($mem$$Address, $newval$$Register);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7824,7 +7818,7 @@
   effect(KILL cr);
   format %{ "ADDW  [$mem],$add" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ addw($mem$$Address, $add$$constant);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7835,7 +7829,7 @@
   effect(KILL cr);
   format %{ "XADDW  [$mem],$newval" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ xaddw($mem$$Address, $newval$$Register);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7847,7 +7841,7 @@
   effect(KILL cr);
   format %{ "ADDL  [$mem],$add" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ addl($mem$$Address, $add$$constant);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7858,7 +7852,7 @@
   effect(KILL cr);
   format %{ "XADDL  [$mem],$newval" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ xaddl($mem$$Address, $newval$$Register);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7870,7 +7864,7 @@
   effect(KILL cr);
   format %{ "ADDQ  [$mem],$add" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ addq($mem$$Address, $add$$constant);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -7881,7 +7875,7 @@
   effect(KILL cr);
   format %{ "XADDQ  [$mem],$newval" %}
   ins_encode %{
-    if (os::is_MP()) { __ lock(); }
+    __ lock();
     __ xaddq($mem$$Address, $newval$$Register);
   %}
   ins_pipe( pipe_cmpxchg );
@@ -10898,7 +10892,7 @@
   ins_pipe(pipe_slow);
 %}
 
-instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero, 
+instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
                         Universe dummy, rFlagsReg cr)
 %{
   predicate(((ClearArrayNode*)n)->is_large());
@@ -10942,7 +10936,7 @@
     }
   %}
   ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, 
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register,
                  $tmp$$XMMRegister, true);
   %}
   ins_pipe(pipe_slow);
--- a/src/hotspot/os/linux/os_linux.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os/linux/os_linux.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -137,7 +137,6 @@
 address   os::Linux::_initial_thread_stack_bottom = NULL;
 uintptr_t os::Linux::_initial_thread_stack_size   = 0;
 
-int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
 int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
 int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
 Mutex* os::Linux::_createThread_lock = NULL;
@@ -268,8 +267,7 @@
 
 // Most versions of Linux have a bug where the number of processors is
 // determined by looking at the /proc file system.  In a chroot environment,
-// the system call returns 1.  This causes the VM to act as if it is
-// a single processor and elide locking (see is_MP() call).
+// the system call returns 1.
 static bool unsafe_chroot_detected = false;
 static const char *unstable_chroot_error = "/proc file system not found.\n"
                      "Java may be unstable running multithreaded in a chroot "
@@ -1173,6 +1171,10 @@
 ////////////////////////////////////////////////////////////////////////////////
 // time support
 
+#ifndef SUPPORTS_CLOCK_MONOTONIC
+#error "Build platform doesn't support clock_gettime and related functionality"
+#endif
+
 // Time since start-up in seconds to a fine granularity.
 // Used by VMSelfDestructTimer and the MemProfiler.
 double os::elapsedTime() {
@@ -1218,62 +1220,6 @@
   nanos = jlong(time.tv_usec) * 1000;
 }
 
-
-#ifndef CLOCK_MONOTONIC
-  #define CLOCK_MONOTONIC (1)
-#endif
-
-void os::Linux::clock_init() {
-  // we do dlopen's in this particular order due to bug in linux
-  // dynamical loader (see 6348968) leading to crash on exit
-  void* handle = dlopen("librt.so.1", RTLD_LAZY);
-  if (handle == NULL) {
-    handle = dlopen("librt.so", RTLD_LAZY);
-  }
-
-  if (handle) {
-    int (*clock_getres_func)(clockid_t, struct timespec*) =
-           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
-    int (*clock_gettime_func)(clockid_t, struct timespec*) =
-           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
-    if (clock_getres_func && clock_gettime_func) {
-      // See if monotonic clock is supported by the kernel. Note that some
-      // early implementations simply return kernel jiffies (updated every
-      // 1/100 or 1/1000 second). It would be bad to use such a low res clock
-      // for nano time (though the monotonic property is still nice to have).
-      // It's fixed in newer kernels, however clock_getres() still returns
-      // 1/HZ. We check if clock_getres() works, but will ignore its reported
-      // resolution for now. Hopefully as people move to new kernels, this
-      // won't be a problem.
-      struct timespec res;
-      struct timespec tp;
-      if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
-          clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
-        // yes, monotonic clock is supported
-        _clock_gettime = clock_gettime_func;
-        return;
-      } else {
-        // close librt if there is no monotonic clock
-        dlclose(handle);
-      }
-    }
-  }
-  warning("No monotonic clock was available - timed services may " \
-          "be adversely affected if the time-of-day clock changes");
-}
-
-#ifndef SYS_clock_getres
-  #if defined(X86) || defined(PPC64) || defined(S390)
-    #define SYS_clock_getres AMD64_ONLY(229) IA32_ONLY(266) PPC64_ONLY(247) S390_ONLY(261)
-    #define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
-  #else
-    #warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
-    #define sys_clock_getres(x,y)  -1
-  #endif
-#else
-  #define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
-#endif
-
 void os::Linux::fast_thread_clock_init() {
   if (!UseLinuxPosixThreadCPUClocks) {
     return;
@@ -1284,17 +1230,17 @@
       (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");
 
   // Switch to using fast clocks for thread cpu time if
-  // the sys_clock_getres() returns 0 error code.
+  // clock_getres() returns a 0 error code.
   // Note that some kernels may support the current thread
   // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
   // returned by the pthread_getcpuclockid().
-  // If the fast Posix clocks are supported then the sys_clock_getres()
+  // If the fast Posix clocks are supported then the clock_getres()
   // must return at least tp.tv_sec == 0 which means a resolution
   // better than 1 sec. This is an extra check for reliability.
 
   if (pthread_getcpuclockid_func &&
       pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
-      sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
+      os::Posix::clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
     _supports_fast_thread_cpu_time = true;
     _pthread_getcpuclockid = pthread_getcpuclockid_func;
   }
@@ -1303,7 +1249,7 @@
 jlong os::javaTimeNanos() {
   if (os::supports_monotonic_clock()) {
     struct timespec tp;
-    int status = Linux::clock_gettime(CLOCK_MONOTONIC, &tp);
+    int status = os::Posix::clock_gettime(CLOCK_MONOTONIC, &tp);
     assert(status == 0, "gettime error");
     jlong result = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
     return result;
@@ -1622,9 +1568,6 @@
         // This is OK - No Java threads have been created yet, and hence no
         // stack guard pages to fix.
         //
-        // This should happen only when you are building JDK7 using a very
-        // old version of JDK6 (e.g., with JPRT) and running test_gamma.
-        //
         // Dynamic loader will make all stacks executable after
         // this function returns, and will not do that again.
         assert(Threads::number_of_threads() == 0, "no Java threads should exist yet.");
@@ -1877,12 +1820,16 @@
   return (void*)::dlopen(NULL, RTLD_LAZY);
 }
 
-static bool _print_ascii_file(const char* filename, outputStream* st) {
+static bool _print_ascii_file(const char* filename, outputStream* st, const char* hdr = NULL) {
   int fd = ::open(filename, O_RDONLY);
   if (fd == -1) {
     return false;
   }
 
+  if (hdr != NULL) {
+    st->print_cr("%s", hdr);
+  }
+
   char buf[33];
   int bytes;
   buf[32] = '\0';
@@ -1975,6 +1922,8 @@
 
   os::Linux::print_proc_sys_info(st);
 
+  os::Linux::print_ld_preload_file(st);
+
   os::Linux::print_container_info(st);
 }
 
@@ -2133,6 +2082,11 @@
   st->cr();
 }
 
+void os::Linux::print_ld_preload_file(outputStream* st) {
+  _print_ascii_file("/etc/ld.so.preload", st, "\n/etc/ld.so.preload:");
+  st->cr();
+}
+
 void os::Linux::print_container_info(outputStream* st) {
   if (!OSContainer::is_containerized()) {
     return;
@@ -2471,7 +2425,7 @@
 static struct timespec create_semaphore_timespec(unsigned int sec, int nsec) {
   struct timespec ts;
   // Semaphores are always associated with CLOCK_REALTIME
-  os::Linux::clock_gettime(CLOCK_REALTIME, &ts);
+  os::Posix::clock_gettime(CLOCK_REALTIME, &ts);
   // see os_posix.cpp for discussion on overflow checking
   if (sec >= MAX_SECS) {
     ts.tv_sec += MAX_SECS;
@@ -4704,7 +4658,7 @@
 
 jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
   struct timespec tp;
-  int rc = os::Linux::clock_gettime(clockid, &tp);
+  int rc = os::Posix::clock_gettime(clockid, &tp);
   assert(rc == 0, "clock_gettime is expected to return 0 code");
 
   return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
@@ -4976,14 +4930,19 @@
   // _main_thread points to the thread that created/loaded the JVM.
   Linux::_main_thread = pthread_self();
 
-  Linux::clock_init();
-  initial_time_count = javaTimeNanos();
-
   // retrieve entry point for pthread_setname_np
   Linux::_pthread_setname_np =
     (int(*)(pthread_t, const char*))dlsym(RTLD_DEFAULT, "pthread_setname_np");
 
   os::Posix::init();
+
+  initial_time_count = javaTimeNanos();
+
+  // Always warn if no monotonic clock available
+  if (!os::Posix::supports_monotonic_clock()) {
+    warning("No monotonic clock was available - timed services may "    \
+            "be adversely affected if the time-of-day clock changes");
+  }
 }
 
 // To install functions for atexit system call
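
os::javaTimeNanos() now goes through os::Posix::clock_gettime, and
initialization warns once when no monotonic clock is available. A
standalone sketch of the nanosecond computation (assumes POSIX <time.h>
and calls clock_gettime directly rather than through the dlsym-resolved
pointer the VM uses):

    #include <cstdint>
    #include <ctime>

    // Monotonic nanoseconds since an arbitrary epoch, mirroring the
    // supports_monotonic_clock() path of os::javaTimeNanos().
    int64_t monotonic_nanos() {
      timespec tp;
      clock_gettime(CLOCK_MONOTONIC, &tp);
      return int64_t(tp.tv_sec) * 1000000000LL + int64_t(tp.tv_nsec);
    }
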
--- a/src/hotspot/os/linux/os_linux.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os/linux/os_linux.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -43,7 +43,6 @@
 
   static void check_signal_handler(int sig);
 
-  static int (*_clock_gettime)(clockid_t, struct timespec *);
   static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *);
   static int (*_pthread_setname_np)(pthread_t, const char*);
 
@@ -114,6 +113,7 @@
   static void print_distro_info(outputStream* st);
   static void print_libversion_info(outputStream* st);
   static void print_proc_sys_info(outputStream* st);
+  static void print_ld_preload_file(outputStream* st);
 
  public:
   static bool _stack_is_executable;
@@ -189,16 +189,9 @@
   static bool manually_expand_stack(JavaThread * t, address addr);
   static int max_register_window_saves_before_flushing();
 
-  // Real-time clock functions
-  static void clock_init(void);
-
   // fast POSIX clocks support
   static void fast_thread_clock_init(void);
 
-  static int clock_gettime(clockid_t clock_id, struct timespec *tp) {
-    return _clock_gettime ? _clock_gettime(clock_id, tp) : -1;
-  }
-
   static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) {
     return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1;
   }
--- a/src/hotspot/os/linux/os_linux.inline.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os/linux/os_linux.inline.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -141,7 +141,7 @@
 }
 
 inline bool os::supports_monotonic_clock() {
-  return Linux::_clock_gettime != NULL;
+  return os::Posix::supports_monotonic_clock();
 }
 
 inline void os::exit(int num) {
--- a/src/hotspot/os/posix/os_posix.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os/posix/os_posix.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1609,10 +1609,25 @@
 // This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC
 
 static int (*_clock_gettime)(clockid_t, struct timespec *);
+static int (*_clock_getres)(clockid_t, struct timespec *);
 static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t);
 
 static bool _use_clock_monotonic_condattr;
 
+// Exported clock functionality
+
+int os::Posix::clock_gettime(clockid_t clock_id, struct timespec *tp) {
+  return _clock_gettime != NULL ? _clock_gettime(clock_id, tp) : -1;
+}
+
+int os::Posix::clock_getres(clockid_t clock_id, struct timespec *tp) {
+  return _clock_getres != NULL ? _clock_getres(clock_id, tp) : -1;
+}
+
+bool os::Posix::supports_monotonic_clock() {
+  return _clock_gettime != NULL;
+}
+
 // Determine what POSIX APIs are present and do appropriate
 // configuration.
 void os::Posix::init(void) {
@@ -1620,8 +1635,6 @@
   // NOTE: no logging available when this is called. Put logging
   // statements in init_2().
 
-  // Copied from os::Linux::clock_init(). The duplication is temporary.
-
   // 1. Check for CLOCK_MONOTONIC support.
 
   void* handle = NULL;
@@ -1642,6 +1655,7 @@
   }
 
   _clock_gettime = NULL;
+  _clock_getres = NULL;
 
   int (*clock_getres_func)(clockid_t, struct timespec*) =
     (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
@@ -1656,6 +1670,7 @@
         clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
       // Yes, monotonic clock is supported.
       _clock_gettime = clock_gettime_func;
+      _clock_getres = clock_getres_func;
     } else {
 #ifdef NEEDS_LIBRT
       // Close librt if there is no monotonic clock.
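
The consolidated probe records both function pointers, so
supports_monotonic_clock() reduces to a NULL test. A reduced sketch of
the lookup (assumes <dlfcn.h>; the librt fallback and error handling
are omitted):

    #include <ctime>
    #include <dlfcn.h>

    typedef int (*clock_fn)(clockid_t, struct timespec*);
    static clock_fn resolved_clock_gettime = nullptr;

    // Probe for a usable CLOCK_MONOTONIC and cache the resolved
    // function; callers treat nullptr as "no monotonic clock".
    void clock_probe() {
      clock_fn f = (clock_fn)dlsym(RTLD_DEFAULT, "clock_gettime");
      struct timespec tp;
      if (f != nullptr && f(CLOCK_MONOTONIC, &tp) == 0) {
        resolved_clock_gettime = f;
      }
    }

    bool monotonic_clock_available() {
      return resolved_clock_gettime != nullptr;
    }
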
--- a/src/hotspot/os/posix/os_posix.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os/posix/os_posix.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -116,6 +116,18 @@
   // Returns true if either given uid is effective uid and given gid is
   // effective gid, or if given uid is root.
   static bool matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid);
+
+#ifdef SUPPORTS_CLOCK_MONOTONIC
+
+  static bool supports_monotonic_clock();
+  static int clock_gettime(clockid_t clock_id, struct timespec *tp);
+  static int clock_getres(clockid_t clock_id, struct timespec *tp);
+
+#else
+
+  static bool supports_monotonic_clock() { return false; }
+
+#endif
 };
 
 // On POSIX platforms the signal handler is global so we just do the write.
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -136,7 +136,7 @@
 
 extern "C" {
   // defined in bsd_x86.s
-  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
+  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
   void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
 }
 
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.s	Thu Oct 04 14:17:59 2018 +0530
@@ -635,8 +635,7 @@
 
         # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
         #                                     volatile int64_t* dest,
-        #                                     int64_t compare_value,
-        #                                     bool is_MP)
+        #                                     int64_t compare_value)
         #
         .p2align 4,,15
         ELF_TYPE(_Atomic_cmpxchg_long,@function)
@@ -649,10 +648,8 @@
         movl     24(%esp), %eax    # 24(%esp) : compare_value (low)
         movl     28(%esp), %edx    # 28(%esp) : compare_value (high)
         movl     20(%esp), %edi    # 20(%esp) : dest
-        cmpl     $0, 32(%esp)      # 32(%esp) : is_MP
-        je       1f
         lock
-1:      cmpxchg8b (%edi)
+        cmpxchg8b (%edi)
         popl     %edi
         popl     %ebx
         ret
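
The stub now applies the lock prefix to cmpxchg8b unconditionally, and
the is_MP argument disappears from the C declaration as well. A sketch
of the same 64-bit compare-and-swap via a GCC/Clang builtin (on IA-32
this lowers to lock cmpxchg8b):

    #include <cstdint>

    // Atomically: if *dest == compare_value, store exchange_value;
    // either way return the prior contents of *dest.
    int64_t cmpxchg_long(volatile int64_t* dest,
                         int64_t compare_value,
                         int64_t exchange_value) {
      return __sync_val_compare_and_swap(dest, compare_value,
                                         exchange_value);
    }
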
--- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -50,17 +50,12 @@
 //
 // inline void _OrderAccess_dsb() {
 //    volatile intptr_t dummy = 0;
-//    if (os::is_MP()) {
-//      __asm__ volatile (
-//        "mcr p15, 0, %0, c7, c10, 4"
-//        : : "r" (dummy) : "memory");
-//    }
+//    __asm__ volatile (
+//      "mcr p15, 0, %0, c7, c10, 4"
+//      : : "r" (dummy) : "memory");
 // }
 
 inline static void dmb_sy() {
-   if (!os::is_MP()) {
-     return;
-   }
 #ifdef AARCH64
    __asm__ __volatile__ ("dmb sy" : : : "memory");
 #else
@@ -82,9 +77,6 @@
 }
 
 inline static void dmb_st() {
-   if (!os::is_MP()) {
-     return;
-   }
 #ifdef AARCH64
    __asm__ __volatile__ ("dmb st" : : : "memory");
 #else
@@ -108,9 +100,6 @@
 // Load-Load/Store barrier
 inline static void dmb_ld() {
 #ifdef AARCH64
-   if (!os::is_MP()) {
-     return;
-   }
    __asm__ __volatile__ ("dmb ld" : : : "memory");
 #else
    dmb_sy();
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -394,11 +394,9 @@
       _id(id)
     , _info(NULL)
     , _index(index) {
-    if (os::is_MP()) {
-      // force alignment of patch sites on MP hardware so we
-      // can guarantee atomic writes to the patch site.
-      align_patch_site(masm);
-    }
+    // force alignment of patch sites so we
+    // can guarantee atomic writes to the patch site.
+    align_patch_site(masm);
     _pc_start = masm->pc();
     masm->bind(_patch_site_entry);
   }
--- a/src/hotspot/share/c1/c1_InstructionPrinter.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/c1/c1_InstructionPrinter.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -918,18 +918,16 @@
 }
 
 void InstructionPrinter::do_MemBar(MemBar* x) {
-  if (os::is_MP()) {
-    LIR_Code code = x->code();
-    switch (code) {
-      case lir_membar_acquire   : output()->print("membar_acquire"); break;
-      case lir_membar_release   : output()->print("membar_release"); break;
-      case lir_membar           : output()->print("membar"); break;
-      case lir_membar_loadload  : output()->print("membar_loadload"); break;
-      case lir_membar_storestore: output()->print("membar_storestore"); break;
-      case lir_membar_loadstore : output()->print("membar_loadstore"); break;
-      case lir_membar_storeload : output()->print("membar_storeload"); break;
-      default                   : ShouldNotReachHere(); break;
-    }
+  LIR_Code code = x->code();
+  switch (code) {
+  case lir_membar_acquire   : output()->print("membar_acquire"); break;
+  case lir_membar_release   : output()->print("membar_release"); break;
+  case lir_membar           : output()->print("membar"); break;
+  case lir_membar_loadload  : output()->print("membar_loadload"); break;
+  case lir_membar_storestore: output()->print("membar_storestore"); break;
+  case lir_membar_loadstore : output()->print("membar_loadstore"); break;
+  case lir_membar_storeload : output()->print("membar_storeload"); break;
+  default                   : ShouldNotReachHere(); break;
   }
 }
 
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -446,10 +446,8 @@
 void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
   verify_oop_map(op->info());
 
-  if (os::is_MP()) {
-    // must align calls sites, otherwise they can't be updated atomically on MP hardware
-    align_call(op->code());
-  }
+  // must align call sites, otherwise they can't be updated atomically
+  align_call(op->code());
 
   // emit the static call stub stuff out of line
   emit_static_call_stub();
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -3068,13 +3068,13 @@
     break;
 
   case vmIntrinsics::_loadFence :
-    if (os::is_MP()) __ membar_acquire();
+    __ membar_acquire();
     break;
   case vmIntrinsics::_storeFence:
-    if (os::is_MP()) __ membar_release();
+    __ membar_release();
     break;
   case vmIntrinsics::_fullFence :
-    if (os::is_MP()) __ membar();
+    __ membar();
     break;
   case vmIntrinsics::_onSpinWait:
     __ on_spin_wait();
@@ -3623,18 +3623,16 @@
 }
 
 void LIRGenerator::do_MemBar(MemBar* x) {
-  if (os::is_MP()) {
-    LIR_Code code = x->code();
-    switch(code) {
-      case lir_membar_acquire   : __ membar_acquire(); break;
-      case lir_membar_release   : __ membar_release(); break;
-      case lir_membar           : __ membar(); break;
-      case lir_membar_loadload  : __ membar_loadload(); break;
-      case lir_membar_storestore: __ membar_storestore(); break;
-      case lir_membar_loadstore : __ membar_loadstore(); break;
-      case lir_membar_storeload : __ membar_storeload(); break;
-      default                   : ShouldNotReachHere(); break;
-    }
+  LIR_Code code = x->code();
+  switch(code) {
+  case lir_membar_acquire   : __ membar_acquire(); break;
+  case lir_membar_release   : __ membar_release(); break;
+  case lir_membar           : __ membar(); break;
+  case lir_membar_loadload  : __ membar_loadload(); break;
+  case lir_membar_storestore: __ membar_storestore(); break;
+  case lir_membar_loadstore : __ membar_loadstore(); break;
+  case lir_membar_storeload : __ membar_storeload(); break;
+  default                   : ShouldNotReachHere(); break;
   }
 }
 
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -847,8 +847,32 @@
 // call into patch_code and complete the patching process by copying
 // the patch body back into the main part of the nmethod and resume
 // executing.
+
+// NB:
 //
+// Patchable instruction sequences inherently exhibit race conditions,
+// where thread A is patching an instruction at the same time thread B
+// is executing it.  The algorithms we use ensure that any observation
+// that B can make on any intermediate states during A's patching will
+// always end up with a correct outcome.  This is easiest if there are
+// few or no intermediate states.  (Some inline caches have two
+// related instructions that must be patched in tandem.  For those,
+// intermediate states seem to be unavoidable, but we will get the
+// right answer from all possible observation orders.)
 //
+// When patching the entry instruction at the head of a method, or a
+// linkable call instruction inside of a method, we try very hard to
+// use a patch sequence which executes as a single memory transaction.
+// This means, in practice, that when thread A patches an instruction,
+// it should patch a 32-bit or 64-bit word that somehow overlaps the
+// instruction or is contained in it.  We believe that memory hardware
+// will never break up such a word write, if it is naturally aligned
+// for the word being written.  We also know that some CPUs work very
+// hard to create atomic updates even of naturally unaligned words,
+// but we don't want to bet the farm on this always working.
+//
+// Therefore, if there is any chance of a race condition, we try to
+// patch only naturally aligned words, as single, full-word writes.
 
 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
   NOT_PRODUCT(_patch_code_slowcase_cnt++;)
@@ -907,7 +931,7 @@
     // We need to only cover T_LONG and T_DOUBLE fields, as we can
     // break access atomicity only for them.
 
-    // Strictly speaking, the deoptimizaation on 64-bit platforms
+    // Strictly speaking, the deoptimization on 64-bit platforms
     // is unnecessary, and T_LONG stores on 32-bit platforms need
     // to be handled by special patching code when AlwaysAtomicAccesses
     // becomes product feature. At this point, we are still going
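
The new comment block pins down the patching contract: a racing
observer is safe only when each patch is a single, naturally aligned,
full-word write. A sketch of such a patch using a GCC/Clang builtin
(illustrative; a real patcher may also need instruction-cache
maintenance):

    #include <cstdint>

    // Overwrite one 32-bit instruction slot with a single word store.
    // The alignment check is what align_patch_site() establishes up
    // front: a word write is single-copy atomic only when it does not
    // straddle its own alignment boundary.
    bool patch_word(uint32_t* site, uint32_t new_insn) {
      if ((reinterpret_cast<uintptr_t>(site) & (sizeof(uint32_t) - 1)) != 0) {
        return false;                 // would not be observed atomically
      }
      __atomic_store_n(site, new_insn, __ATOMIC_RELEASE);
      return true;
    }
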
--- a/src/hotspot/share/ci/ciEnv.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/ci/ciEnv.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -399,8 +399,8 @@
 
   // Now we need to check the SystemDictionary
   Symbol* sym = name->get_symbol();
-  if (sym->byte_at(0) == 'L' &&
-    sym->byte_at(sym->utf8_length()-1) == ';') {
+  if (sym->char_at(0) == 'L' &&
+    sym->char_at(sym->utf8_length()-1) == ';') {
     // This is a name from a signature.  Strip off the trimmings.
     // Call recursive to keep scope of strippedsym.
     TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
@@ -427,7 +427,7 @@
 
   // set up the proper type to return on OOM
   ciKlass* fail_type;
-  if (sym->byte_at(0) == '[') {
+  if (sym->char_at(0) == '[') {
     fail_type = _unloaded_ciobjarrayklass;
   } else {
     fail_type = _unloaded_ciinstance_klass;
@@ -453,8 +453,8 @@
   // we must build an array type around it.  The CI requires array klasses
   // to be loaded if their element klasses are loaded, except when memory
   // is exhausted.
-  if (sym->byte_at(0) == '[' &&
-      (sym->byte_at(1) == '[' || sym->byte_at(1) == 'L')) {
+  if (sym->char_at(0) == '[' &&
+      (sym->char_at(1) == '[' || sym->char_at(1) == 'L')) {
     // We have an unloaded array.
     // Build it on the fly if the element class exists.
     TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -116,7 +116,7 @@
                                  jobject loader, jobject protection_domain)
   : ciKlass(name, T_OBJECT)
 {
-  assert(name->byte_at(0) != '[', "not an instance klass");
+  assert(name->char_at(0) != '[', "not an instance klass");
   _init_state = (InstanceKlass::ClassState)0;
   _nonstatic_field_size = -1;
   _has_nonstatic_fields = false;
@@ -299,7 +299,7 @@
     return false;
 
   // Test for trailing '/'
-  if ((char) name()->byte_at(len) != '/')
+  if (name()->char_at(len) != '/')
     return false;
 
   // Make sure it's not actually in a subpackage:
--- a/src/hotspot/share/ci/ciObjArrayKlass.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/ci/ciObjArrayKlass.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -112,9 +112,9 @@
   Symbol* base_name_sym = element_name->get_symbol();
   char* name;
 
-  if (base_name_sym->byte_at(0) == '[' ||
-      (base_name_sym->byte_at(0) == 'L' &&  // watch package name 'Lxx'
-       base_name_sym->byte_at(element_len-1) == ';')) {
+  if (base_name_sym->char_at(0) == '[' ||
+      (base_name_sym->char_at(0) == 'L' &&  // watch package name 'Lxx'
+       base_name_sym->char_at(element_len-1) == ';')) {
 
     int new_len = element_len + dimension + 1; // for the ['s and '\0'
     name = CURRENT_THREAD_ENV->name_buffer(new_len);
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -466,7 +466,7 @@
 
   // Two cases: this is an unloaded ObjArrayKlass or an
   // unloaded InstanceKlass.  Deal with both.
-  if (name->byte_at(0) == '[') {
+  if (name->char_at(0) == '[') {
     // Decompose the name.
     FieldArrayInfo fd;
     BasicType element_type = FieldType::get_array_info(name->get_symbol(),
--- a/src/hotspot/share/ci/ciSymbol.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/ci/ciSymbol.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,14 +68,14 @@
 
 // ------------------------------------------------------------------
 // ciSymbol::base
-const jbyte* ciSymbol::base() {
+const u1* ciSymbol::base() {
   GUARDED_VM_ENTRY(return get_symbol()->base();)
 }
 
 // ------------------------------------------------------------------
-// ciSymbol::byte_at
-int ciSymbol::byte_at(int i) {
-  GUARDED_VM_ENTRY(return get_symbol()->byte_at(i);)
+// ciSymbol::char_at
+char ciSymbol::char_at(int i) {
+  GUARDED_VM_ENTRY(return get_symbol()->char_at(i);)
 }
 
 // ------------------------------------------------------------------
--- a/src/hotspot/share/ci/ciSymbol.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/ci/ciSymbol.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,7 @@
   void print_impl(outputStream* st);
 
   // This is public in Symbol* but private here, because the base can move:
-  const jbyte* base();
+  const u1* base();
 
   // Make a ciSymbol from a C string (implementation).
   static ciSymbol* make_impl(const char* s);
@@ -77,8 +77,8 @@
   // The text of the symbol as ascii with all non-printable characters quoted as \u####
   const char* as_quoted_ascii();
 
-  // Return the i-th utf8 byte, where i < utf8_length
-  int         byte_at(int i);
+  // Return the i-th utf8 byte as a char, where i < utf8_length
+  char        char_at(int i);
 
   // Tests if the symbol starts with the given prefix.
   bool starts_with(const char* prefix, int len) const;
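
byte_at() returning int becomes char_at() returning char, so the
signature scans in ciEnv, ciObjArrayKlass and the class file parser
compare characters directly. A standalone sketch of the descriptor
shapes those callers test for (plain C++; names are illustrative):

    #include <cstddef>

    // Object-type descriptor such as "Ljava/lang/String;", the shape
    // ciEnv checks with char_at(0) and char_at(utf8_length() - 1).
    bool is_object_descriptor(const char* sig, size_t len) {
      return len >= 2 && sig[0] == 'L' && sig[len - 1] == ';';
    }

    // Array descriptor such as "[[I" or "[Ljava/lang/Object;".
    bool is_array_descriptor(const char* sig, size_t len) {
      return len >= 1 && sig[0] == '[';
    }
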
--- a/src/hotspot/share/classfile/classFileParser.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -655,7 +655,7 @@
             "Illegal zero length constant pool entry at %d in class %s",
             name_index, CHECK);
 
-          if (sig->byte_at(0) == JVM_SIGNATURE_FUNC) {
+          if (sig->char_at(0) == JVM_SIGNATURE_FUNC) {
             // Format check method name and signature
             verify_legal_method_name(name, CHECK);
             verify_legal_method_signature(name, sig, CHECK);
@@ -682,7 +682,7 @@
           // CONSTANT_Dynamic's name and signature are verified above, when iterating NameAndType_info.
           // Need only to be sure signature is non-zero length and the right type.
           if (signature->utf8_length() == 0 ||
-              signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
+              signature->char_at(0) == JVM_SIGNATURE_FUNC) {
             throwIllegalSignature("CONSTANT_Dynamic", name, signature, CHECK);
           }
         }
@@ -707,7 +707,7 @@
             // Field name and signature are verified above, when iterating NameAndType_info.
             // Need only to be sure signature is non-zero length and the right type.
             if (signature->utf8_length() == 0 ||
-                signature->byte_at(0) == JVM_SIGNATURE_FUNC) {
+                signature->char_at(0) == JVM_SIGNATURE_FUNC) {
               throwIllegalSignature("Field", name, signature, CHECK);
             }
           }
@@ -716,7 +716,7 @@
             // Method name and signature are verified above, when iterating NameAndType_info.
             // Need only to be sure signature is non-zero length and the right type.
             if (signature->utf8_length() == 0 ||
-                signature->byte_at(0) != JVM_SIGNATURE_FUNC) {
+                signature->char_at(0) != JVM_SIGNATURE_FUNC) {
               throwIllegalSignature("Method", name, signature, CHECK);
             }
           }
@@ -724,7 +724,7 @@
           const unsigned int name_len = name->utf8_length();
           if (tag == JVM_CONSTANT_Methodref &&
               name_len != 0 &&
-              name->byte_at(0) == '<' &&
+              name->char_at(0) == '<' &&
               name != vmSymbols::object_initializer_name()) {
             classfile_parse_error(
               "Bad method name at constant pool index %u in class file %s",
@@ -942,7 +942,7 @@
 
         // Don't need to check legal name because it's checked when parsing constant pool.
         // But need to make sure it's not an array type.
-        guarantee_property(unresolved_klass->byte_at(0) != JVM_SIGNATURE_ARRAY,
+        guarantee_property(unresolved_klass->char_at(0) != JVM_SIGNATURE_ARRAY,
                            "Bad interface name in class file %s", CHECK);
 
         // Call resolve_super so classcircularity is checked
@@ -3752,7 +3752,7 @@
       if (need_verify)
         is_array = super_klass->is_array_klass();
     } else if (need_verify) {
-      is_array = (cp->klass_name_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY);
+      is_array = (cp->klass_name_at(super_class_index)->char_at(0) == JVM_SIGNATURE_ARRAY);
     }
     if (need_verify) {
       guarantee_property(!is_array,
@@ -5379,7 +5379,7 @@
     // The first non-signature thing better be a ')'
     if ((length > 0) && (*p++ == JVM_SIGNATURE_ENDFUNC)) {
       length--;
-      if (name->utf8_length() > 0 && name->byte_at(0) == '<') {
+      if (name->utf8_length() > 0 && name->char_at(0) == '<') {
         // All internal methods must return void
         if ((length == 1) && (p[0] == JVM_SIGNATURE_VOID)) {
           return args_size;
@@ -5796,7 +5796,7 @@
 void ClassFileParser::fix_unsafe_anonymous_class_name(TRAPS) {
   assert(_unsafe_anonymous_host != NULL, "Expected an unsafe anonymous class");
 
-  const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(),
+  const jbyte* anon_last_slash = UTF8::strrchr((const jbyte*)_class_name->base(),
                                                _class_name->utf8_length(), '/');
   if (anon_last_slash == NULL) {  // Unnamed package
     prepend_host_package_name(_unsafe_anonymous_host, CHECK);
@@ -6119,7 +6119,7 @@
   // It has been checked when constant pool is parsed.
   // However, make sure it is not an array type.
   if (_need_verify) {
-    guarantee_property(_class_name->byte_at(0) != JVM_SIGNATURE_ARRAY,
+    guarantee_property(_class_name->char_at(0) != JVM_SIGNATURE_ARRAY,
                        "Bad class name in class file %s",
                        CHECK);
   }
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -47,11 +47,10 @@
 // the singleton class the_null_class_loader_data().
 
 #include "precompiled.hpp"
-#include "classfile/classLoaderData.hpp"
 #include "classfile/classLoaderData.inline.hpp"
+#include "classfile/classLoaderDataGraph.inline.hpp"
 #include "classfile/dictionary.hpp"
 #include "classfile/javaClasses.hpp"
-#include "classfile/metadataOnStackMark.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "classfile/packageEntry.hpp"
 #include "classfile/symbolTable.hpp"
@@ -60,9 +59,7 @@
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/metadataFactory.hpp"
-#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oopHandle.inline.hpp"
@@ -72,14 +69,10 @@
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
-#include "runtime/safepointVerifiers.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 
-volatile size_t ClassLoaderDataGraph::_num_array_classes = 0;
-volatile size_t ClassLoaderDataGraph::_num_instance_classes = 0;
-
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 void ClassLoaderData::init_null_class_loader_data() {
@@ -345,6 +338,11 @@
   for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
+#ifdef ASSERT
+      oop m = k->java_mirror();
+      assert(m != NULL, "NULL mirror");
+      assert(m->is_a(SystemDictionary::Class_klass()), "invalid mirror");
+#endif
       klass_closure->do_klass(k);
     }
   }
@@ -444,13 +442,6 @@
   }
 }
 
-
-void ClassLoaderDataGraph::clear_claimed_marks() {
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    cld->clear_claimed();
-  }
-}
-
 void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) {
   {
     MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
@@ -478,78 +469,6 @@
   }
 }
 
-// Class iterator used by the compiler.  It gets some number of classes at
-// a safepoint to decay invocation counters on the methods.
-class ClassLoaderDataGraphKlassIteratorStatic {
-  ClassLoaderData* _current_loader_data;
-  Klass*           _current_class_entry;
- public:
-
-  ClassLoaderDataGraphKlassIteratorStatic() : _current_loader_data(NULL), _current_class_entry(NULL) {}
-
-  InstanceKlass* try_get_next_class() {
-    assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
-    size_t max_classes = ClassLoaderDataGraph::num_instance_classes();
-    assert(max_classes > 0, "should not be called with no instance classes");
-    for (size_t i = 0; i < max_classes; ) {
-
-      if (_current_class_entry != NULL) {
-        Klass* k = _current_class_entry;
-        _current_class_entry = _current_class_entry->next_link();
-
-        if (k->is_instance_klass()) {
-          InstanceKlass* ik = InstanceKlass::cast(k);
-          i++;  // count all instance classes found
-          // Not yet loaded classes are counted in max_classes
-          // but only return loaded classes.
-          if (ik->is_loaded()) {
-            return ik;
-          }
-        }
-      } else {
-        // Go to next CLD
-        if (_current_loader_data != NULL) {
-          _current_loader_data = _current_loader_data->next();
-        }
-        // Start at the beginning
-        if (_current_loader_data == NULL) {
-          _current_loader_data = ClassLoaderDataGraph::_head;
-        }
-
-        _current_class_entry = _current_loader_data->klasses();
-      }
-    }
-    // Should never be reached unless all instance classes have failed or are not fully loaded.
-    // Caller handles NULL.
-    return NULL;
-  }
-
-  // If the current class for the static iterator is a class being unloaded or
-  // deallocated, adjust the current class.
-  void adjust_saved_class(ClassLoaderData* cld) {
-    if (_current_loader_data == cld) {
-      _current_loader_data = cld->next();
-      if (_current_loader_data != NULL) {
-        _current_class_entry = _current_loader_data->klasses();
-      }  // else try_get_next_class will start at the head
-    }
-  }
-
-  void adjust_saved_class(Klass* klass) {
-    if (_current_class_entry == klass) {
-      _current_class_entry = klass->next_link();
-    }
-  }
-};
-
-static ClassLoaderDataGraphKlassIteratorStatic static_klass_iterator;
-
-InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
-  return static_klass_iterator.try_get_next_class();
-}
-
-
 void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
   if (loader_or_mirror() != NULL) {
     assert(_holder.is_null(), "never replace holders");
@@ -563,7 +482,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
 
   // Adjust global class iterator.
-  static_klass_iterator.adjust_saved_class(scratch_class);
+  ClassLoaderDataGraph::adjust_saved_class(scratch_class);
 
   Klass* prev = NULL;
   for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
@@ -611,7 +530,7 @@
   classes_do(InstanceKlass::unload_class);
 
   // Clean up global class iterator for compiler
-  static_klass_iterator.adjust_saved_class(this);
+  ClassLoaderDataGraph::adjust_saved_class(this);
 }
 
 ModuleEntryTable* ClassLoaderData::modules() {
@@ -914,41 +833,6 @@
   }
 }
 
-void ClassLoaderDataGraph::clean_deallocate_lists(bool walk_previous_versions) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");
-  uint loaders_processed = 0;
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    // is_alive check will be necessary for concurrent class unloading.
-    if (cld->is_alive()) {
-      // clean metaspace
-      if (walk_previous_versions) {
-        cld->classes_do(InstanceKlass::purge_previous_versions);
-      }
-      cld->free_deallocate_list();
-      loaders_processed++;
-    }
-  }
-  log_debug(class, loader, data)("clean_deallocate_lists: loaders processed %u %s",
-                                 loaders_processed, walk_previous_versions ? "walk_previous_versions" : "");
-}
-
-void ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");
-
-  _should_clean_deallocate_lists = false; // assume everything gets cleaned
-
-  // Mark metadata seen on the stack so we can delete unreferenced entries.
-  // Walk all metadata, including the expensive code cache walk, only for class redefinition.
-  // The MetadataOnStackMark walk during redefinition saves previous versions if it finds old methods
-  // on the stack or in the code cache, so we only have to repeat the full walk if
-  // they were found at that time.
-  // TODO: have redefinition clean old methods out of the code cache.  They still exist in some places.
-  bool walk_all_metadata = InstanceKlass::has_previous_versions_and_reset();
-
-  MetadataOnStackMark md_on_stack(walk_all_metadata);
-  clean_deallocate_lists(walk_all_metadata);
-}
-
 // This is distinct from free_deallocate_list.  For class loader data that are
 // unloading, this frees the C heap memory for items on the list, and unlinks
 // scratch or error classes so that unloading events aren't triggered for these
@@ -1070,523 +954,3 @@
   }
   return false;
 }
-
-
-// GC root of class loader data created.
-ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
-ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
-ClassLoaderData* ClassLoaderDataGraph::_saved_unloading = NULL;
-ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
-
-bool ClassLoaderDataGraph::_should_purge = false;
-bool ClassLoaderDataGraph::_should_clean_deallocate_lists = false;
-bool ClassLoaderDataGraph::_safepoint_cleanup_needed = false;
-bool ClassLoaderDataGraph::_metaspace_oom = false;
-
-// Add a new class loader data node to the list.  Assign the newly created
-// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsafe_anonymous) {
-
-  assert_lock_strong(ClassLoaderDataGraph_lock);
-
-  ClassLoaderData* cld;
-
-  // First check if another thread beat us to creating the CLD and installing
-  // it into the loader while we were waiting for the lock.
-  if (!is_unsafe_anonymous && loader.not_null()) {
-    cld = java_lang_ClassLoader::loader_data_acquire(loader());
-    if (cld != NULL) {
-      return cld;
-    }
-  }
-
-  // We mustn't GC until we've installed the ClassLoaderData in the Graph since the CLD
-  // contains oops in _handles that must be walked.  GC doesn't walk CLD from the
-  // loader oop in all collections, particularly young collections.
-  NoSafepointVerifier no_safepoints;
-
-  cld = new ClassLoaderData(loader, is_unsafe_anonymous);
-
-  // First install the new CLD to the Graph.
-  cld->set_next(_head);
-  _head = cld;
-
-  // Next associate with the class_loader.
-  if (!is_unsafe_anonymous) {
-    // Use OrderAccess, since readers need to get the loader_data only after
-    // it's added to the Graph
-    java_lang_ClassLoader::release_set_loader_data(loader(), cld);
-  }
-
-  // Lastly log, if requested
-  LogTarget(Trace, class, loader, data) lt;
-  if (lt.is_enabled()) {
-    ResourceMark rm;
-    LogStream ls(lt);
-    ls.print("create ");
-    cld->print_value_on(&ls);
-    ls.cr();
-  }
-  return cld;
-}
-
-ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymous) {
-  MutexLocker ml(ClassLoaderDataGraph_lock);
-  ClassLoaderData* loader_data = add_to_graph(loader, is_unsafe_anonymous);
-  return loader_data;
-}
-
-void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
-    cl->do_cld(cld);
-  }
-}
-
-void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  // Only walk the head until any clds not purged from prior unloading
-  // (CMS doesn't purge right away).
-  for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
-    assert(cld->is_unloading(), "invariant");
-    cl->do_cld(cld);
-  }
-}
-
-void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
-    CLDClosure* closure = cld->keep_alive() ? strong : weak;
-    if (closure != NULL) {
-      closure->do_cld(cld);
-    }
-  }
-}
-
-void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  if (ClassUnloading) {
-    roots_cld_do(cl, NULL);
-  } else {
-    cld_do(cl);
-  }
-}
-
-// Closure for locking and iterating through classes.
-LockedClassesDo::LockedClassesDo(classes_do_func_t f) : _function(f) {
-  ClassLoaderDataGraph_lock->lock();
-}
-
-LockedClassesDo::LockedClassesDo() : _function(NULL) {
-  // callers provide their own do_klass
-  ClassLoaderDataGraph_lock->lock();
-}
-
-LockedClassesDo::~LockedClassesDo() { ClassLoaderDataGraph_lock->unlock(); }
-
-
-// Iterating over the CLDG needs to be locked because
-// unloading can remove entries concurrently soon.
-class ClassLoaderDataGraphIterator : public StackObj {
-  ClassLoaderData* _next;
-  HandleMark       _hm;  // clean up handles when this is done.
-  Handle           _holder;
-  Thread*          _thread;
-
-  void hold_next() {
-    if (_next != NULL) {
-      _holder = Handle(_thread, _next->holder_phantom());
-    }
-  }
-public:
-  ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head) {
-    _thread = Thread::current();
-    assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-    hold_next();
-  }
-
-  bool repeat() const {
-    return _next != NULL;
-  }
-
-  ClassLoaderData* get_next() {
-    ClassLoaderData* next = _next;
-    if (_next != NULL) {
-      _next = _next->next();
-      hold_next();
-    }
-    return next;
-  }
-};
-
-// These functions assume that the caller has locked the ClassLoaderDataGraph_lock
-// if they are not calling the function from a safepoint.
-void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->classes_do(klass_closure);
-  }
-}
-
-void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->classes_do(f);
-  }
-}
-
-void ClassLoaderDataGraph::methods_do(void f(Method*)) {
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->methods_do(f);
-  }
-}
-
-void ClassLoaderDataGraph::modules_do(void f(ModuleEntry*)) {
-  assert_locked_or_safepoint(Module_lock);
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->modules_do(f);
-  }
-}
-
-void ClassLoaderDataGraph::modules_unloading_do(void f(ModuleEntry*)) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  // Only walk from the head of the unloading list up to any CLDs not yet
-  // purged from a prior unloading cycle (CMS doesn't purge right away).
-  for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
-    assert(cld->is_unloading(), "invariant");
-    cld->modules_do(f);
-  }
-}
-
-void ClassLoaderDataGraph::packages_do(void f(PackageEntry*)) {
-  assert_locked_or_safepoint(Module_lock);
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->packages_do(f);
-  }
-}
-
-void ClassLoaderDataGraph::packages_unloading_do(void f(PackageEntry*)) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  // Only walk from the head of the unloading list up to any CLDs not yet
-  // purged from a prior unloading cycle (CMS doesn't purge right away).
-  for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
-    assert(cld->is_unloading(), "invariant");
-    cld->packages_do(f);
-  }
-}
-
-void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->loaded_classes_do(klass_closure);
-  }
-}
-
-// This case can block but cannot do unloading (called from CDS)
-void ClassLoaderDataGraph::unlocked_loaded_classes_do(KlassClosure* klass_closure) {
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    cld->loaded_classes_do(klass_closure);
-  }
-}
-
-
-void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  // Only walk from the head of the unloading list up to any CLDs not yet
-  // purged from a prior unloading cycle (CMS doesn't purge right away).
-  for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
-    assert(cld->is_unloading(), "invariant");
-    cld->classes_do(f);
-  }
-}
-
-#define FOR_ALL_DICTIONARY(X)   ClassLoaderDataGraphIterator iter; \
-                                ClassLoaderData* X; \
-                                while ((X = iter.get_next()) != NULL) \
-                                  if (X->dictionary() != NULL)
-
-// Walk classes in the loaded class dictionaries in various forms.
-// Only walks the classes defined by each class loader.
-void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*)) {
-  FOR_ALL_DICTIONARY(cld) {
-    cld->dictionary()->classes_do(f);
-  }
-}
-
-// Only walks the classes defined by each class loader.
-void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*, TRAPS), TRAPS) {
-  FOR_ALL_DICTIONARY(cld) {
-    cld->dictionary()->classes_do(f, CHECK);
-  }
-}
-
-void ClassLoaderDataGraph::verify_dictionary() {
-  FOR_ALL_DICTIONARY(cld) {
-    cld->dictionary()->verify();
-  }
-}
-
-void ClassLoaderDataGraph::print_dictionary(outputStream* st) {
-  FOR_ALL_DICTIONARY(cld) {
-    st->print("Dictionary for ");
-    cld->print_value_on(st);
-    st->cr();
-    cld->dictionary()->print_on(st);
-    st->cr();
-  }
-}
-
-void ClassLoaderDataGraph::print_dictionary_statistics(outputStream* st) {
-  FOR_ALL_DICTIONARY(cld) {
-    ResourceMark rm;
-    stringStream tempst;
-    tempst.print("System Dictionary for %s class loader", cld->loader_name_and_id());
-    cld->dictionary()->print_table_statistics(st, tempst.as_string());
-  }
-}
-
-GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?");
-
-  GrowableArray<ClassLoaderData*>* array = new GrowableArray<ClassLoaderData*>();
-
-  // The CLDs in [_head, _saved_head] were all added during the last call to remember_new_clds(true).
-  ClassLoaderData* curr = _head;
-  while (curr != _saved_head) {
-    if (!curr->claimed()) {
-      array->push(curr);
-      LogTarget(Debug, class, loader, data) lt;
-      if (lt.is_enabled()) {
-        LogStream ls(lt);
-        ls.print("found new CLD: ");
-        curr->print_value_on(&ls);
-        ls.cr();
-      }
-    }
-
-    curr = curr->_next;
-  }
-
-  return array;
-}
-
-#ifndef PRODUCT
-bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
-    if (loader_data == data) {
-      return true;
-    }
-  }
-
-  return false;
-}
-#endif // PRODUCT
-
-// Move class loader data from main list to the unloaded list for unloading
-// and deallocation later.
-bool ClassLoaderDataGraph::do_unloading(bool do_cleaning) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-
-  // Indicate whether safepoint cleanup is needed.
-  _safepoint_cleanup_needed |= do_cleaning;
-
-  ClassLoaderData* data = _head;
-  ClassLoaderData* prev = NULL;
-  bool seen_dead_loader = false;
-  uint loaders_processed = 0;
-  uint loaders_removed = 0;
-
-  // Save the previous _unloading pointer for CMS, which may add to the unloading
-  // list before purging; we don't want to rewalk previously unloaded class loader data.
-  _saved_unloading = _unloading;
-
-  data = _head;
-  while (data != NULL) {
-    if (data->is_alive()) {
-      prev = data;
-      data = data->next();
-      loaders_processed++;
-      continue;
-    }
-    seen_dead_loader = true;
-    loaders_removed++;
-    ClassLoaderData* dead = data;
-    dead->unload();
-    data = data->next();
-    // Remove from loader list.
-    // This class loader data will no longer be found
-    // in the ClassLoaderDataGraph.
-    if (prev != NULL) {
-      prev->set_next(data);
-    } else {
-      assert(dead == _head, "sanity check");
-      _head = data;
-    }
-    dead->set_next(_unloading);
-    _unloading = dead;
-  }
-
-  log_debug(class, loader, data)("do_unloading: loaders processed %u, loaders removed %u", loaders_processed, loaders_removed);
-
-  return seen_dead_loader;
-}
-
-// There's at least one dead class loader.  Purge references from healthy modules'
-// reads lists and package export lists to modules belonging to dead loaders.
-void ClassLoaderDataGraph::clean_module_and_package_info() {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-
-  ClassLoaderData* data = _head;
-  while (data != NULL) {
-    // Remove entries in the dictionaries of live class loaders that have
-    // initiated loading of classes in a dead class loader.
-    if (data->dictionary() != NULL) {
-      data->dictionary()->do_unloading();
-    }
-    // Walk a ModuleEntry's reads, and a PackageEntry's exports
-    // lists to determine if there are modules on those lists that are now
-    // dead and should be removed.  A module's life cycle is equivalent
-    // to its defining class loader's life cycle.  Since a module is
-    // considered dead if its class loader is dead, these walks must
-    // occur after each class loader's aliveness is determined.
-    if (data->packages() != NULL) {
-      data->packages()->purge_all_package_exports();
-    }
-    if (data->modules_defined()) {
-      data->modules()->purge_all_module_reads();
-    }
-    data = data->next();
-  }
-}
-
-void ClassLoaderDataGraph::purge() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
-  ClassLoaderData* list = _unloading;
-  _unloading = NULL;
-  ClassLoaderData* next = list;
-  bool classes_unloaded = false;
-  while (next != NULL) {
-    ClassLoaderData* purge_me = next;
-    next = purge_me->next();
-    delete purge_me;
-    classes_unloaded = true;
-  }
-  if (classes_unloaded) {
-    Metaspace::purge();
-    set_metaspace_oom(false);
-  }
-}
-
-int ClassLoaderDataGraph::resize_if_needed() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
-  int resized = 0;
-  if (Dictionary::does_any_dictionary_needs_resizing()) {
-    FOR_ALL_DICTIONARY(cld) {
-      if (cld->dictionary()->resize_if_needed()) {
-        resized++;
-      }
-    }
-  }
-  return resized;
-}
-
-ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
-    : _next_klass(NULL) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
-  ClassLoaderData* cld = ClassLoaderDataGraph::_head;
-  Klass* klass = NULL;
-
-  // Find the first klass in the CLDG.
-  while (cld != NULL) {
-    assert_locked_or_safepoint(cld->metaspace_lock());
-    klass = cld->_klasses;
-    if (klass != NULL) {
-      _next_klass = klass;
-      return;
-    }
-    cld = cld->next();
-  }
-}
-
-Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass_in_cldg(Klass* klass) {
-  Klass* next = klass->next_link();
-  if (next != NULL) {
-    return next;
-  }
-
-  // No more klasses in the current CLD. Time to find a new CLD.
-  ClassLoaderData* cld = klass->class_loader_data();
-  assert_locked_or_safepoint(cld->metaspace_lock());
-  while (next == NULL) {
-    cld = cld->next();
-    if (cld == NULL) {
-      break;
-    }
-    next = cld->_klasses;
-  }
-
-  return next;
-}
-
-Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
-  Klass* head = _next_klass;
-
-  while (head != NULL) {
-    Klass* next = next_klass_in_cldg(head);
-
-    Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
-
-    if (old_head == head) {
-      return head; // Won the CAS.
-    }
-
-    head = old_head;
-  }
-
-  // Nothing more for the iterator to hand out.
-  assert(head == NULL, "head is " PTR_FORMAT ", expected NULL", p2i(head));
-  return NULL;
-}
-
-ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
-  _data = ClassLoaderDataGraph::_head;
-}
-
-ClassLoaderDataGraphMetaspaceIterator::~ClassLoaderDataGraphMetaspaceIterator() {}
-
-#ifndef PRODUCT
-// callable from debugger
-extern "C" int print_loader_data_graph() {
-  ResourceMark rm;
-  ClassLoaderDataGraph::print_on(tty);
-  return 0;
-}
-
-void ClassLoaderDataGraph::verify() {
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->verify();
-  }
-}
-
-void ClassLoaderDataGraph::print_on(outputStream * const out) {
-  ClassLoaderDataGraphIterator iter;
-  while (iter.repeat()) {
-    ClassLoaderData* cld = iter.get_next();
-    cld->print_on(out);
-  }
-}
-#endif // PRODUCT
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -53,9 +53,8 @@
 // ClassLoaderData objects are stored in the runtime representation of classes,
 // and provide iterators for root tracing and other GC operations.
 
-class ClassLoaderData;
+class ClassLoaderDataGraph;
 class JNIMethodBlock;
-class Metadebug;
 class ModuleEntry;
 class PackageEntry;
 class ModuleEntryTable;
@@ -63,136 +62,6 @@
 class DictionaryEntry;
 class Dictionary;
 
-// GC root for walking all class loader data that has been created
-
-class ClassLoaderDataGraph : public AllStatic {
-  friend class ClassLoaderData;
-  friend class ClassLoaderDataGraphMetaspaceIterator;
-  friend class ClassLoaderDataGraphKlassIteratorAtomic;
-  friend class ClassLoaderDataGraphKlassIteratorStatic;
-  friend class ClassLoaderDataGraphIterator;
-  friend class VMStructs;
- private:
-  // All CLDs (except the null CLD) can be reached by walking _head->_next->...
-  static ClassLoaderData* _head;
-  static ClassLoaderData* _unloading;
-  // CMS support.
-  static ClassLoaderData* _saved_head;
-  static ClassLoaderData* _saved_unloading;
-  static bool _should_purge;
-
-  // Set if there's anything to purge in the deallocate lists or previous versions
-  // during a safepoint after class unloading in a full GC.
-  static bool _should_clean_deallocate_lists;
-  static bool _safepoint_cleanup_needed;
-
-  // OOM has been seen in metaspace allocation. Used to prevent some
-  // allocations until class unloading
-  static bool _metaspace_oom;
-
-  static volatile size_t  _num_instance_classes;
-  static volatile size_t  _num_array_classes;
-
-  static ClassLoaderData* add_to_graph(Handle class_loader, bool is_unsafe_anonymous);
-  static ClassLoaderData* add(Handle class_loader, bool is_unsafe_anonymous);
-
- public:
-  static ClassLoaderData* find_or_create(Handle class_loader);
-  static void clean_module_and_package_info();
-  static void purge();
-  static void clear_claimed_marks();
-  // Iteration through CLDG inside a safepoint; GC support
-  static void cld_do(CLDClosure* cl);
-  static void cld_unloading_do(CLDClosure* cl);
-  static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
-  static void always_strong_cld_do(CLDClosure* cl);
-  // klass do
-  // Walking classes through the ClassLoaderDataGraph includes array classes.  It also includes
-  // classes that are allocated but not loaded, classes that have errors, and scratch classes
-  // for redefinition.  These classes are removed during the next class unloading.
-  // Walking the ClassLoaderDataGraph also includes unsafe anonymous classes.
-  static void classes_do(KlassClosure* klass_closure);
-  static void classes_do(void f(Klass* const));
-  static void methods_do(void f(Method*));
-  static void modules_do(void f(ModuleEntry*));
-  static void modules_unloading_do(void f(ModuleEntry*));
-  static void packages_do(void f(PackageEntry*));
-  static void packages_unloading_do(void f(PackageEntry*));
-  static void loaded_classes_do(KlassClosure* klass_closure);
-  static void unlocked_loaded_classes_do(KlassClosure* klass_closure);
-  static void classes_unloading_do(void f(Klass* const));
-  static bool do_unloading(bool do_cleaning);
-
-  // Expose state to avoid logging overhead in safepoint cleanup tasks.
-  static inline bool should_clean_metaspaces_and_reset();
-  static void set_should_clean_deallocate_lists() { _should_clean_deallocate_lists = true; }
-  static void clean_deallocate_lists(bool purge_previous_versions);
-  static void walk_metadata_and_clean_metaspaces();
-
-  // dictionary do
-  // Iterate over all klasses in dictionary, but
-  // just the classes from defining class loaders.
-  static void dictionary_classes_do(void f(InstanceKlass*));
-  // Added for initialize_itable_for_klass to handle exceptions.
-  static void dictionary_classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
-
-  // VM_CounterDecay iteration support
-  static InstanceKlass* try_get_next_class();
-
-  static void verify_dictionary();
-  static void print_dictionary(outputStream* st);
-  static void print_dictionary_statistics(outputStream* st);
-
-  // CMS support.
-  static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
-  static GrowableArray<ClassLoaderData*>* new_clds();
-
-  static void set_should_purge(bool b) { _should_purge = b; }
-  static void purge_if_needed() {
-    // Only purge the CLDG for CMS if concurrent sweep is complete.
-    if (_should_purge) {
-      purge();
-      // reset for next time.
-      set_should_purge(false);
-    }
-  }
-
-  static int resize_if_needed();
-
-  static bool has_metaspace_oom()           { return _metaspace_oom; }
-  static void set_metaspace_oom(bool value) { _metaspace_oom = value; }
-
-  static void print_on(outputStream * const out) PRODUCT_RETURN;
-  static void print() { print_on(tty); }
-  static void verify();
-
-  // instance and array class counters
-  static inline size_t num_instance_classes();
-  static inline size_t num_array_classes();
-  static inline void inc_instance_classes(size_t count);
-  static inline void dec_instance_classes(size_t count);
-  static inline void inc_array_classes(size_t count);
-  static inline void dec_array_classes(size_t count);
-
-#ifndef PRODUCT
-  static bool contains_loader_data(ClassLoaderData* loader_data);
-#endif
-};
-
-class LockedClassesDo : public KlassClosure {
-  typedef void (*classes_do_func_t)(Klass*);
-  classes_do_func_t _function;
-public:
-  LockedClassesDo();  // For callers who provide their own do_klass
-  LockedClassesDo(classes_do_func_t function);
-  ~LockedClassesDo();
-
-  void do_klass(Klass* k) {
-    (*_function)(k);
-  }
-};
-
-
 // ClassLoaderData class
 
 class ClassLoaderData : public CHeapObj<mtClass> {
@@ -314,7 +183,7 @@
   bool keep_alive() const       { return _keep_alive > 0; }
 
   oop holder_phantom() const;
-  void classes_do(void f(Klass*));
+  void classes_do(void f(Klass* const));
   void loaded_classes_do(KlassClosure* klass_closure);
   void classes_do(void f(InstanceKlass*));
   void methods_do(void f(Method*));
@@ -448,31 +317,4 @@
   JFR_ONLY(DEFINE_TRACE_ID_METHODS;)
 };
 
-// An iterator that distributes Klasses to parallel worker threads.
-class ClassLoaderDataGraphKlassIteratorAtomic : public StackObj {
-  Klass* volatile _next_klass;
- public:
-  ClassLoaderDataGraphKlassIteratorAtomic();
-  Klass* next_klass();
- private:
-  static Klass* next_klass_in_cldg(Klass* klass);
-};
-
-class ClassLoaderDataGraphMetaspaceIterator : public StackObj {
-  ClassLoaderData* _data;
- public:
-  ClassLoaderDataGraphMetaspaceIterator();
-  ~ClassLoaderDataGraphMetaspaceIterator();
-  bool repeat() { return _data != NULL; }
-  ClassLoaderMetaspace* get_next() {
-    assert(_data != NULL, "Should not be NULL in call to the iterator");
-    ClassLoaderMetaspace* result = _data->metaspace_or_null();
-    _data = _data->next();
-    // This result might be NULL for class loaders without metaspace
-    // yet.  It would be nice to return only non-null results, but
-    // there is no guarantee of a non-null result further
-    // down the list, so the caller is going to have to check.
-    return result;
-  }
-};
 #endif // SHARE_VM_CLASSFILE_CLASSLOADERDATA_HPP
--- a/src/hotspot/share/classfile/classLoaderData.inline.hpp	Thu Oct 04 13:01:23 2018 +0530
+++ b/src/hotspot/share/classfile/classLoaderData.inline.hpp	Thu Oct 04 14:17:59 2018 +0530
@@ -55,54 +55,4 @@
   return loader_data;
 }
 
-
-inline ClassLoaderData* ClassLoaderDataGraph::find_or_create(Handle loader) {
-  guarantee(loader() != NULL && oopDesc::is_oop(loader()), "Loader must be oop");
-  // Get the class loader data out of the java/lang/ClassLoader object; if it is
-  // non-null, the CLD has already been added, so there is no need to add it.
-  ClassLoaderData* loader_data = java_lang_ClassLoader::loader_data_acquire(loader());
-  if (loader_data) {
-    return loader_data;
-  }
-  return ClassLoaderDataGraph::add(loader, false);
-}
-
-size_t ClassLoaderDataGraph::num_instance_classes() {
-  return _num_instance_classes;
-}
-
-size_t ClassLoaderDataGraph::num_array_classes() {
-  return _num_array_classes;
-}
-
-void ClassLoaderDataGraph::inc_instance_classes(size_t count) {
-  Atomic::add(count, &_num_instance_classes);
-}
-
-void ClassLoaderDataGraph::dec_instance_classes(size_t count) {
-  assert(count <= _num_instance_classes, "Sanity");
-  Atomic::sub(count, &_num_instance_classes);
-}
-
-void ClassLoaderDataGraph::inc_array_classes(size_t count) {
-  Atomic::add(count, &_num_array_classes);
-}
-
-void ClassLoaderDataGraph::dec_array_classes(size_t count) {
-  assert(count <= _num_array_classes, "Sanity");
-  Atomic::sub(count, &_num_array_classes);
-}
-
-bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() {
-  // Only clean metaspaces after full GC.
-  bool do_cleaning = _safepoint_cleanup_needed;
-#if INCLUDE_JVMTI
-  do_cleaning = do_cleaning && (_should_clean_deallocate_lists || InstanceKlass::has_previous_versions());
-#else
-  do_cleaning = do_cleaning && _should_clean_deallocate_lists;
-#endif
-  _safepoint_cleanup_needed = false;  // reset
-  return do_cleaning;
-}
-
 #endif // SHARE_VM_CLASSFILE_CLASSLOADERDATA_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Thu Oct 04 14:17:59 2018 +0530
@@ -0,0 +1,697 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoaderDataGraph.inline.hpp"
+#include "classfile/dictionary.hpp"
+#include "classfile/javaClasses.hpp"
+#include "classfile/metadataOnStackMark.hpp"
+#include "classfile/moduleEntry.hpp"
+#include "classfile/packageEntry.hpp"
+#include "logging/log.hpp"
+#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/safepointVerifiers.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/ostream.hpp"
+
+volatile size_t ClassLoaderDataGraph::_num_array_classes = 0;
+volatile size_t ClassLoaderDataGraph::_num_instance_classes = 0;
+
+void ClassLoaderDataGraph::clear_claimed_marks() {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    cld->clear_claimed();
+  }
+}
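+
+// A minimal sketch of how the claim flag is typically consumed: GC calls
+// clear_claimed_marks() before a root-scanning pass, and each visiting
+// closure claims a CLD so it is processed at most once per pass.  The
+// closure shape below is illustrative, not an interface defined here.
+//
+//   void SomeCLDClosure::do_cld(ClassLoaderData* cld) {
+//     if (!cld->claim()) {  // false if another thread already claimed it
+//       return;
+//     }
+//     // ... process cld exactly once per pass ...
+//   }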
+
+// Class iterator used by the compiler.  It gets some number of classes at
+// a safepoint to decay invocation counters on the methods.
+class ClassLoaderDataGraphKlassIteratorStatic {
+  ClassLoaderData* _current_loader_data;
+  Klass*           _current_class_entry;
+ public:
+
+  ClassLoaderDataGraphKlassIteratorStatic() : _current_loader_data(NULL), _current_class_entry(NULL) {}
+
+  InstanceKlass* try_get_next_class() {
+    assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+    size_t max_classes = ClassLoaderDataGraph::num_instance_classes();
+    assert(max_classes > 0, "should not be called with no instance classes");
+    for (size_t i = 0; i < max_classes; ) {
+
+      if (_current_class_entry != NULL) {
+        Klass* k = _current_class_entry;
+        _current_class_entry = _current_class_entry->next_link();
+
+        if (k->is_instance_klass()) {
+          InstanceKlass* ik = InstanceKlass::cast(k);
+          i++;  // count all instance classes found
+          // Classes that are not yet loaded are counted in max_classes,
+          // but only loaded classes are returned.
+          if (ik->is_loaded()) {
+            return ik;
+          }
+        }
+      } else {
+        // Go to next CLD
+        if (_current_loader_data != NULL) {
+          _current_loader_data = _current_loader_data->next();
+        }
+        // Start at the beginning
+        if (_current_loader_data == NULL) {
+          _current_loader_data = ClassLoaderDataGraph::_head;
+        }
+
+        _current_class_entry = _current_loader_data->klasses();
+      }
+    }
+    // Should never be reached unless all instance classes have failed or are not fully loaded.
+    // Caller handles NULL.
+    return NULL;
+  }
+
+  // If the current class for the static iterator is a class being unloaded or
+  // deallocated, adjust the current class.
+  void adjust_saved_class(ClassLoaderData* cld) {
+    if (_current_loader_data == cld) {
+      _current_loader_data = cld->next();
+      if (_current_loader_data != NULL) {
+        _current_class_entry = _current_loader_data->klasses();
+      }  // else try_get_next_class will start at the head
+    }
+  }
+
+  void adjust_saved_class(Klass* klass) {
+    if (_current_class_entry == klass) {
+      _current_class_entry = klass->next_link();
+    }
+  }
+};
+
+static ClassLoaderDataGraphKlassIteratorStatic static_klass_iterator;
+
+InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  return static_klass_iterator.try_get_next_class();
+}
+
+void ClassLoaderDataGraph::adjust_saved_class(ClassLoaderData* cld) {
+  return static_klass_iterator.adjust_saved_class(cld);
+}
+
+void ClassLoaderDataGraph::adjust_saved_class(Klass* klass) {
+  return static_klass_iterator.adjust_saved_class(klass);
+}
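+
+// A sketch of how a safepoint VM operation might drain this iterator; the
+// operation name and loop bound are illustrative assumptions, not part of
+// this interface:
+//
+//   void VM_DecayCounters::doit() {  // runs at a safepoint
+//     for (size_t i = 0; i < ClassLoaderDataGraph::num_instance_classes(); i++) {
+//       InstanceKlass* ik = ClassLoaderDataGraph::try_get_next_class();
+//       if (ik == NULL) break;  // no loaded instance classes to hand out
+//       // ... decay invocation counters on ik's methods ...
+//     }
+//   }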
+
+void ClassLoaderDataGraph::clean_deallocate_lists(bool walk_previous_versions) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");
+  uint loaders_processed = 0;
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    // is_alive check will be necessary for concurrent class unloading.
+    if (cld->is_alive()) {
+      // clean metaspace
+      if (walk_previous_versions) {
+        cld->classes_do(InstanceKlass::purge_previous_versions);
+      }
+      cld->free_deallocate_list();
+      loaders_processed++;
+    }
+  }
+  log_debug(class, loader, data)("clean_deallocate_lists: loaders processed %u %s",
+                                 loaders_processed, walk_previous_versions ? "walk_previous_versions" : "");
+}
+
+void ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");
+
+  _should_clean_deallocate_lists = false; // assume everything gets cleaned
+
+  // Mark metadata seen on the stack so we can delete unreferenced entries.
+  // Walk all metadata, including the expensive code cache walk, only for class redefinition.
+  // The MetadataOnStackMark walk during redefinition saves previous versions if it finds old methods
+  // on the stack or in the code cache, so we only have to repeat the full walk if
+  // they were found at that time.
+  // TODO: have redefinition clean old methods out of the code cache.  They still exist in some places.
+  bool walk_all_metadata = InstanceKlass::has_previous_versions_and_reset();
+
+  MetadataOnStackMark md_on_stack(walk_all_metadata);
+  clean_deallocate_lists(walk_all_metadata);
+}
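+
+// Note on ordering: MetadataOnStackMark is an RAII helper whose constructor
+// marks metadata currently referenced from thread stacks (and performs the
+// more expensive walks as well when its argument is true), so that
+// clean_deallocate_lists() only frees entries that are provably unreferenced;
+// the marks are retired when md_on_stack goes out of scope.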
+
+// GC root: the list of all class loader data created.
+ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
+ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
+ClassLoaderData* ClassLoaderDataGraph::_saved_unloading = NULL;
+ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
+
+bool ClassLoaderDataGraph::_should_purge = false;
+bool ClassLoaderDataGraph::_should_clean_deallocate_lists = false;
+bool ClassLoaderDataGraph::_safepoint_cleanup_needed = false;
+bool ClassLoaderDataGraph::_metaspace_oom = false;
+
+// Add a new class loader data node to the list.  Install the newly created
+// ClassLoaderData in the java/lang/ClassLoader object as a hidden field.
+ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsafe_anonymous) {
+
+  assert_lock_strong(ClassLoaderDataGraph_lock);
+
+  ClassLoaderData* cld;
+
+  // First check if another thread beat us to creating the CLD and installing
+  // it into the loader while we were waiting for the lock.
+  if (!is_unsafe_anonymous && loader.not_null()) {
+    cld = java_lang_ClassLoader::loader_data_acquire(loader());
+    if (cld != NULL) {
+      return cld;
+    }
+  }
+
+  // We mustn't GC until we've installed the ClassLoaderData in the Graph since the CLD
+  // contains oops in _handles that must be walked.  GC doesn't walk CLD from the
+  // loader oop in all collections, particularly young collections.
+  NoSafepointVerifier no_safepoints;
+
+  cld = new ClassLoaderData(loader, is_unsafe_anonymous);
+
+  // First install the new CLD to the Graph.
+  cld->set_next(_head);
+  _head = cld;
+
+  // Next associate with the class_loader.
+  if (!is_unsafe_anonymous) {
+    // Use OrderAccess, since readers need to get the loader_data only after
+    // it's added to the Graph
+    java_lang_ClassLoader::release_set_loader_data(loader(), cld);
+  }
+
+  // Lastly log, if requested
+  LogTarget(Trace, class, loader, data) lt;
+  if (lt.is_enabled()) {
+    ResourceMark rm;
+    LogStream ls(lt);
+    ls.print("create ");
+    cld->print_value_on(&ls);
+    ls.cr();
+  }
+  return cld;
+}
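+
+// The loader_data_acquire / release_set_loader_data pair used above is a
+// safe-publication handshake: the CLD is fully constructed and linked into
+// the graph before the release store, so a reader that observes the field
+// through the acquire load also sees a fully initialized CLD.  In outline
+// (illustrative, not additional code):
+//
+//   writer (this function):                 reader (find_or_create):
+//     cld = new ClassLoaderData(...);         cld = loader_data_acquire(loader());
+//     cld->set_next(_head); _head = cld;      if (cld != NULL) return cld;
+//     release_set_loader_data(loader(), cld);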
+
+ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymous) {
+  MutexLocker ml(ClassLoaderDataGraph_lock);
+  ClassLoaderData* loader_data = add_to_graph(loader, is_unsafe_anonymous);
+  return loader_data;
+}
+
+void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
+    cl->do_cld(cld);
+  }
+}
+
+void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  // Only walk from the head of the unloading list up to any CLDs not yet
+  // purged from a prior unloading cycle (CMS doesn't purge right away).
+  for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
+    assert(cld->is_unloading(), "invariant");
+    cl->do_cld(cld);
+  }
+}
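+
+// The unloading list behaves as a stack: do_unloading() records the old head
+// in _saved_unloading and then pushes newly dead CLDs at _unloading, so the
+// half-open range [_unloading, _saved_unloading) walked above covers exactly
+// the CLDs that died in the most recent unloading pass.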
+
+void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
+    CLDClosure* closure = cld->keep_alive() ? strong : weak;
+    if (closure != NULL) {
+      closure->do_cld(cld);
+    }
+  }
+}
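+
+// CLDs with a nonzero keep_alive() count (for example, loaders still being
+// set up and unsafe-anonymous-class CLDs) must be treated as strong roots
+// even when class unloading is enabled; passing NULL for the weak closure,
+// as always_strong_cld_do() below does, simply skips all other CLDs.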
+
+void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {