changeset 50545:93cb8fb7a843

Merge
author prr
date Wed, 02 May 2018 09:16:10 -0700
parents c87a5690e394 cece972575ac
children 9a36de1df5db
files
	make/Images.gmk
	src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.cpp
	src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.hpp
	src/hotspot/share/gc/g1/bufferingOopClosure.hpp
	src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.cpp
	src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.hpp
	src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.cpp
	src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.hpp
	src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.cpp
	src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.hpp
	src/hotspot/share/runtime/commandLineFlagConstraintList.cpp
	src/hotspot/share/runtime/commandLineFlagConstraintList.hpp
	src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.cpp
	src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.hpp
	src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp
	src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.hpp
	src/hotspot/share/runtime/commandLineFlagRangeList.cpp
	src/hotspot/share/runtime/commandLineFlagRangeList.hpp
	src/hotspot/share/runtime/commandLineFlagWriteableList.cpp
	src/hotspot/share/runtime/commandLineFlagWriteableList.hpp
	src/java.base/windows/classes/java/net/DualStackPlainSocketImpl.java
	src/java.base/windows/native/libnet/DualStackPlainSocketImpl.c
	src/java.base/windows/native/libnet/portconfig.c
	src/java.net.http/share/classes/jdk/internal/net/http/common/ByteBufferPool.java
	src/java.net.http/share/classes/jdk/internal/net/http/common/ByteBufferReference.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicMapImplTest.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicMapLargeTest.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicMapTest.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EconomicSetTest.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/EquivalenceTest.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections.test/src/org/graalvm/collections/test/PairTest.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/EconomicMap.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/EconomicMapImpl.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/EconomicSet.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/Equivalence.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/MapCursor.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/Pair.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/UnmodifiableEconomicMap.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/UnmodifiableEconomicSet.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/UnmodifiableMapCursor.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.collections/src/org/graalvm/collections/package-info.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/util/ModuleAPI.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/phases/CoreCompilerConfiguration.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/Management.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/.checkstyle_checks.xml
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotSuitesProvider.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/HotSpotGraalMBeanTest.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CoreCompilerConfigurationFactory.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalMBean.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.options/src/org/graalvm/compiler/options/OptionValuesAccess.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64ReadNode.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64ReadReplacementPhase.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.serviceprovider/src/org/graalvm/compiler/serviceprovider/JDK9Method.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual.bench/.checkstyle.exclude
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/.checkstyle_checks.xml
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/ComparableWord.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/LocationIdentity.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/Pointer.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/PointerBase.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/SignedWord.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/UnsignedWord.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/WordBase.java
	src/jdk.internal.vm.compiler/share/classes/org.graalvm.word/src/org/graalvm/word/WordFactory.java
	src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/ImplementedMethods.java
	src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/VisibleMemberMap.java
	test/hotspot/gtest/gc/g1/test_bufferingOopClosure.cpp
	test/hotspot/jtreg/runtime/SharedArchiveFile/DefaultUseWithClient.java
	test/hotspot/jtreg/runtime/SharedArchiveFile/org/omg/CORBA/Context.jasm
	test/hotspot/jtreg/runtime/appcds/SharedArchiveFile.java
	test/hotspot/jtreg/runtime/appcds/UseAppCDS.java
	test/hotspot/jtreg/runtime/appcds/javaldr/CheckAnonymousClass.java
	test/hotspot/jtreg/runtime/appcds/jigsaw/classpathtests/src/com/sun/tools/javac/Main2.jasm
	test/hotspot/jtreg/runtime/appcds/jigsaw/classpathtests/src/javax/activation/UnsupportedDataTypeException2.jasm
	test/hotspot/jtreg/runtime/appcds/jigsaw/overridetests/src/java.activation/javax/activation/UnsupportedDataTypeException.java
	test/hotspot/jtreg/runtime/appcds/jigsaw/overridetests/src/java.activation/module-info.java
	test/hotspot/jtreg/runtime/appcds/test-classes/javax/activation/MimeType.jasm
	test/hotspot/jtreg/runtime/constantPool/ACCModule52.java
	test/hotspot/jtreg/runtime/constantPool/ConstModule.java
	test/jdk/ProblemList.txt
diffstat 1382 files changed, 92765 insertions(+), 25569 deletions(-)
--- a/.hgtags	Wed May 02 15:11:54 2018 +0530
+++ b/.hgtags	Wed May 02 09:16:10 2018 -0700
@@ -482,3 +482,4 @@
 0c3e252cea44f06aef570ef464950ab97c669970 jdk-11+9
 6fa770f9f8ab296e1ce255ec17ccf6d4e1051886 jdk-10+46
 69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10
+e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11
--- a/make/CompileJavaModules.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/CompileJavaModules.gmk	Wed May 02 09:16:10 2018 -0700
@@ -439,7 +439,7 @@
     #
 
 jdk.internal.vm.compiler_EXCLUDES += \
-    org.graalvm.collections.test \
+    jdk.internal.vm.compiler.collections.test \
     org.graalvm.compiler.core.match.processor \
     org.graalvm.compiler.nodeinfo.processor \
     org.graalvm.compiler.options.processor \
--- a/make/CompileToolsHotspot.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/CompileToolsHotspot.gmk	Wed May 02 09:16:10 2018 -0700
@@ -47,8 +47,8 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.word/src \
-          $(SRC_DIR)/org.graalvm.collections/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.word/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.core/src \
           $(SRC_DIR)/org.graalvm.compiler.core.common/src \
           $(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
@@ -102,7 +102,7 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.collections/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.options/src \
           $(SRC_DIR)/org.graalvm.compiler.options.processor/src \
           $(SRC_DIR)/org.graalvm.util/src \
@@ -118,8 +118,8 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
-          $(SRC_DIR)/org.graalvm.word/src \
-          $(SRC_DIR)/org.graalvm.collections/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.word/src \
+          $(SRC_DIR)/jdk.internal.vm.compiler.collections/src \
           $(SRC_DIR)/org.graalvm.compiler.bytecode/src \
           $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
           $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
--- a/make/Docs.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/Docs.gmk	Wed May 02 09:16:10 2018 -0700
@@ -429,7 +429,7 @@
 endif
 
 # All modules to have docs generated by docs-jdk-api target
-JDK_MODULES := $(sort $(DOCS_MODULES))
+JDK_MODULES := $(sort $(filter-out $(MODULES_FILTER), $(DOCS_MODULES)))
 
 $(eval $(call SetupApiDocsGeneration, JDK_API, \
     MODULES := $(JDK_MODULES), \
@@ -561,7 +561,7 @@
 JDK_SPECS_TARGETS += $(COPY_JDWP_PROTOCOL)
 
 # Get jvmti.html from the main jvm variant (all variants' jvmti.html are identical).
-JVMTI_HTML := $(HOTSPOT_OUTPUTDIR)/variant-$(JVM_VARIANT_MAIN)/gensrc/jvmtifiles/jvmti.html
+JVMTI_HTML ?= $(HOTSPOT_OUTPUTDIR)/variant-$(JVM_VARIANT_MAIN)/gensrc/jvmtifiles/jvmti.html
 $(eval $(call SetupCopyFiles, COPY_JVMTI_HTML, \
     FILES := $(JVMTI_HTML), \
     DEST := $(DOCS_OUTPUTDIR)/specs, \
--- a/make/GenerateLinkOptData.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/GenerateLinkOptData.gmk	Wed May 02 09:16:10 2018 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -61,11 +61,12 @@
 	$(call MakeDir, $(LINK_OPT_DIR))
 	$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $@))
 	$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $(JLI_TRACE_FILE)))
-	$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@ \
+	$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \
 	    -Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true \
 	    -cp $(SUPPORT_OUTPUTDIR)/classlist.jar \
 	    build.tools.classlist.HelloClasslist \
 	    $(LOG_DEBUG) 2>&1 > $(JLI_TRACE_FILE)
+	$(GREP) -v HelloClasslist $@.raw > $@
 
 # The jli trace is created by the same recipe as classlist. By declaring these
 # dependencies, make will correctly rebuild both jli trace and classlist
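Note on the GenerateLinkOptData.gmk change above: the class list is now dumped to $@.raw, and the HelloClasslist harness classes are stripped with grep before the final $@ is written. As an illustration of that filter step only — the file names below are placeholders, not real build outputs — the same line filter expressed in Java:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.stream.Stream;

    public class ClasslistFilter {
        public static void main(String[] args) throws IOException {
            Path raw = Path.of("classlist.raw");   // stands in for $@.raw
            Path out = Path.of("classlist");       // stands in for $@
            try (Stream<String> lines = Files.lines(raw)) {
                // Equivalent of: grep -v HelloClasslist classlist.raw > classlist
                Files.write(out, lines.filter(l -> !l.contains("HelloClasslist"))
                                      .toList());
            }
        }
    }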
--- a/make/Images.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/Images.gmk	Wed May 02 09:16:10 2018 -0700
@@ -117,7 +117,7 @@
 
 JLINK_TOOL := $(JLINK) -J-Djlink.debug=true \
     --module-path $(IMAGES_OUTPUTDIR)/jmods \
-    --endian $(OPENJDK_BUILD_CPU_ENDIAN) \
+    --endian $(OPENJDK_TARGET_CPU_ENDIAN) \
     --release-info $(BASE_RELEASE_FILE) \
     --order-resources=$(call CommaList, $(JLINK_ORDER_RESOURCES)) \
     --dedup-legal-notices=error-if-not-same-content \
--- a/make/autoconf/flags-cflags.m4	Wed May 02 15:11:54 2018 +0530
+++ b/make/autoconf/flags-cflags.m4	Wed May 02 09:16:10 2018 -0700
@@ -128,16 +128,22 @@
   AC_ARG_ENABLE([warnings-as-errors], [AS_HELP_STRING([--disable-warnings-as-errors],
       [do not consider native warnings to be an error @<:@enabled@:>@])])
 
+  # Set default value.
+  if test "x$TOOLCHAIN_TYPE" = xxlc; then
+    WARNINGS_AS_ERRORS=false
+  else
+    WARNINGS_AS_ERRORS=true
+  fi
+
   AC_MSG_CHECKING([if native warnings are errors])
   if test "x$enable_warnings_as_errors" = "xyes"; then
     AC_MSG_RESULT([yes (explicitly set)])
     WARNINGS_AS_ERRORS=true
   elif test "x$enable_warnings_as_errors" = "xno"; then
-    AC_MSG_RESULT([no])
+    AC_MSG_RESULT([no (explicitly set)])
     WARNINGS_AS_ERRORS=false
   elif test "x$enable_warnings_as_errors" = "x"; then
-    AC_MSG_RESULT([yes (default)])
-    WARNINGS_AS_ERRORS=true
+    AC_MSG_RESULT([${WARNINGS_AS_ERRORS} (default)])
   else
     AC_MSG_ERROR([--enable-warnings-as-errors accepts no argument])
   fi
--- a/make/autoconf/flags.m4	Wed May 02 15:11:54 2018 +0530
+++ b/make/autoconf/flags.m4	Wed May 02 09:16:10 2018 -0700
@@ -233,15 +233,17 @@
   # The sysroot flags are needed for configure to be able to run the compilers
   FLAGS_SETUP_SYSROOT_FLAGS
 
+  # For solstudio and xlc, the word size flag is required for correct behavior.
+  # For clang/gcc, the flag is only strictly required for reduced builds, but
+  # set it always where possible (x86, sparc and ppc).
   if test "x$TOOLCHAIN_TYPE" = xxlc; then
     MACHINE_FLAG="-q${OPENJDK_TARGET_CPU_BITS}"
-  elif test "x$TOOLCHAIN_TYPE" != xmicrosoft; then
-    if test "x$OPENJDK_TARGET_CPU" != xaarch64 &&
-       test "x$OPENJDK_TARGET_CPU" != xarm &&
-       test "x$OPENJDK_TARGET_CPU" != xmips &&
-       test "x$OPENJDK_TARGET_CPU" != xmipsel &&
-       test "x$OPENJDK_TARGET_CPU" != xmips64 &&
-       test "x$OPENJDK_TARGET_CPU" != xmips64el; then
+  elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
+    MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
+  elif test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
+    if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86 ||
+        test "x$OPENJDK_TARGET_CPU_ARCH" = xsparc ||
+        test "x$OPENJDK_TARGET_CPU_ARCH" = xppc; then
       MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
     fi
   fi
--- a/make/autoconf/jdk-options.m4	Wed May 02 15:11:54 2018 +0530
+++ b/make/autoconf/jdk-options.m4	Wed May 02 09:16:10 2018 -0700
@@ -238,6 +238,9 @@
   if test "x$OPENJDK_TARGET_OS" = xaix ; then
     INCLUDE_SA=false
   fi
+  if test "x$OPENJDK_TARGET_CPU" = xs390x ; then
+    INCLUDE_SA=false
+  fi
   AC_SUBST(INCLUDE_SA)
 
   # Compress jars
--- a/make/hotspot/lib/JvmFeatures.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/hotspot/lib/JvmFeatures.gmk	Wed May 02 09:16:10 2018 -0700
@@ -32,7 +32,7 @@
 ifeq ($(call check-jvm-feature, compiler1), true)
   JVM_CFLAGS_FEATURES += -DCOMPILER1
 else
-  JVM_EXCLUDE_PATTERNS += c1_
+  JVM_EXCLUDE_PATTERNS += c1_ c1/
 endif
 
 ifeq ($(call check-jvm-feature, compiler2), true)
--- a/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Wed May 02 15:11:54 2018 +0530
+++ b/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java	Wed May 02 09:16:10 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -318,16 +318,17 @@
         }
         for (Iterator<String> it = myMap.keySet().iterator(); it.hasNext();) {
             String key = it.next();
-            if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
+                if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
                     || key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
                 @SuppressWarnings("unchecked")
                 Map<String, String> nameMap = (Map<String, String>) myMap.get(key);
+
                 // Convert key/value pairs to an array.
                 String[] names = new String[ZONE_NAME_KEYS.length];
                 int ix = 0;
                 for (String nameKey : ZONE_NAME_KEYS) {
                     String name = nameMap.get(nameKey);
-                    if (name == null) {
+                    if (name == null && parentsMap != null) {
                         @SuppressWarnings("unchecked")
                         Map<String, String> parentNames = (Map<String, String>) parentsMap.get(key);
                         if (parentNames != null) {
@@ -357,29 +358,6 @@
                             }
                         }
                     }
-                    // If there are still any nulls, try filling in them from en data.
-                    if (hasNulls(names) && !id.equals("en")) {
-                        @SuppressWarnings("unchecked")
-                        String[] enNames = (String[]) Bundle.getBundle("en").getTargetMap().get(key);
-                        if (enNames == null) {
-                            if (metaKey != null) {
-                                @SuppressWarnings("unchecked")
-                                String[] metaNames = (String[]) Bundle.getBundle("en").getTargetMap().get(metaKey);
-                                enNames = metaNames;
-                            }
-                        }
-                        if (enNames != null) {
-                            for (int i = 0; i < names.length; i++) {
-                                if (names[i] == null) {
-                                    names[i] = enNames[i];
-                                }
-                            }
-                        }
-                        // If there are still nulls, give up names.
-                        if (hasNulls(names)) {
-                            names = null;
-                        }
-                    }
                 }
                 // replace the Map with the array
                 if (names != null) {
@@ -662,12 +640,12 @@
                     if (CLDRConverter.handlerMetaZones.get(tz).equals(meta)) {
                         tzid = tz;
                         break;
-                        }
                     }
                 }
+            }
         } else {
             tzid = key.substring(CLDRConverter.TIMEZONE_ID_PREFIX.length());
-    }
+        }
 
         if (tzid != null) {
             for (Object[] jreZone : jreTimeZoneNames) {
@@ -676,13 +654,13 @@
                         if (map.get(ZONE_NAME_KEYS[i]) == null) {
                             String[] jreNames = (String[])jreZone[1];
                             map.put(ZONE_NAME_KEYS[i], jreNames[i]);
+                        }
+                    }
+                    break;
                 }
             }
-                    break;
         }
     }
-            }
-        }
 
     private void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
         switch (cldrLetter) {
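The Bundle.java hunks above remove the unconditional fallback to the "en" bundle for missing time-zone name elements; together with the new parentsMap null check, only the parent-locale fallback remains. A minimal sketch of that remaining lookup order, with invented keys and names (the real ZONE_NAME_KEYS values differ):

    import java.util.Arrays;
    import java.util.Map;

    public class ParentFallbackSketch {
        static final String[] ZONE_NAME_KEYS = {"zg", "zs", "dg", "ds", "gg", "gs"};

        public static void main(String[] args) {
            Map<String, String> nameMap = Map.of("zs", "PST");  // child locale data
            Map<String, String> parentNames =
                Map.of("zg", "Pacific Time", "zs", "PST", "ds", "PDT");

            String[] names = new String[ZONE_NAME_KEYS.length];
            for (int ix = 0; ix < ZONE_NAME_KEYS.length; ix++) {
                String name = nameMap.get(ZONE_NAME_KEYS[ix]);
                if (name == null && parentNames != null) {  // parent fallback only
                    name = parentNames.get(ZONE_NAME_KEYS[ix]);
                }
                names[ix] = name;  // may remain null; no "en" fallback any more
            }
            System.out.println(Arrays.toString(names));
        }
    }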
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Wed May 02 15:11:54 2018 +0530
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Wed May 02 09:16:10 2018 -0700
@@ -31,6 +31,7 @@
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.nio.file.*;
+import java.text.MessageFormat;
 import java.time.*;
 import java.util.*;
 import java.util.ResourceBundle.Control;
@@ -82,13 +83,15 @@
     static final String CALENDAR_FIRSTDAY_PREFIX = "firstDay.";
     static final String CALENDAR_MINDAYS_PREFIX = "minDays.";
     static final String TIMEZONE_ID_PREFIX = "timezone.id.";
+    static final String EXEMPLAR_CITY_PREFIX = "timezone.excity.";
     static final String ZONE_NAME_PREFIX = "timezone.displayname.";
     static final String METAZONE_ID_PREFIX = "metazone.id.";
     static final String PARENT_LOCALE_PREFIX = "parentLocale.";
+    static final String[] EMPTY_ZONE = {"", "", "", "", "", ""};
 
     private static SupplementDataParseHandler handlerSuppl;
-    private static SupplementalMetadataParseHandler handlerSupplMeta;
     private static LikelySubtagsParseHandler handlerLikelySubtags;
+    static SupplementalMetadataParseHandler handlerSupplMeta;
     static NumberingSystemsParseHandler handlerNumbering;
     static MetaZonesParseHandler handlerMetaZones;
     static TimeZoneParseHandler handlerTimeZone;
@@ -425,7 +428,7 @@
         parseLDMLFile(new File(LIKELYSUBTAGS_SOURCE_FILE), handlerLikelySubtags);
 
         // Parse supplementalMetadata
-        // Currently only interested in deprecated time zone ids.
+        // Currently interested in deprecated time zone ids and language aliases.
         handlerSupplMeta = new SupplementalMetadataParseHandler();
         parseLDMLFile(new File(SPPL_META_SOURCE_FILE), handlerSupplMeta);
     }
@@ -662,23 +665,18 @@
                                 Arrays.deepEquals(data,
                                     (String[])map.get(METAZONE_ID_PREFIX + me.getValue())))
                             .findAny();
-                    if (cldrMeta.isPresent()) {
-                        names.put(tzid, cldrMeta.get().getValue());
-                    } else {
+                    cldrMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
                         // check the JRE meta key, add if there is not.
                         Optional<Map.Entry<String[], String>> jreMeta =
                             jreMetaMap.entrySet().stream()
                                 .filter(jm -> Arrays.deepEquals(data, jm.getKey()))
                                 .findAny();
-                        if (jreMeta.isPresent()) {
-                            names.put(tzid, jreMeta.get().getValue());
-                        } else {
-                            String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
-                            names.put(METAZONE_ID_PREFIX + metaName, data);
-                            names.put(tzid, metaName);
-                            jreMetaMap.put(data, metaName);
-                        }
-                    }
+                        jreMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
+                                String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
+                                names.put(METAZONE_ID_PREFIX + metaName, data);
+                                names.put(tzid, metaName);
+                        });
+                    });
                 }
             });
         }
@@ -705,6 +703,26 @@
             }
         });
 
+        // exemplar cities.
+        Map<String, Object> exCities = map.entrySet().stream()
+                .filter(e -> e.getKey().startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX))
+                .collect(Collectors
+                        .toMap(Map.Entry::getKey, Map.Entry::getValue));
+        names.putAll(exCities);
+
+        if (!id.equals("en") &&
+            !names.isEmpty()) {
+            // CLDR does not have UTC entry, so add it here.
+            names.put("UTC", EMPTY_ZONE);
+
+            // no metazone zones
+            Arrays.asList(handlerMetaZones.get(MetaZonesParseHandler.NO_METAZONE_KEY)
+                .split("\\s")).stream()
+                .forEach(tz -> {
+                    names.put(tz, EMPTY_ZONE);
+                });
+        }
+
         return names;
     }
 
@@ -769,6 +787,10 @@
         "field.hour",
         "timezone.hourFormat",
         "timezone.gmtFormat",
+        "timezone.gmtZeroFormat",
+        "timezone.regionFormat",
+        "timezone.regionFormat.daylight",
+        "timezone.regionFormat.standard",
         "field.minute",
         "field.second",
         "field.zone",
--- a/make/jdk/src/classes/build/tools/cldrconverter/LDMLParseHandler.java	Wed May 02 15:11:54 2018 +0530
+++ b/make/jdk/src/classes/build/tools/cldrconverter/LDMLParseHandler.java	Wed May 02 09:16:10 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,19 +103,30 @@
         case "key":
             // for LocaleNames
             // copy string
-            pushStringEntry(qName, attributes,
-                CLDRConverter.LOCALE_KEY_PREFIX +
-                convertOldKeyName(attributes.getValue("type")));
+            {
+                String key = convertOldKeyName(attributes.getValue("type"));
+                if (key.length() == 2) {
+                    pushStringEntry(qName, attributes,
+                        CLDRConverter.LOCALE_KEY_PREFIX + key);
+                } else {
+                    pushIgnoredContainer(qName);
+                }
+            }
             break;
 
         case "type":
             // for LocaleNames/CalendarNames
             // copy string
-            pushStringEntry(qName, attributes,
-                CLDRConverter.LOCALE_TYPE_PREFIX +
-                convertOldKeyName(attributes.getValue("key")) + "." +
-                attributes.getValue("type"));
-
+            {
+                String key = convertOldKeyName(attributes.getValue("key"));
+                if (key.length() == 2) {
+                    pushStringEntry(qName, attributes,
+                    CLDRConverter.LOCALE_TYPE_PREFIX + key + "." +
+                    attributes.getValue("type"));
+                } else {
+                    pushIgnoredContainer(qName);
+                }
+            }
             break;
 
         //
@@ -445,6 +456,16 @@
         case "gmtFormat":
             pushStringEntry(qName, attributes, "timezone.gmtFormat");
             break;
+        case "gmtZeroFormat":
+            pushStringEntry(qName, attributes, "timezone.gmtZeroFormat");
+            break;
+        case "regionFormat":
+            {
+                String type = attributes.getValue("type");
+                pushStringEntry(qName, attributes, "timezone.regionFormat" +
+                    (type == null ? "" : "." + type));
+            }
+            break;
         case "zone":
             {
                 String tzid = attributes.getValue("type"); // Olson tz id
@@ -474,8 +495,8 @@
         case "daylight": // daylight saving (summer) time name
             pushStringEntry(qName, attributes, CLDRConverter.ZONE_NAME_PREFIX + qName + "." + zoneNameStyle);
             break;
-        case "exemplarCity":  // not used in JDK
-            pushIgnoredContainer(qName);
+        case "exemplarCity":
+            pushStringEntry(qName, attributes, CLDRConverter.EXEMPLAR_CITY_PREFIX);
             break;
 
         //
@@ -877,11 +898,16 @@
         case "generic":
         case "standard":
         case "daylight":
+        case "exemplarCity":
             if (zonePrefix != null && (currentContainer instanceof Entry)) {
                 @SuppressWarnings("unchecked")
                 Map<String, String> valmap = (Map<String, String>) get(zonePrefix + getContainerKey());
                 Entry<?> entry = (Entry<?>) currentContainer;
-                valmap.put(entry.getKey(), (String) entry.getValue());
+                if (qName.equals("exemplarCity")) {
+                    put(CLDRConverter.EXEMPLAR_CITY_PREFIX + getContainerKey(), (String) entry.getValue());
+                } else {
+                    valmap.put(entry.getKey(), (String) entry.getValue());
+                }
             }
             break;
 
--- a/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java	Wed May 02 15:11:54 2018 +0530
+++ b/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java	Wed May 02 09:16:10 2018 -0700
@@ -35,6 +35,8 @@
 import org.xml.sax.SAXException;
 
 class MetaZonesParseHandler extends AbstractLDMLHandler<String> {
+    final static String NO_METAZONE_KEY = "no.metazone.defined";
+
     private String tzid, metazone;
 
     // for java.time.format.ZoneNames.java
@@ -101,10 +103,17 @@
         assert qName.equals(currentContainer.getqName()) : "current=" + currentContainer.getqName() + ", param=" + qName;
         switch (qName) {
         case "timezone":
-            if (tzid == null || metazone == null) {
+            if (tzid == null) {
                 throw new InternalError();
+            } else if (metazone == null) {
+                String no_meta = get(NO_METAZONE_KEY);
+                put(NO_METAZONE_KEY, no_meta == null ? tzid : no_meta + " " + tzid);
+                CLDRConverter.info("No metazone defined for %s%n", tzid);
+            } else {
+                put(tzid, metazone);
             }
-            put(tzid, metazone);
+            tzid = null;
+            metazone = null;
             break;
         }
         currentContainer = currentContainer.getParent();
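The handler change above stops throwing when a zone has no metazone: such tzids are accumulated into one space-separated value under NO_METAZONE_KEY, which CLDRConverter later splits to emit EMPTY_ZONE entries. A small sketch of that accumulate-then-split round trip (the tzids are examples, not CLDR output):

    import java.util.HashMap;
    import java.util.Map;

    public class NoMetazoneSketch {
        static final String NO_METAZONE_KEY = "no.metazone.defined";

        public static void main(String[] args) {
            Map<String, String> data = new HashMap<>();
            // Accumulate, as endElement("timezone") now does when metazone == null.
            for (String tzid : new String[] {"Etc/GMT+1", "Asia/Pyongyang"}) {
                String noMeta = data.get(NO_METAZONE_KEY);
                data.put(NO_METAZONE_KEY, noMeta == null ? tzid : noMeta + " " + tzid);
            }
            // Split, as CLDRConverter does before mapping each id to EMPTY_ZONE.
            for (String tz : data.get(NO_METAZONE_KEY).split("\\s")) {
                System.out.println(tz + " -> EMPTY_ZONE");
            }
        }
    }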
--- a/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java	Wed May 02 15:11:54 2018 +0530
+++ b/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java	Wed May 02 09:16:10 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -211,11 +211,13 @@
                     if (value == null) {
                         CLDRConverter.warning("null value for " + key);
                     } else if (value instanceof String) {
-                        if (type == BundleType.TIMEZONE ||
-                            ((String)value).startsWith(META_VALUE_PREFIX)) {
-                            out.printf("            { \"%s\", %s },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+                        String valStr = (String)value;
+                        if (type == BundleType.TIMEZONE &&
+                            !key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) ||
+                            valStr.startsWith(META_VALUE_PREFIX)) {
+                            out.printf("            { \"%s\", %s },\n", key, CLDRConverter.saveConvert(valStr, useJava));
                         } else {
-                            out.printf("            { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+                            out.printf("            { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert(valStr, useJava));
                         }
                     } else if (value instanceof String[]) {
                         String[] values = (String[]) value;
@@ -268,7 +270,8 @@
             out.printf("public class %s implements LocaleDataMetaInfo {\n", className);
             out.printf("    private static final Map<String, String> resourceNameToLocales = new HashMap<>();\n" +
                        (CLDRConverter.isBaseModule ?
-                       "    private static final Map<Locale, String[]> parentLocalesMap = new HashMap<>();\n\n" :
+                       "    private static final Map<Locale, String[]> parentLocalesMap = new HashMap<>();\n" +
+                       "    private static final Map<String, String> languageAliasMap = new HashMap<>();\n\n" :
                        "\n") +
                        "    static {\n");
 
@@ -299,24 +302,35 @@
                 } else {
                     if ("AvailableLocales".equals(key)) {
                         out.printf("        resourceNameToLocales.put(\"%s\",\n", key);
-                        out.printf("              \"%s\");\n", toLocaleList(metaInfo.get(key), false));
+                        out.printf("              \"%s\");\n", toLocaleList(applyLanguageAliases(metaInfo.get(key)), false));
                     }
                 }
             }
+            // for languageAliasMap
+            if (CLDRConverter.isBaseModule) {
+                CLDRConverter.handlerSupplMeta.getLanguageAliasData().forEach((key, value) -> {
+                    out.printf("                languageAliasMap.put(\"%s\", \"%s\");\n", key, value);
+                });
+            }
 
             out.printf("    }\n\n");
 
             // end of static initializer block.
 
-            // Short TZ names for delayed initialization
+            // Canonical TZ names for delayed initialization
             if (CLDRConverter.isBaseModule) {
-                out.printf("    private static class TZShortIDMapHolder {\n");
-                out.printf("        static final Map<String, String> tzShortIDMap = new HashMap<>();\n");
+                out.printf("    private static class TZCanonicalIDMapHolder {\n");
+                out.printf("        static final Map<String, String> tzCanonicalIDMap = new HashMap<>(600);\n");
                 out.printf("        static {\n");
                 CLDRConverter.handlerTimeZone.getData().entrySet().stream()
                     .forEach(e -> {
-                        out.printf("            tzShortIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
-                                ((String)e.getValue()));
+                        String[] ids = ((String)e.getValue()).split("\\s");
+                        out.printf("            tzCanonicalIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
+                                ids[0]);
+                        for (int i = 1; i < ids.length; i++) {
+                            out.printf("            tzCanonicalIDMap.put(\"%s\", \"%s\");\n", ids[i],
+                                ids[0]);
+                        }
                     });
                 out.printf("        }\n    }\n\n");
             }
@@ -333,8 +347,12 @@
 
             if (CLDRConverter.isBaseModule) {
                 out.printf("    @Override\n" +
-                           "    public Map<String, String> tzShortIDs() {\n" +
-                           "        return TZShortIDMapHolder.tzShortIDMap;\n" +
+                           "    public Map<String, String> getLanguageAliasMap() {\n" +
+                           "        return languageAliasMap;\n" +
+                           "    }\n\n");
+                out.printf("    @Override\n" +
+                           "    public Map<String, String> tzCanonicalIDs() {\n" +
+                           "        return TZCanonicalIDMapHolder.tzCanonicalIDMap;\n" +
                            "    }\n\n");
                 out.printf("    public Map<Locale, String[]> parentLocales() {\n" +
                            "        return parentLocalesMap;\n" +
@@ -370,4 +388,13 @@
         }
         return sb.toString();
     }
+
+    private static SortedSet<String> applyLanguageAliases(SortedSet<String> tags) {
+        CLDRConverter.handlerSupplMeta.getLanguageAliasData().forEach((key, value) -> {
+            if (tags.remove(key)) {
+                tags.add(value);
+            }
+        });
+        return tags;
+    }
 }
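In the generator change above, tzShortIDMap becomes tzCanonicalIDMap: each handler value is now a whitespace-separated list whose first entry is the canonical Olson id, and every id in the list (plus the original key) maps to that canonical id. A minimal sketch of the emitted mapping logic, with invented input data:

    import java.util.HashMap;
    import java.util.Map;

    public class TzCanonicalSketch {
        public static void main(String[] args) {
            // key -> "canonical alias1 alias2 ...", the shape the handler produces
            Map<String, String> handlerData = Map.of(
                "CST", "America/Chicago",
                "EST", "America/New_York US/Eastern");

            Map<String, String> tzCanonicalIDMap = new HashMap<>(600);
            handlerData.forEach((key, value) -> {
                String[] ids = value.split("\\s");
                tzCanonicalIDMap.put(key, ids[0]);         // key -> canonical
                for (int i = 1; i < ids.length; i++) {
                    tzCanonicalIDMap.put(ids[i], ids[0]);  // alias -> canonical
                }
            });
            System.out.println(tzCanonicalIDMap);
        }
    }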
--- a/make/jdk/src/classes/build/tools/cldrconverter/SupplementalMetadataParseHandler.java	Wed May 02 15:11:54 2018 +0530
+++ b/make/jdk/src/classes/build/tools/cldrconverter/SupplementalMetadataParseHandler.java	Wed May 02 09:16:10 2018 -0700
@@ -27,6 +27,8 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.stream.Stream;
 import org.xml.sax.Attributes;
 import org.xml.sax.InputSource;
@@ -38,6 +40,12 @@
  */
 
 class SupplementalMetadataParseHandler extends AbstractLDMLHandler<Object> {
+    private final Map<String, String> languageAliasMap;
+
+    SupplementalMetadataParseHandler() {
+        languageAliasMap = new HashMap<>();
+    }
+
     @Override
     public InputSource resolveEntity(String publicID, String systemID) throws IOException, SAXException {
         // avoid HTTP traffic to unicode.org
@@ -57,6 +65,17 @@
             }
             pushIgnoredContainer(qName);
             break;
+        case "languageAlias":
+            String aliasReason = attributes.getValue("reason");
+            if ("deprecated".equals(aliasReason) || "legacy".equals(aliasReason)) {
+                String tag = attributes.getValue("type");
+                if (!checkLegacyLocales(tag)) {
+                   languageAliasMap.put(tag.replaceAll("_", "-"),
+                   attributes.getValue("replacement").replaceAll("_", "-"));
+                }
+            }
+            pushIgnoredContainer(qName);
+            break;
         default:
             // treat anything else as a container
             pushContainer(qName, attributes);
@@ -69,4 +88,13 @@
                 .map(k -> String.format("        \"%s\", \"%s\",", k, get(k)))
                 .sorted();
     }
+    Map<String, String> getLanguageAliasData() {
+        return languageAliasMap;
+    }
+
+    // skip language aliases for JDK legacy locales for ISO compatibility
+    private boolean checkLegacyLocales(String tag) {
+        return (tag.startsWith("no") || tag.startsWith("in")
+                || tag.startsWith("iw") || tag.startsWith("ji"));
+    }
 }
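The parse handler above records CLDR languageAlias entries whose reason is "deprecated" or "legacy", normalizing underscored tags to BCP 47 hyphens and skipping the JDK legacy locales (tags starting with no, in, iw, ji) kept for ISO compatibility. A short sketch of that filter and normalization — the alias pairs are examples only:

    import java.util.HashMap;
    import java.util.Map;

    public class LanguageAliasSketch {
        // skip aliases for JDK legacy locales kept for ISO compatibility
        static boolean isLegacyLocale(String tag) {
            return tag.startsWith("no") || tag.startsWith("in")
                    || tag.startsWith("iw") || tag.startsWith("ji");
        }

        public static void main(String[] args) {
            Map<String, String> aliases = new HashMap<>();
            String[][] raw = { {"cmn", "zh"}, {"iw", "he"}, {"tl_PH", "fil_PH"} };
            for (String[] pair : raw) {
                if (!isLegacyLocale(pair[0])) {
                    aliases.put(pair[0].replaceAll("_", "-"),
                                pair[1].replaceAll("_", "-"));
                }
            }
            System.out.println(aliases);  // iw=he is skipped; tl_PH becomes tl-PH
        }
    }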
--- a/make/launcher/Launcher-jdk.pack.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/launcher/Launcher-jdk.pack.gmk	Wed May 02 09:16:10 2018 -0700
@@ -88,7 +88,6 @@
     CFLAGS_solaris := -KPIC, \
     CFLAGS_macosx := -fPIC, \
     DISABLED_WARNINGS_gcc := unused-result implicit-fallthrough, \
-    DISABLED_WARNINGS_microsoft := 4005, \
     LDFLAGS := $(UNPACKEXE_ZIPOBJS) \
         $(LDFLAGS_JDKEXE) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
--- a/make/lib/Lib-jdk.pack.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/lib/Lib-jdk.pack.gmk	Wed May 02 09:16:10 2018 -0700
@@ -40,7 +40,6 @@
         $(LIBJAVA_HEADER_FLAGS), \
     CFLAGS_release := -DPRODUCT, \
     DISABLED_WARNINGS_gcc := implicit-fallthrough, \
-    DISABLED_WARNINGS_microsoft := 4005, \
     LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_windows := -map:$(SUPPORT_OUTPUTDIR)/native/$(MODULE)/unpack.map -debug, \
--- a/make/test/JtregNativeHotspot.gmk	Wed May 02 15:11:54 2018 +0530
+++ b/make/test/JtregNativeHotspot.gmk	Wed May 02 09:16:10 2018 -0700
@@ -48,6 +48,21 @@
 
 BUILD_HOTSPOT_JTREG_IMAGE_DIR := $(TEST_IMAGE_DIR)/hotspot/jtreg
 
+################################################################################
+# Former VM TestBase tests.
+################################################################################
+
+VM_TESTBASE_DIR := $(TOPDIR)/test/hotspot/jtreg/vmTestbase
+
+VM_SHARE_INCLUDES := \
+    -I$(VM_TESTBASE_DIR)/vm/share \
+    -I$(VM_TESTBASE_DIR)/nsk/share/native \
+    -I$(VM_TESTBASE_DIR)/nsk/share/jni
+
+BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libProcessUtils := $(VM_SHARE_INCLUDES)
+
+################################################################################
+
 # Platform specific setup
 ifneq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc)
   BUILD_HOTSPOT_JTREG_EXCLUDE += liboverflow.c exeThreadSignalMask.c
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -32,9 +32,6 @@
 #include "nativeInst_aarch64.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "vmreg_aarch64.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif
 
 
 #define __ ce->masm()->
@@ -350,42 +347,4 @@
   __ b(_continuation);
 }
 
-
-/////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
-  // At this point we know that marking is in progress.
-  // If do_load() is true then we have to emit the
-  // load of the previous value; otherwise it has already
-  // been loaded into _pre_val.
-
-  __ bind(_entry);
-  assert(pre_val()->is_register(), "Precondition.");
-
-  Register pre_val_reg = pre_val()->as_register();
-
-  if (do_load()) {
-    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
-  }
-  __ cbz(pre_val_reg, _continuation);
-  ce->store_parameter(pre_val()->as_register(), 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
-  __ b(_continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
-  __ bind(_entry);
-  assert(addr()->is_register(), "Precondition.");
-  assert(new_val()->is_register(), "Precondition.");
-  Register new_val_reg = new_val()->as_register();
-  __ cbz(new_val_reg, _continuation);
-  ce->store_parameter(addr()->as_pointer_register(), 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
-  __ b(_continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-/////////////////////////////////////////////////////////////////////////////
-
 #undef __
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -1558,7 +1558,16 @@
 
 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
   assert(VM_Version::supports_cx8(), "wrong machine");
-  Register addr = as_reg(op->addr());
+  Register addr;
+  if (op->addr()->is_register()) {
+    addr = as_reg(op->addr());
+  } else {
+    assert(op->addr()->is_address(), "what else?");
+    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
+    assert(addr_ptr->disp() == 0, "need 0 disp");
+    assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
+    addr = as_reg(addr_ptr->base());
+  }
   Register newval = as_reg(op->new_value());
   Register cmpval = as_reg(op->cmp_value());
   Label succeed, fail, around;
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -144,8 +144,22 @@
 
   // accumulate fixed displacements
   if (index->is_constant()) {
-    large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
-    index = LIR_OprFact::illegalOpr;
+    LIR_Const *constant = index->as_constant_ptr();
+    if (constant->type() == T_INT) {
+      large_disp += index->as_jint() << shift;
+    } else {
+      assert(constant->type() == T_LONG, "should be");
+      jlong c = index->as_jlong() << shift;
+      if ((jlong)((jint)c) == c) {
+        large_disp += c;
+        index = LIR_OprFact::illegalOpr;
+      } else {
+        LIR_Opr tmp = new_register(T_LONG);
+        __ move(index, tmp);
+        index = tmp;
+        // apply shift and displacement below
+      }
+    }
   }
 
   if (index->is_register()) {
@@ -183,9 +197,8 @@
   }
 }
 
-
 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
-                                              BasicType type, bool needs_card_mark) {
+                                              BasicType type) {
   int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
   int elem_size = type2aelembytes(type);
   int shift = exact_log2(elem_size);
@@ -206,16 +219,7 @@
                             LIR_Address::scale(type),
                             offset_in_bytes, type);
   }
-  if (needs_card_mark) {
-    // This store will need a precise card mark, so go ahead and
-    // compute the full adddres instead of computing once for the
-    // store and again for the card mark.
-    LIR_Opr tmp = new_pointer_register();
-    __ leal(LIR_OprFact::address(addr), tmp);
-    return new LIR_Address(tmp, type);
-  } else {
-    return addr;
-  }
+  return addr;
 }
 
 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@@ -305,87 +309,17 @@
   __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
 }
 
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+    LIR_Opr tmp1 = new_register(objectType);
+    LIR_Opr tmp2 = new_register(objectType);
+    LIR_Opr tmp3 = new_register(objectType);
+    __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
+}
+
 //----------------------------------------------------------------------
 //             visitor functions
 //----------------------------------------------------------------------
 
-
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
-  assert(x->is_pinned(),"");
-  bool needs_range_check = x->compute_needs_range_check();
-  bool use_length = x->length() != NULL;
-  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
-  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
-                                         !get_jobject_constant(x->value())->is_null_object() ||
-                                         x->should_profile());
-
-  LIRItem array(x->array(), this);
-  LIRItem index(x->index(), this);
-  LIRItem value(x->value(), this);
-  LIRItem length(this);
-
-  array.load_item();
-  index.load_nonconstant();
-
-  if (use_length && needs_range_check) {
-    length.set_instruction(x->length());
-    length.load_item();
-
-  }
-  if (needs_store_check || x->check_boolean()) {
-    value.load_item();
-  } else {
-    value.load_for_store(x->elt_type());
-  }
-
-  set_no_result(x);
-
-  // the CodeEmitInfo must be duplicated for each different
-  // LIR-instruction because spilling can occur anywhere between two
-  // instructions and so the debug information must be different
-  CodeEmitInfo* range_check_info = state_for(x);
-  CodeEmitInfo* null_check_info = NULL;
-  if (x->needs_null_check()) {
-    null_check_info = new CodeEmitInfo(range_check_info);
-  }
-
-  // emit array address setup early so it schedules better
-  // FIXME?  No harm in this on aarch64, and it might help
-  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
-  if (GenerateRangeChecks && needs_range_check) {
-    if (use_length) {
-      __ cmp(lir_cond_belowEqual, length.result(), index.result());
-      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
-    } else {
-      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
-      // range_check also does the null check
-      null_check_info = NULL;
-    }
-  }
-
-  if (GenerateArrayStoreCheck && needs_store_check) {
-    LIR_Opr tmp1 = new_register(objectType);
-    LIR_Opr tmp2 = new_register(objectType);
-    LIR_Opr tmp3 = new_register(objectType);
-
-    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
-    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
-  }
-
-  if (obj_store) {
-    // Needs GC write barriers.
-    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-    __ move(value.result(), array_addr, null_check_info);
-    // Seems to be a precise
-    post_barrier(LIR_OprFact::address(array_addr), value.result());
-  } else {
-    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
-    __ move(result, array_addr, null_check_info);
-  }
-}
-
 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
   assert(x->is_pinned(),"");
   LIRItem obj(x->obj(), this);
@@ -771,76 +705,42 @@
   }
 }
 
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
-  assert(x->number_of_arguments() == 4, "wrong type");
-  LIRItem obj   (x->argument_at(0), this);  // object
-  LIRItem offset(x->argument_at(1), this);  // offset of field
-  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
-  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
+  new_value.load_item();
+  cmp_value.load_item();
+  LIR_Opr result = new_register(T_INT);
+  if (type == T_OBJECT || type == T_ARRAY) {
+    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
+  } else if (type == T_INT) {
+    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
+  } else if (type == T_LONG) {
+    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
+  } else {
+    ShouldNotReachHere();
+    Unimplemented();
+  }
+  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
+  return result;
+}
 
-  assert(obj.type()->tag() == objectTag, "invalid type");
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+  bool is_oop = type == T_OBJECT || type == T_ARRAY;
+  LIR_Opr result = new_register(type);
+  value.load_item();
+  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+  LIR_Opr tmp = new_register(T_INT);
+  __ xchg(addr, value.result(), result, tmp);
+  return result;
+}
 
-  // In 64bit the type can be long, sparc doesn't have this assert
-  // assert(offset.type()->tag() == intTag, "invalid type");
-
-  assert(cmp.type()->tag() == type->tag(), "invalid type");
-  assert(val.type()->tag() == type->tag(), "invalid type");
-
-  // get address of field
-  obj.load_item();
-  offset.load_nonconstant();
-  val.load_item();
-  cmp.load_item();
-
-  LIR_Address* a;
-  if(offset.result()->is_constant()) {
-    jlong c = offset.result()->as_jlong();
-    if ((jlong)((jint)c) == c) {
-      a = new LIR_Address(obj.result(),
-                          (jint)c,
-                          as_BasicType(type));
-    } else {
-      LIR_Opr tmp = new_register(T_LONG);
-      __ move(offset.result(), tmp);
-      a = new LIR_Address(obj.result(),
-                          tmp,
-                          as_BasicType(type));
-    }
-  } else {
-    a = new LIR_Address(obj.result(),
-                        offset.result(),
-                        0,
-                        as_BasicType(type));
-  }
-  LIR_Opr addr = new_pointer_register();
-  __ leal(LIR_OprFact::address(a), addr);
-
-  if (type == objectType) {  // Write-barrier needed for Object fields.
-    // Do the pre-write barrier, if any.
-    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-  }
-
-  LIR_Opr result = rlock_result(x);
-
-  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
-  if (type == objectType)
-    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
-               result);
-  else if (type == intType)
-    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
-  else if (type == longType)
-    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
-  else {
-    ShouldNotReachHere();
-  }
-
-  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
-
-  if (type == objectType) {   // Write-barrier needed for Object fields.
-    // Seems to be precise
-    post_barrier(addr, val.result());
-  }
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+  LIR_Opr result = new_register(type);
+  value.load_item();
+  assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
+  LIR_Opr tmp = new_register(T_INT);
+  __ xadd(addr, value.result(), result, tmp);
+  return result;
 }
 
 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
@@ -1287,7 +1187,7 @@
   LIRItem obj(x->obj(), this);
 
   CodeEmitInfo* patching_info = NULL;
-  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
+  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
     // must do this before locking the destination register as an oop register,
     // and before the obj is loaded (the latter is for deoptimization)
     patching_info = state_for(x, x->state_before());
@@ -1433,84 +1333,3 @@
 
   __ volatile_load_mem_reg(address, result, info);
 }
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
-                                     BasicType type, bool is_volatile) {
-  LIR_Address* addr = new LIR_Address(src, offset, type);
-  __ load(addr, dst);
-}
-
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
-                                     BasicType type, bool is_volatile) {
-  LIR_Address* addr = new LIR_Address(src, offset, type);
-  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-  if (is_obj) {
-    // Do the pre-write barrier, if any.
-    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-    __ move(data, addr);
-    assert(src->is_register(), "must be register");
-    // Seems to be a precise address
-    post_barrier(LIR_OprFact::address(addr), data);
-  } else {
-    __ move(data, addr);
-  }
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
-  BasicType type = x->basic_type();
-  LIRItem src(x->object(), this);
-  LIRItem off(x->offset(), this);
-  LIRItem value(x->value(), this);
-
-  src.load_item();
-  off.load_nonconstant();
-
-  // We can cope with a constant increment in an xadd
-  if (! (x->is_add()
-         && value.is_constant()
-         && can_inline_as_constant(x->value()))) {
-    value.load_item();
-  }
-
-  LIR_Opr dst = rlock_result(x, type);
-  LIR_Opr data = value.result();
-  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-  LIR_Opr offset = off.result();
-
-  if (data == dst) {
-    LIR_Opr tmp = new_register(data->type());
-    __ move(data, tmp);
-    data = tmp;
-  }
-
-  LIR_Address* addr;
-  if (offset->is_constant()) {
-    jlong l = offset->as_jlong();
-    assert((jlong)((jint)l) == l, "offset too large for constant");
-    jint c = (jint)l;
-    addr = new LIR_Address(src.result(), c, type);
-  } else {
-    addr = new LIR_Address(src.result(), offset, type);
-  }
-
-  LIR_Opr tmp = new_register(T_INT);
-  LIR_Opr ptr = LIR_OprFact::illegalOpr;
-
-  if (x->is_add()) {
-    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
-  } else {
-    if (is_obj) {
-      // Do the pre-write barrier, if any.
-      ptr = new_pointer_register();
-      __ add(src.result(), off.result(), ptr);
-      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
-                  true /* do_load */, false /* patch */, NULL);
-    }
-    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
-    if (is_obj) {
-      post_barrier(ptr, data);
-    }
-  }
-}
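The LIRGenerator rewrite above folds the old do_CompareAndSwap and do_UnsafeGetAndSetObject visitors into the three shared hooks atomic_cmpxchg, atomic_xchg, and atomic_add. These are roughly the operation shapes that java.util.concurrent.atomic exposes; a tiny demonstration of the three at the Java level (not C1 code, just the semantics the backend compiles):

    import java.util.concurrent.atomic.AtomicInteger;

    public class AtomicShapesDemo {
        public static void main(String[] args) {
            AtomicInteger v = new AtomicInteger(41);
            // cmpxchg: store only if the current value matches the expected one
            boolean swapped = v.compareAndSet(41, 42);  // ~ atomic_cmpxchg
            // xchg: unconditionally install a new value, return the old one
            int old = v.getAndSet(7);                   // ~ atomic_xchg
            // add: atomic fetch-and-add
            int prev = v.getAndAdd(3);                  // ~ atomic_add
            System.out.println(swapped + " " + old + " " + prev + " " + v.get());
            // prints: true 42 7 10
        }
    }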
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -358,6 +358,16 @@
 void C1_MacroAssembler::verified_entry() {
 }
 
+void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
+  // rbp, + 0: link
+  //     + 1: return address
+  //     + 2: argument with offset 0
+  //     + 3: argument with offset 1
+  //     + 4: ...
+
+  ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
+}
+
 #ifndef PRODUCT
 
 void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp	Wed May 02 09:16:10 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -109,4 +109,6 @@
   // This platform only uses signal-based null checks. The Label is not needed.
   void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
 
+  void load_parameter(int offset_in_words, Register reg);
+
 #endif // CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -43,11 +43,6 @@
 #include "runtime/vframe.hpp"
 #include "runtime/vframeArray.hpp"
 #include "vmreg_aarch64.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
 
 
 // Implementation of StubAssembler
@@ -173,31 +168,32 @@
   ~StubFrame();
 };
 
+void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
+  set_info(name, must_gc_arguments);
+  enter();
+}
+
+void StubAssembler::epilogue() {
+  leave();
+  ret(lr);
+}
 
 #define __ _sasm->
 
 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
   _sasm = sasm;
-  __ set_info(name, must_gc_arguments);
-  __ enter();
+  __ prologue(name, must_gc_arguments);
 }
 
 // load parameters that were stored with LIR_Assembler::store_parameter
 // Note: offsets for store_parameter and load_argument must match
 void StubFrame::load_argument(int offset_in_words, Register reg) {
-  // rbp, + 0: link
-  //     + 1: return address
-  //     + 2: argument with offset 0
-  //     + 3: argument with offset 1
-  //     + 4: ...
-
-  __ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
+  __ load_parameter(offset_in_words, reg);
 }
 
 
 StubFrame::~StubFrame() {
-  __ leave();
-  __ ret(lr);
+  __ epilogue();
 }
 
 #undef __
@@ -1100,136 +1096,6 @@
       }
       break;
 
-#if INCLUDE_ALL_GCS
-
-    case g1_pre_barrier_slow_id:
-      {
-        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
-        // arg0 : previous value of memory
-
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          __ mov(r0, (int)id);
-          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
-          __ should_not_reach_here();
-          break;
-        }
-
-        const Register pre_val = r0;
-        const Register thread = rthread;
-        const Register tmp = rscratch1;
-
-        Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
-        Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
-        Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
-
-        Label done;
-        Label runtime;
-
-        // Is marking still active?
-        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
-          __ ldrw(tmp, in_progress);
-        } else {
-          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
-          __ ldrb(tmp, in_progress);
-        }
-        __ cbzw(tmp, done);
-
-        // Can we store original value in the thread's buffer?
-        __ ldr(tmp, queue_index);
-        __ cbz(tmp, runtime);
-
-        __ sub(tmp, tmp, wordSize);
-        __ str(tmp, queue_index);
-        __ ldr(rscratch2, buffer);
-        __ add(tmp, tmp, rscratch2);
-        f.load_argument(0, rscratch2);
-        __ str(rscratch2, Address(tmp, 0));
-        __ b(done);
-
-        __ bind(runtime);
-        __ push_call_clobbered_registers();
-        f.load_argument(0, pre_val);
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
-        __ pop_call_clobbered_registers();
-        __ bind(done);
-      }
-      break;
-    case g1_post_barrier_slow_id:
-      {
-        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
-
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          __ mov(r0, (int)id);
-          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
-          __ should_not_reach_here();
-          break;
-        }
-
-        // arg0: store_address
-        Address store_addr(rfp, 2*BytesPerWord);
-
-        Label done;
-        Label runtime;
-
-        // At this point we know new_value is non-NULL and the new_value crosses regions.
-        // Must check to see if card is already dirty
-
-        const Register thread = rthread;
-
-        Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
-        Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
-
-        const Register card_offset = rscratch2;
-        // LR is free here, so we can use it to hold the byte_map_base.
-        const Register byte_map_base = lr;
-
-        assert_different_registers(card_offset, byte_map_base, rscratch1);
-
-        f.load_argument(0, card_offset);
-        __ lsr(card_offset, card_offset, CardTable::card_shift);
-        __ load_byte_map_base(byte_map_base);
-        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
-        __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
-        __ br(Assembler::EQ, done);
-
-        assert((int)CardTable::dirty_card_val() == 0, "must be 0");
-
-        __ membar(Assembler::StoreLoad);
-        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
-        __ cbzw(rscratch1, done);
-
-        // storing region crossing non-NULL, card is clean.
-        // dirty card and log.
-        __ strb(zr, Address(byte_map_base, card_offset));
-
-        // Convert card offset into an address in card_addr
-        Register card_addr = card_offset;
-        __ add(card_addr, byte_map_base, card_addr);
-
-        __ ldr(rscratch1, queue_index);
-        __ cbz(rscratch1, runtime);
-        __ sub(rscratch1, rscratch1, wordSize);
-        __ str(rscratch1, queue_index);
-
-        // Reuse LR to hold buffer_addr
-        const Register buffer_addr = lr;
-
-        __ ldr(buffer_addr, buffer);
-        __ str(card_addr, Address(buffer_addr, rscratch1));
-        __ b(done);
-
-        __ bind(runtime);
-        __ push_call_clobbered_registers();
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
-        __ pop_call_clobbered_registers();
-        __ bind(done);
-
-      }
-      break;
-#endif
-
     case predicate_failed_trap_id:
       {
         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
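
With prologue() and epilogue() moved onto StubAssembler, StubFrame becomes a pure RAII wrapper, which lets stub generators outside Runtime1 (such as G1BarrierSetAssembler below) emit the same frame setup directly. A minimal sketch of the pattern; the stub name and body here are made up:

    // Sketch only: "example_stub" is a hypothetical name.
    void generate_example_stub(StubAssembler* sasm) {
      StubFrame f(sasm, "example_stub", false /* must_gc_arguments */); // prologue
      f.load_argument(0, r0);  // reads the slot written by store_parameter(..., 0)
      // ... stub body ...
    }                          // ~StubFrame() emits the epilogue: leave(); ret(lr)
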
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -24,6 +24,9 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1CardTable.hpp"
@@ -307,4 +310,167 @@
 
 }
 
+#ifdef COMPILER1
+
 #undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
+
+  __ bind(*stub->entry());
+
+  assert(stub->pre_val()->is_register(), "Precondition.");
+
+  Register pre_val_reg = stub->pre_val()->as_register();
+
+  if (stub->do_load()) {
+    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+  }
+  __ cbz(pre_val_reg, *stub->continuation());
+  ce->store_parameter(stub->pre_val()->as_register(), 0);
+  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
+  __ b(*stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  __ bind(*stub->entry());
+  assert(stub->addr()->is_register(), "Precondition.");
+  assert(stub->new_val()->is_register(), "Precondition.");
+  Register new_val_reg = stub->new_val()->as_register();
+  __ cbz(new_val_reg, *stub->continuation());
+  ce->store_parameter(stub->addr()->as_pointer_register(), 0);
+  __ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
+  __ b(*stub->continuation());
+}
+
+#undef __
+
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+  __ prologue("g1_pre_barrier", false);
+
+  // arg0 : previous value of memory
+
+  BarrierSet* bs = BarrierSet::barrier_set();
+
+  const Register pre_val = r0;
+  const Register thread = rthread;
+  const Register tmp = rscratch1;
+
+  Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
+  Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
+  Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
+
+  Label done;
+  Label runtime;
+
+  // Is marking still active?
+  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+    __ ldrw(tmp, in_progress);
+  } else {
+    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+    __ ldrb(tmp, in_progress);
+  }
+  __ cbzw(tmp, done);
+
+  // Can we store original value in the thread's buffer?
+  __ ldr(tmp, queue_index);
+  __ cbz(tmp, runtime);
+
+  __ sub(tmp, tmp, wordSize);
+  __ str(tmp, queue_index);
+  __ ldr(rscratch2, buffer);
+  __ add(tmp, tmp, rscratch2);
+  __ load_parameter(0, rscratch2);
+  __ str(rscratch2, Address(tmp, 0));
+  __ b(done);
+
+  __ bind(runtime);
+  __ push_call_clobbered_registers();
+  __ load_parameter(0, pre_val);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+  __ pop_call_clobbered_registers();
+  __ bind(done);
+
+  __ epilogue();
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+  __ prologue("g1_post_barrier", false);
+
+  // arg0: store_address
+  Address store_addr(rfp, 2*BytesPerWord);
+
+  BarrierSet* bs = BarrierSet::barrier_set();
+  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+  Label done;
+  Label runtime;
+
+  // At this point we know new_value is non-NULL and that it crosses regions.
+  // We must check whether the card is already dirty.
+
+  const Register thread = rthread;
+
+  Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
+  Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
+
+  const Register card_offset = rscratch2;
+  // LR is free here, so we can use it to hold the byte_map_base.
+  const Register byte_map_base = lr;
+
+  assert_different_registers(card_offset, byte_map_base, rscratch1);
+
+  __ load_parameter(0, card_offset);
+  __ lsr(card_offset, card_offset, CardTable::card_shift);
+  __ load_byte_map_base(byte_map_base);
+  __ ldrb(rscratch1, Address(byte_map_base, card_offset));
+  __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
+  __ br(Assembler::EQ, done);
+
+  assert((int)CardTable::dirty_card_val() == 0, "must be 0");
+
+  __ membar(Assembler::StoreLoad);
+  __ ldrb(rscratch1, Address(byte_map_base, card_offset));
+  __ cbzw(rscratch1, done);
+
+  // Storing a region-crossing non-NULL oop and the card is clean:
+  // dirty the card and log it.
+  __ strb(zr, Address(byte_map_base, card_offset));
+
+  // Convert card offset into an address in card_addr
+  Register card_addr = card_offset;
+  __ add(card_addr, byte_map_base, card_addr);
+
+  __ ldr(rscratch1, queue_index);
+  __ cbz(rscratch1, runtime);
+  __ sub(rscratch1, rscratch1, wordSize);
+  __ str(rscratch1, queue_index);
+
+  // Reuse LR to hold buffer_addr
+  const Register buffer_addr = lr;
+
+  __ ldr(buffer_addr, buffer);
+  __ str(card_addr, Address(buffer_addr, rscratch1));
+  __ b(done);
+
+  __ bind(runtime);
+  __ push_call_clobbered_registers();
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+  __ pop_call_clobbered_registers();
+  __ bind(done);
+  __ epilogue();
+}
+
+#undef __
+
+#endif // COMPILER1
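
generate_c1_pre_barrier_runtime_stub() is the out-of-line half of G1's SATB write barrier: it logs the previous value of the overwritten field into a per-thread buffer that concurrent marking later drains. In C-like pseudocode (the helper names here are assumptions, not JDK API), the logic it implements is:

    // Illustrative sketch of the SATB enqueue above; helper names are assumed.
    void satb_enqueue(JavaThread* thread, oop pre_val) {
      if (!satb_active(thread)) return;            // marking not active: done
      size_t index = satb_index(thread);
      if (index == 0) {                            // buffer full:
        g1_wb_pre(pre_val, thread);                //   fall back to the runtime
        return;
      }
      index -= wordSize;                           // bump-down allocation
      set_satb_index(thread, index);
      *(oop*)((char*)satb_buffer(thread) + index) = pre_val;  // log the value
    }
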
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp	Wed May 02 09:16:10 2018 -0700
@@ -27,6 +27,12 @@
 
 #include "asm/macroAssembler.hpp"
 #include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
 
 class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
 protected:
@@ -54,6 +60,14 @@
                             Address dst, Register val, Register tmp1, Register tmp2);
 
 public:
+#ifdef COMPILER1
+  void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+  void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+  void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
   void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                Register dst, Address src, Register tmp1, Register tmp_thread);
 };
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
+#include "runtime/jniHandles.hpp"
 
 #define __ masm->
 
@@ -64,3 +65,11 @@
   default: Unimplemented();
   }
 }
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+                                                        Register obj, Register tmp, Label& slowpath) {
+  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate
+  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
+  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
+  __ ldr(obj, Address(obj, 0));             // *obj
+}
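
try_resolve_jobject_in_native() resolves a JNI handle without calling into the VM: weak handles are tagged in the low address bit, so clearing the tag and dereferencing yields the oop. The slowpath label is unused in this base version; it exists for barrier sets that need one. What the two instructions compute, as a C sketch:

    // Sketch of the fast resolve above; weak_tag_mask == 1.
    oop resolve_jobject_fast(jobject handle) {
      uintptr_t p = (uintptr_t)handle & ~(uintptr_t)1;  // strip the weak tag bit
      return *(oop*)p;                                  // *obj
    }
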
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Wed May 02 09:16:10 2018 -0700
@@ -40,6 +40,9 @@
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                         Address dst, Register val, Register tmp1, Register tmp2);
 
+  virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
+                                             Register obj, Register tmp, Label& slowpath);
+
   virtual void barrier_stubs_init() {}
 };
 
--- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -56,7 +56,7 @@
     __ strb(zr, Address(obj, rscratch1));
     __ bind(L_already_dirty);
   } else {
-    if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
+    if (ct->scanned_concurrently()) {
       __ membar(Assembler::StoreStore);
     }
     __ strb(zr, Address(obj, rscratch1));
@@ -79,7 +79,7 @@
   const Register count = end; // 'end' register contains bytes count now
   __ load_byte_map_base(scratch);
   __ add(start, start, scratch);
-  if (UseConcMarkSweepGC) {
+  if (ct->scanned_concurrently()) {
     __ membar(__ StoreStore);
   }
   __ bind(L_loop);
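
Both hunks replace a direct collector-flag test with ct->scanned_concurrently(), the property that actually makes the StoreStore fence necessary: when another thread scans cards concurrently, the reference store must become visible before its card mark. The card address arithmetic used by both paths, as a sketch:

    // Sketch of the card-marking address computation; in HotSpot's CardTable,
    // card_shift is 9, i.e. one card byte covers 512 heap bytes.
    jbyte* card_for(jbyte* byte_map_base, uintptr_t addr, int card_shift) {
      return byte_map_base + (addr >> card_shift);
    }
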
--- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -25,6 +25,8 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm_misc.hpp"
@@ -82,11 +84,9 @@
                                               // robj ^ rcounter ^ rcounter == robj
                                               // robj is address dependent on rcounter.
 
-  // If mask changes we need to ensure that the inverse is still encodable as an immediate
-  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
-  __ andr(robj, robj, ~JNIHandles::weak_tag_mask);
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->try_resolve_jobject_in_native(masm, c_rarg0, robj, rscratch1, slow);
 
-  __ ldr(robj, Address(robj, 0));             // *obj
   __ lsr(roffset, c_rarg2, 2);                // offset
 
   assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
@@ -177,4 +177,3 @@
 address JNI_FastGetField::generate_fast_get_double_field() {
   return generate_fast_get_int_field0(T_DOUBLE);
 }
-
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -30,6 +30,7 @@
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
 #include "prims/methodHandles.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/frame.inline.hpp"
 
 #define __ _masm->
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Wed May 02 09:16:10 2018 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,9 +133,29 @@
     address addr = MacroAssembler::target_addr_for_insn(instruction_address());
     *(intptr_t*)addr = x;
   } else {
+    // Store x into the instruction stream.
     MacroAssembler::pd_patch_instruction(instruction_address(), (address)x);
     ICache::invalidate_range(instruction_address(), instruction_size);
   }
+
+  // Find and replace the oop/metadata corresponding to this
+  // instruction in oops section.
+  CodeBlob* cb = CodeCache::find_blob(instruction_address());
+  nmethod* nm = cb->as_nmethod_or_null();
+  if (nm != NULL) {
+    RelocIterator iter(nm, instruction_address(), next_instruction_address());
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type) {
+        oop* oop_addr = iter.oop_reloc()->oop_addr();
+        *oop_addr = cast_to_oop(x);
+        break;
+      } else if (iter.type() == relocInfo::metadata_type) {
+        Metadata** metadata_addr = iter.metadata_reloc()->metadata_addr();
+        *metadata_addr = (Metadata*)x;
+        break;
+      }
+    }
+  }
 }
 
 void NativeMovConstReg::print() {
@@ -348,7 +368,7 @@
   CodeBuffer cb(code_pos, instruction_size);
   MacroAssembler a(&cb);
 
-  a.mov(rscratch1, entry);
+  a.movptr(rscratch1, (uintptr_t)entry);
   a.br(rscratch1);
 
   ICache::invalidate_range(code_pos, instruction_size);
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp	Wed May 02 09:16:10 2018 -0700
@@ -33,9 +33,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_arm.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #define __ ce->masm()->
 
@@ -466,45 +463,4 @@
   __ b(_continuation);
 }
 
-/////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
-  // At this point we know that marking is in progress.
-  // If do_load() is true then we have to emit the
-  // load of the previous value; otherwise it has already
-  // been loaded into _pre_val.
-
-  __ bind(_entry);
-  assert(pre_val()->is_register(), "Precondition.");
-
-  Register pre_val_reg = pre_val()->as_register();
-
-  if (do_load()) {
-    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
-  }
-
-  __ cbz(pre_val_reg, _continuation);
-  ce->verify_reserved_argument_area_size(1);
-  __ str(pre_val_reg, Address(SP));
-  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);
-
-  __ b(_continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
-  __ bind(_entry);
-  assert(addr()->is_register(), "Precondition.");
-  assert(new_val()->is_register(), "Precondition.");
-  Register new_val_reg = new_val()->as_register();
-  __ cbz(new_val_reg, _continuation);
-  ce->verify_reserved_argument_area_size(1);
-  __ str(addr()->as_pointer_register(), Address(SP));
-  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
-  __ b(_continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-/////////////////////////////////////////////////////////////////////////////
-
 #undef __
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Wed May 02 09:16:10 2018 -0700
@@ -34,6 +34,7 @@
 #include "ci/ciObjArrayKlass.hpp"
 #include "ci/ciTypeArrayKlass.hpp"
 #include "ci/ciUtilities.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -374,32 +375,17 @@
 }
 
 
-LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
-                                              BasicType type, bool needs_card_mark) {
+LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type) {
   int base_offset = arrayOopDesc::base_offset_in_bytes(type);
   int elem_size = type2aelembytes(type);
 
   if (index_opr->is_constant()) {
     int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size;
-    if (needs_card_mark) {
-      LIR_Opr base_opr = new_pointer_register();
-      add_large_constant(array_opr, offset, base_opr);
-      return new LIR_Address(base_opr, (intx)0, type);
-    } else {
-      return generate_address(array_opr, offset, type);
-    }
+    return generate_address(array_opr, offset, type);
   } else {
     assert(index_opr->is_register(), "must be");
     int scale = exact_log2(elem_size);
-    if (needs_card_mark) {
-      LIR_Opr base_opr = new_pointer_register();
-      LIR_Address* addr = make_address(base_opr, index_opr, (LIR_Address::Scale)scale, type);
-      __ add(array_opr, LIR_OprFact::intptrConst(base_offset), base_opr);
-      __ add(base_opr, LIR_OprFact::address(addr), base_opr); // add with shifted/extended register
-      return new LIR_Address(base_opr, type);
-    } else {
-      return generate_address(array_opr, index_opr, scale, base_offset, type);
-    }
+    return generate_address(array_opr, index_opr, scale, base_offset, type);
   }
 }
 
@@ -542,88 +528,17 @@
   }
 }
 
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
+  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
+  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
+  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
+}
+
 //----------------------------------------------------------------------
 //             visitor functions
 //----------------------------------------------------------------------
 
-
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
-  assert(x->is_pinned(),"");
-  bool needs_range_check = x->compute_needs_range_check();
-  bool use_length = x->length() != NULL;
-  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
-  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
-                                         !get_jobject_constant(x->value())->is_null_object() ||
-                                         x->should_profile());
-
-  LIRItem array(x->array(), this);
-  LIRItem index(x->index(), this);
-  LIRItem value(x->value(), this);
-  LIRItem length(this);
-
-  array.load_item();
-  index.load_nonconstant();
-
-  if (use_length && needs_range_check) {
-    length.set_instruction(x->length());
-    length.load_item();
-  }
-  if (needs_store_check || x->check_boolean()) {
-    value.load_item();
-  } else {
-    value.load_for_store(x->elt_type());
-  }
-
-  set_no_result(x);
-
-  // the CodeEmitInfo must be duplicated for each different
-  // LIR-instruction because spilling can occur anywhere between two
-  // instructions and so the debug information must be different
-  CodeEmitInfo* range_check_info = state_for(x);
-  CodeEmitInfo* null_check_info = NULL;
-  if (x->needs_null_check()) {
-    null_check_info = new CodeEmitInfo(range_check_info);
-  }
-
-  // emit array address setup early so it schedules better
-  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
-  if (GenerateRangeChecks && needs_range_check) {
-    if (use_length) {
-      __ cmp(lir_cond_belowEqual, length.result(), index.result());
-      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
-    } else {
-      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
-      // range_check also does the null check
-      null_check_info = NULL;
-    }
-  }
-
-  if (GenerateArrayStoreCheck && needs_store_check) {
-    LIR_Opr tmp1 = FrameMap::R0_oop_opr;
-    LIR_Opr tmp2 = FrameMap::R1_oop_opr;
-    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
-    __ store_check(value.result(), array.result(), tmp1, tmp2,
-                   LIR_OprFact::illegalOpr, store_check_info,
-                   x->profiled_method(), x->profiled_bci());
-  }
-
-#if INCLUDE_ALL_GCS
-  if (obj_store) {
-    // Needs GC write barriers.
-    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-  }
-#endif // INCLUDE_ALL_GCS
-
-  LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
-  __ move(result, array_addr, null_check_info);
-  if (obj_store) {
-    post_barrier(LIR_OprFact::address(array_addr), value.result());
-  }
-}
-
-
 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
   assert(x->is_pinned(),"");
   LIRItem obj(x->obj(), this);
@@ -1060,56 +975,52 @@
 #endif // __SOFTFP__
 }
 
-
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
-  assert(x->number_of_arguments() == 4, "wrong type");
-  LIRItem obj   (x->argument_at(0), this);  // object
-  LIRItem offset(x->argument_at(1), this);  // offset of field
-  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
-  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
-
-  LIR_Opr addr = new_pointer_register();
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
   LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
   LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
-
-  // get address of field
-  obj.load_item();
-  offset.load_item();
-  cmp.load_item();
-  val.load_item();
-
-  __ add(obj.result(), offset.result(), addr);
-  LIR_Opr result = rlock_result(x);
-
-  if (type == objectType) {
-#if INCLUDE_ALL_GCS
-    // Do the pre-write barrier, if any.
-    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-#endif // INCLUDE_ALL_GCS
+  new_value.load_item();
+  cmp_value.load_item();
+  LIR_Opr result = new_register(T_INT);
+  if (type == T_OBJECT || type == T_ARRAY) {
 #ifdef AARCH64
     if (UseCompressedOops) {
       tmp1 = new_pointer_register();
       tmp2 = new_pointer_register();
     }
-#endif // AARCH64
-    __ cas_obj(addr, cmp.result(), val.result(), tmp1, tmp2, result);
-    post_barrier(addr, val.result());
-  }
-  else if (type == intType) {
-    __ cas_int(addr, cmp.result(), val.result(), tmp1, tmp1, result);
-  }
-  else if (type == longType) {
+#endif
+    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
+  } else if (type == T_INT) {
+    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
+  } else if (type == T_LONG) {
 #ifndef AARCH64
     tmp1 = new_register(T_LONG);
 #endif // !AARCH64
-    __ cas_long(addr, cmp.result(), val.result(), tmp1, tmp2, result);
-  }
-  else {
+    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
+  } else {
     ShouldNotReachHere();
   }
+  return result;
 }
 
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+  bool is_oop = type == T_OBJECT || type == T_ARRAY;
+  LIR_Opr result = new_register(type);
+  value.load_item();
+  assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+  LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
+  __ xchg(addr, value.result(), result, tmp);
+  return result;
+}
+
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+  LIR_Opr result = new_register(type);
+  value.load_item();
+  assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
+  LIR_Opr tmp = new_register(type);
+  __ xadd(addr, value.result(), result, tmp);
+  return result;
+}
 
 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
   address runtime_func;
@@ -1409,7 +1320,7 @@
 void LIRGenerator::do_CheckCast(CheckCast* x) {
   LIRItem obj(x->obj(), this);
   CodeEmitInfo* patching_info = NULL;
-  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
+  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
     patching_info = state_for(x, x->state_before());
   }
 
@@ -1669,110 +1580,3 @@
   // TODO-AARCH64 implement with ldar instruction
   __ load(address, result, info, lir_patch_none);
 }
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
-                                     BasicType type, bool is_volatile) {
-#ifdef AARCH64
-  __ load(new LIR_Address(src, offset, type), dst);
-#else
-  assert(offset->is_single_cpu(), "must be");
-  if (is_volatile && dst->is_double_cpu()) {
-    LIR_Opr tmp = new_pointer_register();
-    __ add(src, offset, tmp);
-    __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, type), dst, NULL);
-  } else if (type == T_FLOAT || type == T_DOUBLE) {
-    // fld doesn't have indexed addressing mode
-    LIR_Opr tmp = new_register(T_INT);
-    __ add(src, offset, tmp);
-    __ load(new LIR_Address(tmp, (intx)0, type), dst);
-  } else {
-    __ load(new LIR_Address(src, offset, type), dst);
-  }
-#endif // AARCH64
-}
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
-                                     BasicType type, bool is_volatile) {
-#ifdef AARCH64
-  LIR_Address* addr = new LIR_Address(src, offset, type);
-  if (type == T_ARRAY || type == T_OBJECT) {
-    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-    __ move(data, addr);
-    assert(src->is_register(), "must be register");
-    post_barrier(LIR_OprFact::address(addr), data);
-  } else {
-    __ move(data, addr);
-  }
-#else
-  assert(offset->is_single_cpu(), "must be");
-  if (is_volatile && data->is_double_cpu()) {
-    LIR_Opr tmp = new_register(T_INT);
-    __ add(src, offset, tmp);
-    __ volatile_store_mem_reg(data, new LIR_Address(tmp, (intx)0, type), NULL);
-  } else if (type == T_FLOAT || type == T_DOUBLE) {
-    // fst doesn't have indexed addressing mode
-    LIR_Opr tmp = new_register(T_INT);
-    __ add(src, offset, tmp);
-    __ move(data, new LIR_Address(tmp, (intx)0, type));
-  } else {
-    LIR_Address* addr = new LIR_Address(src, offset, type);
-    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-#if INCLUDE_ALL_GCS
-    if (is_obj) {
-      // Do the pre-write barrier, if any.
-      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
-                  true /* do_load */, false /* patch */, NULL);
-    }
-#endif // INCLUDE_ALL_GCS
-    __ move(data, addr);
-    if (is_obj) {
-      assert(src->is_register(), "must be register");
-      post_barrier(LIR_OprFact::address(addr), data);
-    }
-  }
-#endif // AARCH64
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
-  BasicType type = x->basic_type();
-  LIRItem src(x->object(), this);
-  LIRItem off(x->offset(), this);
-  LIRItem value(x->value(), this);
-
-  src.load_item();
-  if (x->is_add()) {
-    value.load_nonconstant();
-  } else {
-    value.load_item();
-  }
-  off.load_nonconstant();
-
-  LIR_Opr dst = rlock_result(x, type);
-  LIR_Opr data = value.result();
-  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-
-  assert (type == T_INT || type == T_LONG || (!x->is_add() && is_obj), "unexpected type");
-  LIR_Opr addr_ptr = new_pointer_register();
-
-  __ add(src.result(), off.result(), addr_ptr);
-
-  LIR_Address* addr = new LIR_Address(addr_ptr, (intx)0, type);
-
-  if (x->is_add()) {
-    LIR_Opr tmp = new_register(type);
-    __ xadd(addr_ptr, data, dst, tmp);
-  } else {
-    LIR_Opr tmp = (UseCompressedOops && is_obj) ? new_pointer_register() : LIR_OprFact::illegalOpr;
-    if (is_obj) {
-      // Do the pre-write barrier, if any.
-      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
-                  true /* do_load */, false /* patch */, NULL);
-    }
-    __ xchg(addr_ptr, data, dst, tmp);
-    if (is_obj) {
-      // Seems to be a precise address
-      post_barrier(LIR_OprFact::address(addr), data);
-    }
-  }
-}
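
The new atomic_cmpxchg() hook above only materializes the CAS and its success flag into a T_INT result; argument loading and GC barriers are handled by its caller in the shared access layer. An illustrative analogy (not JDK code) for the semantics the cas_* LIR ops provide:

    #include <atomic>

    // Illustrative analogy only: cas_int/cas_long/cas_obj produce a success
    // flag in a register, like the bool returned here.
    bool cas_analogue(std::atomic<int>& slot, int expected, int desired) {
      return slot.compare_exchange_strong(expected, desired);
    }
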
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp	Wed May 02 09:16:10 2018 -0700
@@ -42,11 +42,6 @@
 #include "runtime/vframeArray.hpp"
 #include "utilities/align.hpp"
 #include "vmreg_arm.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
 
 // Note: Rtemp usage in this file should not impact C2 and should be
 // correct as long as it is not implicitly used in lower layers (the
@@ -356,6 +351,13 @@
   restore_live_registers(sasm, true, true, false, restore_fpu_registers);
 }
 
+void StubAssembler::save_live_registers() {
+  ::save_live_registers(this);
+}
+
+void StubAssembler::restore_live_registers_without_return() {
+  ::restore_live_registers_without_return(this);
+}
 
 void Runtime1::initialize_pd() {
 }
@@ -533,201 +535,6 @@
       }
       break;
 
-#if INCLUDE_ALL_GCS
-    case g1_pre_barrier_slow_id:
-      {
-        // Input:
-        // - pre_val pushed on the stack
-
-        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
-
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          __ mov(R0, (int)id);
-          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
-          __ should_not_reach_here();
-          break;
-        }
-
-        // save at least the registers that need saving if the runtime is called
-#ifdef AARCH64
-        __ raw_push(R0, R1);
-        __ raw_push(R2, R3);
-        const int nb_saved_regs = 4;
-#else // AARCH64
-        const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
-        const int nb_saved_regs = 6;
-        assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
-        __ push(saved_regs);
-#endif // AARCH64
-
-        const Register r_pre_val_0  = R0; // must be R0, to be ready for the runtime call
-        const Register r_index_1    = R1;
-        const Register r_buffer_2   = R2;
-
-        Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
-        Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
-        Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
-
-        Label done;
-        Label runtime;
-
-        // Is marking still active?
-        assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
-        __ ldrb(R1, queue_active);
-        __ cbz(R1, done);
-
-        __ ldr(r_index_1, queue_index);
-        __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
-        __ ldr(r_buffer_2, buffer);
-
-        __ subs(r_index_1, r_index_1, wordSize);
-        __ b(runtime, lt);
-
-        __ str(r_index_1, queue_index);
-        __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
-
-        __ bind(done);
-
-#ifdef AARCH64
-        __ raw_pop(R2, R3);
-        __ raw_pop(R0, R1);
-#else // AARCH64
-        __ pop(saved_regs);
-#endif // AARCH64
-
-        __ ret();
-
-        __ bind(runtime);
-
-        save_live_registers(sasm);
-
-        assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
-        __ mov(c_rarg1, Rthread);
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
-
-        restore_live_registers_without_return(sasm);
-
-        __ b(done);
-      }
-      break;
-    case g1_post_barrier_slow_id:
-      {
-        // Input:
-        // - store_addr, pushed on the stack
-
-        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
-
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          __ mov(R0, (int)id);
-          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
-          __ should_not_reach_here();
-          break;
-        }
-
-        Label done;
-        Label recheck;
-        Label runtime;
-
-        Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
-        Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
-
-        AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
-
-        // save at least the registers that need saving if the runtime is called
-#ifdef AARCH64
-        __ raw_push(R0, R1);
-        __ raw_push(R2, R3);
-        const int nb_saved_regs = 4;
-#else // AARCH64
-        const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
-        const int nb_saved_regs = 6;
-        assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
-        __ push(saved_regs);
-#endif // AARCH64
-
-        const Register r_card_addr_0 = R0; // must be R0 for the slow case
-        const Register r_obj_0 = R0;
-        const Register r_card_base_1 = R1;
-        const Register r_tmp2 = R2;
-        const Register r_index_2 = R2;
-        const Register r_buffer_3 = R3;
-        const Register tmp1 = Rtemp;
-
-        __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
-        // Note: there is a comment in x86 code about not using
-        // ExternalAddress / lea, due to relocation not working
-        // properly for that address. Should be OK for arm, where we
-        // explicitly specify that 'cardtable' has a relocInfo::none
-        // type.
-        __ lea(r_card_base_1, cardtable);
-        __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
-
-        // first quick check without barrier
-        __ ldrb(r_tmp2, Address(r_card_addr_0));
-
-        __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
-        __ b(recheck, ne);
-
-        __ bind(done);
-
-#ifdef AARCH64
-        __ raw_pop(R2, R3);
-        __ raw_pop(R0, R1);
-#else // AARCH64
-        __ pop(saved_regs);
-#endif // AARCH64
-
-        __ ret();
-
-        __ bind(recheck);
-
-        __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
-
-        // reload card state after the barrier that ensures the stored oop was visible
-        __ ldrb(r_tmp2, Address(r_card_addr_0));
-
-        assert(CardTable::dirty_card_val() == 0, "adjust this code");
-        __ cbz(r_tmp2, done);
-
-        // storing region crossing non-NULL, card is clean.
-        // dirty card and log.
-
-        assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
-        if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
-          // Card table is aligned so the lowest byte of the table address base is zero.
-          __ strb(r_card_base_1, Address(r_card_addr_0));
-        } else {
-          __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
-        }
-
-        __ ldr(r_index_2, queue_index);
-        __ ldr(r_buffer_3, buffer);
-
-        __ subs(r_index_2, r_index_2, wordSize);
-        __ b(runtime, lt); // go to runtime if now negative
-
-        __ str(r_index_2, queue_index);
-
-        __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
-
-        __ b(done);
-
-        __ bind(runtime);
-
-        save_live_registers(sasm);
-
-        assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
-        __ mov(c_rarg1, Rthread);
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
-
-        restore_live_registers_without_return(sasm);
-
-        __ b(done);
-      }
-      break;
-#endif // INCLUDE_ALL_GCS
     case new_instance_id:
     case fast_new_instance_id:
     case fast_new_instance_init_check_id:
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp	Wed May 02 09:16:10 2018 -0700
@@ -26,12 +26,18 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
 
 #define __ masm->
 
@@ -120,3 +126,227 @@
 #endif // !R9_IS_SCRATCHED
 #endif // !AARCH64
 }
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
+
+  __ bind(*stub->entry());
+  assert(stub->pre_val()->is_register(), "Precondition.");
+
+  Register pre_val_reg = stub->pre_val()->as_register();
+
+  if (stub->do_load()) {
+    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+  }
+
+  __ cbz(pre_val_reg, *stub->continuation());
+  ce->verify_reserved_argument_area_size(1);
+  __ str(pre_val_reg, Address(SP));
+  __ call(bs->pre_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
+
+  __ b(*stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  __ bind(*stub->entry());
+  assert(stub->addr()->is_register(), "Precondition.");
+  assert(stub->new_val()->is_register(), "Precondition.");
+  Register new_val_reg = stub->new_val()->as_register();
+  __ cbz(new_val_reg, *stub->continuation());
+  ce->verify_reserved_argument_area_size(1);
+  __ str(stub->addr()->as_pointer_register(), Address(SP));
+  __ call(bs->post_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
+  __ b(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+  // Input:
+  // - pre_val pushed on the stack
+
+  __ set_info("g1_pre_barrier_slow_id", false);
+
+  // save at least the registers that need saving if the runtime is called
+#ifdef AARCH64
+  __ raw_push(R0, R1);
+  __ raw_push(R2, R3);
+  const int nb_saved_regs = 4;
+#else // AARCH64
+  const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
+  const int nb_saved_regs = 6;
+  assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
+  __ push(saved_regs);
+#endif // AARCH64
+
+  const Register r_pre_val_0  = R0; // must be R0, to be ready for the runtime call
+  const Register r_index_1    = R1;
+  const Register r_buffer_2   = R2;
+
+  Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
+  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
+  Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
+
+  Label done;
+  Label runtime;
+
+  // Is marking still active?
+  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+  __ ldrb(R1, queue_active);
+  __ cbz(R1, done);
+
+  __ ldr(r_index_1, queue_index);
+  __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
+  __ ldr(r_buffer_2, buffer);
+
+  __ subs(r_index_1, r_index_1, wordSize);
+  __ b(runtime, lt);
+
+  __ str(r_index_1, queue_index);
+  __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
+
+  __ bind(done);
+
+#ifdef AARCH64
+  __ raw_pop(R2, R3);
+  __ raw_pop(R0, R1);
+#else // AARCH64
+  __ pop(saved_regs);
+#endif // AARCH64
+
+  __ ret();
+
+  __ bind(runtime);
+
+  __ save_live_registers();
+
+  assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
+  __ mov(c_rarg1, Rthread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
+
+  __ restore_live_registers_without_return();
+
+  __ b(done);
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+  // Input:
+  // - store_addr, pushed on the stack
+
+  __ set_info("g1_post_barrier_slow_id", false);
+
+  Label done;
+  Label recheck;
+  Label runtime;
+
+  Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
+  Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
+
+  AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
+
+  // save at least the registers that need saving if the runtime is called
+#ifdef AARCH64
+  __ raw_push(R0, R1);
+  __ raw_push(R2, R3);
+  const int nb_saved_regs = 4;
+#else // AARCH64
+  const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
+  const int nb_saved_regs = 6;
+  assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
+  __ push(saved_regs);
+#endif // AARCH64
+
+  const Register r_card_addr_0 = R0; // must be R0 for the slow case
+  const Register r_obj_0 = R0;
+  const Register r_card_base_1 = R1;
+  const Register r_tmp2 = R2;
+  const Register r_index_2 = R2;
+  const Register r_buffer_3 = R3;
+  const Register tmp1 = Rtemp;
+
+  __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
+  // Note: there is a comment in x86 code about not using
+  // ExternalAddress / lea, due to relocation not working
+  // properly for that address. Should be OK for arm, where we
+  // explicitly specify that 'cardtable' has a relocInfo::none
+  // type.
+  __ lea(r_card_base_1, cardtable);
+  __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
+
+  // first quick check without barrier
+  __ ldrb(r_tmp2, Address(r_card_addr_0));
+
+  __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
+  __ b(recheck, ne);
+
+  __ bind(done);
+
+#ifdef AARCH64
+  __ raw_pop(R2, R3);
+  __ raw_pop(R0, R1);
+#else // AARCH64
+  __ pop(saved_regs);
+#endif // AARCH64
+
+  __ ret();
+
+  __ bind(recheck);
+
+  __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
+
+  // reload card state after the barrier that ensures the stored oop was visible
+  __ ldrb(r_tmp2, Address(r_card_addr_0));
+
+  assert(CardTable::dirty_card_val() == 0, "adjust this code");
+  __ cbz(r_tmp2, done);
+
+  // Storing a region-crossing non-NULL oop and the card is clean:
+  // dirty the card and log it.
+
+  assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
+  if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
+    // Card table is aligned so the lowest byte of the table address base is zero.
+    __ strb(r_card_base_1, Address(r_card_addr_0));
+  } else {
+    __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
+  }
+
+  __ ldr(r_index_2, queue_index);
+  __ ldr(r_buffer_3, buffer);
+
+  __ subs(r_index_2, r_index_2, wordSize);
+  __ b(runtime, lt); // go to runtime if now negative
+
+  __ str(r_index_2, queue_index);
+
+  __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
+
+  __ b(done);
+
+  __ bind(runtime);
+
+  __ save_live_registers();
+
+  assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
+  __ mov(c_rarg1, Rthread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
+
+  __ restore_live_registers_without_return();
+
+  __ b(done);
+}
+
+#undef __
+
+#endif // COMPILER1
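
generate_c1_post_barrier_runtime_stub() implements G1's dirty-card logging: skip cards already marked young, re-check under a StoreLoad fence, then dirty the card and enqueue its address for concurrent refinement. The same flow in C-like pseudocode (helper names are assumptions, not JDK API):

    // Illustrative sketch of the post-barrier slow path above; names assumed.
    void g1_post_barrier(JavaThread* thread, void* store_addr) {
      jbyte* card = card_for(store_addr);
      if (*card == g1_young_card_val()) return;  // young regions: no refinement
      storeload_fence();                         // order the oop store first
      if (*card == dirty_card_val()) return;     // already dirtied by someone
      *card = dirty_card_val();                  // dirty the card ...
      if (!try_enqueue(dirty_card_queue(thread), card)) {  // ... and log it;
        g1_wb_post(card, thread);                // runtime call if buffer full
      }
    }
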
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp	Wed May 02 09:16:10 2018 -0700
@@ -27,6 +27,12 @@
 
 #include "asm/macroAssembler.hpp"
 #include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
 
 class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
 protected:
@@ -34,6 +40,15 @@
                                        Register addr, Register count, int callee_saved_regs);
   void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                         Register addr, Register count, Register tmp);
+
+#ifdef COMPILER1
+public:
+  void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+  void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+  void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
 };
 
 #endif // CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
--- a/src/hotspot/cpu/arm/methodHandles_arm.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp	Wed May 02 09:16:10 2018 -0700
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/frame.inline.hpp"
+#include "utilities/preserveException.hpp"
 
 #define __ _masm->
 
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp	Wed May 02 09:16:10 2018 -0700
@@ -29,6 +29,7 @@
 #include "nativeInst_arm.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.hpp"
+#include "runtime/orderAccess.inline.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Wed May 02 09:16:10 2018 -0700
@@ -33,9 +33,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_ppc.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #define __ ce->masm()->
 
@@ -470,58 +467,4 @@
   __ b(_continuation);
 }
 
-
-///////////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
-  // At this point we know that marking is in progress.
-  // If do_load() is true then we have to emit the
-  // load of the previous value; otherwise it has already
-  // been loaded into _pre_val.
-
-  __ bind(_entry);
-
-  assert(pre_val()->is_register(), "Precondition.");
-  Register pre_val_reg = pre_val()->as_register();
-
-  if (do_load()) {
-    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
-  }
-
-  __ cmpdi(CCR0, pre_val_reg, 0);
-  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
-
-  address stub = Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id);
-  //__ load_const_optimized(R0, stub);
-  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
-  __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
-  __ mtctr(R0);
-  __ bctrl();
-  __ b(_continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
-  __ bind(_entry);
-
-  assert(addr()->is_register(), "Precondition.");
-  assert(new_val()->is_register(), "Precondition.");
-  Register addr_reg = addr()->as_pointer_register();
-  Register new_val_reg = new_val()->as_register();
-
-  __ cmpdi(CCR0, new_val_reg, 0);
-  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
-
-  address stub = Runtime1::entry_for(Runtime1::Runtime1::g1_post_barrier_slow_id);
-  //__ load_const_optimized(R0, stub);
-  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
-  __ mtctr(R0);
-  __ mr(R0, addr_reg); // Pass addr in R0.
-  __ bctrl();
-  __ b(_continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-///////////////////////////////////////////////////////////////////////////////////
-
 #undef __
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Wed May 02 09:16:10 2018 -0700
@@ -2978,7 +2978,9 @@
 
 
 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
-  const Register Rptr = src->as_pointer_register(),
+  const LIR_Address *addr = src->as_address_ptr();
+  assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
+  const Register Rptr = addr->base()->as_pointer_register(),
                  Rtmp = tmp->as_register();
   Register Rco = noreg;
   if (UseCompressedOops && data->is_oop()) {
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp	Wed May 02 09:16:10 2018 -0700
@@ -149,7 +149,12 @@
 
   // Accumulate fixed displacements.
   if (index->is_constant()) {
-    large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
+    LIR_Const *constant = index->as_constant_ptr();
+    if (constant->type() == T_LONG) {
+      large_disp += constant->as_jlong() << shift;
+    } else {
+      large_disp += (intx)(constant->as_jint()) << shift;
+    }
     index = LIR_OprFact::illegalOpr;
   }
 
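
The new T_LONG branch above fixes a narrowing bug: a 64-bit constant index was previously read through as_jint() before the shift, which cannot represent indices above 32 bits. A standalone demonstration of why narrowing before the shift is wrong:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t index = INT64_C(0x180000000);  // needs more than 32 bits
      int shift = 3;                         // e.g. log2 of an 8-byte element
      int64_t bad  = (int64_t)(uint32_t)index << shift;  // narrowed first: wrong
      int64_t good = index << shift;                     // full 64-bit value
      assert(bad != good);                   // 0x400000000 vs 0xC00000000
      return 0;
    }
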
@@ -190,7 +195,7 @@
 
 
 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
-                                              BasicType type, bool needs_card_mark) {
+                                              BasicType type) {
   int elem_size = type2aelembytes(type);
   int shift = exact_log2(elem_size);
 
@@ -230,13 +235,7 @@
       __ add(index_opr, array_opr, base_opr);
     }
   }
-  if (needs_card_mark) {
-    LIR_Opr ptr = new_pointer_register();
-    __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
-    return new LIR_Address(ptr, type);
-  } else {
-    return new LIR_Address(base_opr, offset, type);
-  }
+  return new LIR_Address(base_opr, offset, type);
 }
 
 
@@ -320,80 +319,12 @@
 //             visitor functions
 //----------------------------------------------------------------------
 
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
-  assert(x->is_pinned(),"");
-  bool needs_range_check = x->compute_needs_range_check();
-  bool use_length = x->length() != NULL;
-  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
-  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
-                                         !get_jobject_constant(x->value())->is_null_object() ||
-                                         x->should_profile());
-
-  LIRItem array(x->array(), this);
-  LIRItem index(x->index(), this);
-  LIRItem value(x->value(), this);
-  LIRItem length(this);
-
-  array.load_item();
-  index.load_nonconstant();
-
-  if (use_length && needs_range_check) {
-    length.set_instruction(x->length());
-    length.load_item();
-  }
-  if (needs_store_check || x->check_boolean()) {
-    value.load_item();
-  } else {
-    value.load_for_store(x->elt_type());
-  }
-
-  set_no_result(x);
-
-  // The CodeEmitInfo must be duplicated for each different
-  // LIR-instruction because spilling can occur anywhere between two
-  // instructions and so the debug information must be different.
-  CodeEmitInfo* range_check_info = state_for(x);
-  CodeEmitInfo* null_check_info = NULL;
-  if (x->needs_null_check()) {
-    null_check_info = new CodeEmitInfo(range_check_info);
-  }
-
-  // Emit array address setup early so it schedules better.
-  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
-  if (GenerateRangeChecks && needs_range_check) {
-    if (use_length) {
-      __ cmp(lir_cond_belowEqual, length.result(), index.result());
-      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
-    } else {
-      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
-      // Range_check also does the null check.
-      null_check_info = NULL;
-    }
-  }
-
-  if (GenerateArrayStoreCheck && needs_store_check) {
-    // Following registers are used by slow_subtype_check:
-    LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
-    LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
-    LIR_Opr tmp3 = FrameMap::R6_opr; // temp
-
-    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
-    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3,
-                   store_check_info, x->profiled_method(), x->profiled_bci());
-  }
-
-  if (obj_store) {
-    // Needs GC write barriers.
-    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-  }
-  LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
-  __ move(result, array_addr, null_check_info);
-  if (obj_store) {
-    // Precise card mark.
-    post_barrier(LIR_OprFact::address(array_addr), value.result());
-  }
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+  // Following registers are used by slow_subtype_check:
+  LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
+  LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
+  LIR_Opr tmp3 = FrameMap::R6_opr; // temp
+  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 }
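
Note: do_StoreIndexed disappears from the platform file because the shared LIRGenerator now drives array stores and calls back into this platform hook for the subtype check; the GC barriers previously open-coded here are applied by the barrier set's C1 access code. A heavily abbreviated, hypothetical sketch of the shared flow (not part of this patch):

    // Shared do_StoreIndexed, sketch only:
    LIR_Address* array_addr = emit_array_address(array.result(),
                                                 index.result(), elt_type);
    if (GenerateArrayStoreCheck && needs_store_check) {
      array_store_check(value.result(), array.result(), store_check_info,
                        x->profiled_method(), x->profiled_bci());
    }
    // Pre/post barriers happen inside the barrier-set access path rather
    // than being emitted inline as before.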
 
 
@@ -702,24 +633,68 @@
 }
 
 
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
-  assert(x->number_of_arguments() == 4, "wrong type");
-  LIRItem obj   (x->argument_at(0), this);  // object
-  LIRItem offset(x->argument_at(1), this);  // offset of field
-  LIRItem cmp   (x->argument_at(2), this);  // Value to compare with field.
-  LIRItem val   (x->argument_at(3), this);  // Replace field with val if matches cmp.
-
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+  LIR_Opr result = new_register(T_INT);
   LIR_Opr t1 = LIR_OprFact::illegalOpr;
   LIR_Opr t2 = LIR_OprFact::illegalOpr;
-  LIR_Opr addr = new_pointer_register();
+  cmp_value.load_item();
+  new_value.load_item();
 
-  // Get address of field.
-  obj.load_item();
-  offset.load_item();
-  cmp.load_item();
-  val.load_item();
+  // Volatile load may be followed by Unsafe CAS.
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+    __ membar();
+  } else {
+    __ membar_release();
+  }
 
-  __ add(obj.result(), offset.result(), addr);
+  if (type == T_OBJECT || type == T_ARRAY) {
+    if (UseCompressedOops) {
+      t1 = new_register(T_OBJECT);
+      t2 = new_register(T_OBJECT);
+    }
+    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+  } else if (type == T_INT) {
+    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+  } else if (type == T_LONG) {
+    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+  } else {
+    Unimplemented();
+  }
+  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
+           result, type);
+  return result;
+}
+
+
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+  LIR_Opr result = new_register(type);
+  LIR_Opr tmp = FrameMap::R0_opr;
+
+  value.load_item();
+
+  // Volatile load may be followed by Unsafe CAS.
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+    __ membar();
+  } else {
+    __ membar_release();
+  }
+
+  __ xchg(addr, value.result(), result, tmp);
+
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+    __ membar_acquire();
+  } else {
+    __ membar();
+  }
+  return result;
+}
+
+
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+  LIR_Opr result = new_register(type);
+  LIR_Opr tmp = FrameMap::R0_opr;
+
+  value.load_item();
 
   // Volatile load may be followed by Unsafe CAS.
   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -728,33 +703,14 @@
     __ membar_release();
   }
 
-  if (type == objectType) {  // Write-barrier needed for Object fields.
-    // Only cmp value can get overwritten, no do_load required.
-    pre_barrier(LIR_OprFact::illegalOpr /* addr */, cmp.result() /* pre_val */,
-                false /* do_load */, false /* patch */, NULL);
+  __ xadd(addr, value.result(), result, tmp);
+
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+    __ membar_acquire();
+  } else {
+    __ membar();
   }
-
-  if (type == objectType) {
-    if (UseCompressedOops) {
-      t1 = new_register(T_OBJECT);
-      t2 = new_register(T_OBJECT);
-    }
-    __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
-  } else if (type == intType) {
-    __ cas_int(addr, cmp.result(), val.result(), t1, t2);
-  } else if (type == longType) {
-    __ cas_long(addr, cmp.result(), val.result(), t1, t2);
-  } else {
-    ShouldNotReachHere();
-  }
-  // Generate conditional move of boolean result.
-  LIR_Opr result = rlock_result(x);
-  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
-           result, as_BasicType(type));
-  if (type == objectType) {  // Write-barrier needed for Object fields.
-    // Precise card mark since could either be object or array.
-    post_barrier(addr, val.result());
-  }
+  return result;
 }
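
Note: all three atomic_* hooks bracket the operation with membar_release before and membar_acquire after (strengthened to a full membar on the IRIW-sensitive paths), since PPC is not multi-copy atomic. The intended ordering, modeled with standalone std::atomic fences (an analogy, not the HotSpot implementation):

    #include <atomic>
    #include <cstdio>

    std::atomic<int> cell{0};

    int xchg_with_fences(int new_val) {
      std::atomic_thread_fence(std::memory_order_release);  // ~ membar_release
      int old = cell.exchange(new_val, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);  // ~ membar_acquire
      return old;
    }

    int main() { printf("old=%d\n", xchg_with_fences(42)); return 0; }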
 
 
@@ -1112,7 +1068,7 @@
 void LIRGenerator::do_CheckCast(CheckCast* x) {
   LIRItem obj(x->obj(), this);
   CodeEmitInfo* patching_info = NULL;
-  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
+  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
     // Must do this before locking the destination register as
     // an oop register, and before the obj is loaded (so x->obj()->item()
     // is valid for creating a debug info location).
@@ -1255,110 +1211,6 @@
 }
 
 
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
-                                     BasicType type, bool is_volatile) {
-  LIR_Opr base_op = src;
-  LIR_Opr index_op = offset;
-
-  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-#ifndef _LP64
-  if (is_volatile && type == T_LONG) {
-    __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
-  } else
-#endif
-  {
-    if (type == T_BOOLEAN) {
-      type = T_BYTE;
-    }
-    LIR_Address* addr;
-    if (type == T_ARRAY || type == T_OBJECT) {
-      LIR_Opr tmp = new_pointer_register();
-      __ add(base_op, index_op, tmp);
-      addr = new LIR_Address(tmp, type);
-    } else {
-      addr = new LIR_Address(base_op, index_op, type);
-    }
-
-    if (is_obj) {
-      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
-          true /* do_load */, false /* patch */, NULL);
-      // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
-    }
-    __ move(data, addr);
-    if (is_obj) {
-      // This address is precise.
-      post_barrier(LIR_OprFact::address(addr), data);
-    }
-  }
-}
-
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
-                                     BasicType type, bool is_volatile) {
-#ifndef _LP64
-  if (is_volatile && type == T_LONG) {
-    __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
-  } else
-#endif
-    {
-    LIR_Address* addr = new LIR_Address(src, offset, type);
-    __ load(addr, dst);
-  }
-}
-
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
-  BasicType type = x->basic_type();
-  LIRItem src(x->object(), this);
-  LIRItem off(x->offset(), this);
-  LIRItem value(x->value(), this);
-
-  src.load_item();
-  value.load_item();
-  off.load_nonconstant();
-
-  LIR_Opr dst = rlock_result(x, type);
-  LIR_Opr data = value.result();
-  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-
-  LIR_Opr tmp = FrameMap::R0_opr;
-  LIR_Opr ptr = new_pointer_register();
-  __ add(src.result(), off.result(), ptr);
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ membar();
-  } else {
-    __ membar_release();
-  }
-
-  if (x->is_add()) {
-    __ xadd(ptr, data, dst, tmp);
-  } else {
-    const bool can_move_barrier = true; // TODO: port GraphKit::can_move_pre_barrier() from C2
-    if (!can_move_barrier && is_obj) {
-      // Do the pre-write barrier, if any.
-      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
-                  true /* do_load */, false /* patch */, NULL);
-    }
-    __ xchg(ptr, data, dst, tmp);
-    if (is_obj) {
-      // Seems to be a precise address.
-      post_barrier(ptr, data);
-      if (can_move_barrier) {
-        pre_barrier(LIR_OprFact::illegalOpr, dst /* pre_val */,
-                    false /* do_load */, false /* patch */, NULL);
-      }
-    }
-  }
-
-  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
-    __ membar_acquire();
-  } else {
-    __ membar();
-  }
-}
-
-
 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
   assert(UseCRC32Intrinsics, "or should not be here");
   LIR_Opr result = rlock_result(x);
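
Note: the cmove(lir_cond_equal, intConst(1), intConst(0), result, ...) idiom in atomic_cmpxchg above materializes the CAS outcome as a 0/1 int. A functionally equivalent standalone analogy:

    #include <atomic>
    #include <cstdio>

    std::atomic<long> field{10};

    // compare_exchange yields the boolean that the cmove materializes:
    // 1 on a successful swap, 0 when the compare failed.
    int cas_as_int(long expected, long desired) {
      long e = expected;
      return field.compare_exchange_strong(e, desired) ? 1 : 0;
    }

    int main() {
      printf("%d %d\n", cas_as_int(10, 11), cas_as_int(10, 12));  // 1 0
      return 0;
    }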
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp	Wed May 02 09:16:10 2018 -0700
@@ -42,11 +42,6 @@
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_ppc.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
 
 // Implementation of StubAssembler
 
@@ -708,164 +703,6 @@
       }
       break;
 
-#if INCLUDE_ALL_GCS
-    case g1_pre_barrier_slow_id:
-      {
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          goto unimplemented_entry;
-        }
-
-        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
-
-        // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
-        const int stack_slots = 3;
-        Register pre_val = R0; // previous value of memory
-        Register tmp  = R14;
-        Register tmp2 = R15;
-
-        Label refill, restart, marking_not_active;
-        int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
-        int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
-        int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
-
-        // Spill
-        __ std(tmp, -16, R1_SP);
-        __ std(tmp2, -24, R1_SP);
-
-        // Is marking still active?
-        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
-          __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
-        } else {
-          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
-          __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
-        }
-        __ cmpdi(CCR0, tmp, 0);
-        __ beq(CCR0, marking_not_active);
-
-        __ bind(restart);
-        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
-        // size_t so ld_ptr is appropriate.
-        __ ld(tmp, satb_q_index_byte_offset, R16_thread);
-
-        // index == 0?
-        __ cmpdi(CCR0, tmp, 0);
-        __ beq(CCR0, refill);
-
-        __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
-        __ ld(pre_val, -8, R1_SP); // Load from stack.
-        __ addi(tmp, tmp, -oopSize);
-
-        __ std(tmp, satb_q_index_byte_offset, R16_thread);
-        __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val
-
-        __ bind(marking_not_active);
-        // Restore temp registers and return-from-leaf.
-        __ ld(tmp2, -24, R1_SP);
-        __ ld(tmp, -16, R1_SP);
-        __ blr();
-
-        __ bind(refill);
-        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
-        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
-        __ mflr(R0);
-        __ std(R0, _abi(lr), R1_SP);
-        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
-        __ pop_frame();
-        __ ld(R0, _abi(lr), R1_SP);
-        __ mtlr(R0);
-        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
-        __ b(restart);
-      }
-      break;
-
-  case g1_post_barrier_slow_id:
-    {
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          goto unimplemented_entry;
-        }
-
-        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
-
-        // Using stack slots: spill addr, spill tmp2
-        const int stack_slots = 2;
-        Register tmp = R0;
-        Register addr = R14;
-        Register tmp2 = R15;
-        jbyte* byte_map_base = ci_card_table_address();
-
-        Label restart, refill, ret;
-
-        // Spill
-        __ std(addr, -8, R1_SP);
-        __ std(tmp2, -16, R1_SP);
-
-        __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
-        __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
-        __ add(addr, tmp2, addr);
-        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
-
-        // Return if young card.
-        __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
-        __ beq(CCR0, ret);
-
-        // Return if the sequentially consistent value is already dirty.
-        __ membar(Assembler::StoreLoad);
-        __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
-
-        __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
-        __ beq(CCR0, ret);
-
-        // Not dirty.
-
-        // First, dirty it.
-        __ li(tmp, G1CardTable::dirty_card_val());
-        __ stb(tmp, 0, addr);
-
-        int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
-        int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
-
-        __ bind(restart);
-
-        // Get the index into the update buffer. DirtyCardQueue::_index is
-        // a size_t so ld_ptr is appropriate here.
-        __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
-
-        // index == 0?
-        __ cmpdi(CCR0, tmp2, 0);
-        __ beq(CCR0, refill);
-
-        __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
-        __ addi(tmp2, tmp2, -oopSize);
-
-        __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
-        __ add(tmp2, tmp, tmp2);
-        __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
-
-        // Restore temp registers and return-from-leaf.
-        __ bind(ret);
-        __ ld(tmp2, -16, R1_SP);
-        __ ld(addr, -8, R1_SP);
-        __ blr();
-
-        __ bind(refill);
-        const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
-        __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
-        __ mflr(R0);
-        __ std(R0, _abi(lr), R1_SP);
-        __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
-        __ pop_frame();
-        __ ld(R0, _abi(lr), R1_SP);
-        __ mtlr(R0);
-        __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
-        __ b(restart);
-      }
-      break;
-#endif // INCLUDE_ALL_GCS
-
     case predicate_failed_trap_id:
       {
         __ set_info("predicate_failed_trap", dont_gc_arguments);
@@ -889,7 +726,6 @@
       break;
 
   default:
-  unimplemented_entry:
       {
         __ set_info("unimplemented entry", dont_gc_arguments);
         __ mflr(R0);
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Wed May 02 09:16:10 2018 -0700
@@ -26,12 +26,17 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "runtime/sharedRuntime.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
 
 #define __ masm->
 
@@ -339,4 +344,209 @@
   __ bind(done);
 }
 
+#ifdef COMPILER1
+
 #undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
+
+  __ bind(*stub->entry());
+
+  assert(stub->pre_val()->is_register(), "Precondition.");
+  Register pre_val_reg = stub->pre_val()->as_register();
+
+  if (stub->do_load()) {
+    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+  }
+
+  __ cmpdi(CCR0, pre_val_reg, 0);
+  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
+
+  address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
+  //__ load_const_optimized(R0, c_code);
+  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
+  __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
+  __ mtctr(R0);
+  __ bctrl();
+  __ b(*stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  __ bind(*stub->entry());
+
+  assert(stub->addr()->is_register(), "Precondition.");
+  assert(stub->new_val()->is_register(), "Precondition.");
+  Register addr_reg = stub->addr()->as_pointer_register();
+  Register new_val_reg = stub->new_val()->as_register();
+
+  __ cmpdi(CCR0, new_val_reg, 0);
+  __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
+
+  address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
+  //__ load_const_optimized(R0, c_code);
+  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
+  __ mtctr(R0);
+  __ mr(R0, addr_reg); // Pass addr in R0.
+  __ bctrl();
+  __ b(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+  BarrierSet* bs = BarrierSet::barrier_set();
+
+  __ set_info("g1_pre_barrier_slow_id", false);
+
+  // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
+  const int stack_slots = 3;
+  Register pre_val = R0; // previous value of memory
+  Register tmp  = R14;
+  Register tmp2 = R15;
+
+  Label refill, restart, marking_not_active;
+  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
+  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+
+  // Spill
+  __ std(tmp, -16, R1_SP);
+  __ std(tmp2, -24, R1_SP);
+
+  // Is marking still active?
+  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+    __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
+  } else {
+    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+    __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
+  }
+  __ cmpdi(CCR0, tmp, 0);
+  __ beq(CCR0, marking_not_active);
+
+  __ bind(restart);
+  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
+  // size_t so ld_ptr is appropriate.
+  __ ld(tmp, satb_q_index_byte_offset, R16_thread);
+
+  // index == 0?
+  __ cmpdi(CCR0, tmp, 0);
+  __ beq(CCR0, refill);
+
+  __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
+  __ ld(pre_val, -8, R1_SP); // Load from stack.
+  __ addi(tmp, tmp, -oopSize);
+
+  __ std(tmp, satb_q_index_byte_offset, R16_thread);
+  __ stdx(pre_val, tmp2, tmp); // [_buf + index] := pre_val
+
+  __ bind(marking_not_active);
+  // Restore temp registers and return-from-leaf.
+  __ ld(tmp2, -24, R1_SP);
+  __ ld(tmp, -16, R1_SP);
+  __ blr();
+
+  __ bind(refill);
+  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
+  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+  __ mflr(R0);
+  __ std(R0, _abi(lr), R1_SP);
+  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
+  __ pop_frame();
+  __ ld(R0, _abi(lr), R1_SP);
+  __ mtlr(R0);
+  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+  __ b(restart);
+}
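
Note: the fast path above (load index, test for zero, decrement by oopSize, store at buf + index) is the standard SATB buffer push. A standalone model of that logic (the SATBQueue struct here is a made-up stand-in for the thread-local fields):

    #include <cstddef>

    struct SATBQueue {
      void** buf;     // satb_q_buf
      size_t index;   // satb_q_index: byte offset, grows downward
    };

    // Returns false when the buffer is full (index == 0), which in the
    // stub corresponds to branching to the 'refill' slow path.
    bool satb_try_enqueue(SATBQueue* q, void* pre_val) {
      if (q->index == 0) return false;
      q->index -= sizeof(void*);                      // addi(tmp, tmp, -oopSize)
      *(void**)((char*)q->buf + q->index) = pre_val;  // stdx(pre_val, tmp2, tmp)
      return true;
    }

    int main() {
      void* slots[4] = {};
      SATBQueue q{slots, sizeof(slots)};
      int dummy;
      while (satb_try_enqueue(&q, &dummy)) {}  // fills, then hits refill path
      return 0;
    }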
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+  G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+
+  __ set_info("g1_post_barrier_slow_id", false);
+
+  // Using stack slots: spill addr, spill tmp2
+  const int stack_slots = 2;
+  Register tmp = R0;
+  Register addr = R14;
+  Register tmp2 = R15;
+  jbyte* byte_map_base = bs->card_table()->byte_map_base();
+
+  Label restart, refill, ret;
+
+  // Spill
+  __ std(addr, -8, R1_SP);
+  __ std(tmp2, -16, R1_SP);
+
+  __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
+  __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
+  __ add(addr, tmp2, addr);
+  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
+
+  // Return if young card.
+  __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
+  __ beq(CCR0, ret);
+
+  // Return if the sequentially consistent value is already dirty.
+  __ membar(Assembler::StoreLoad);
+  __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
+
+  __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
+  __ beq(CCR0, ret);
+
+  // Not dirty.
+
+  // First, dirty it.
+  __ li(tmp, G1CardTable::dirty_card_val());
+  __ stb(tmp, 0, addr);
+
+  int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
+  int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
+
+  __ bind(restart);
+
+  // Get the index into the update buffer. DirtyCardQueue::_index is
+  // a size_t so ld_ptr is appropriate here.
+  __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
+
+  // index == 0?
+  __ cmpdi(CCR0, tmp2, 0);
+  __ beq(CCR0, refill);
+
+  __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
+  __ addi(tmp2, tmp2, -oopSize);
+
+  __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
+  __ add(tmp2, tmp, tmp2);
+  __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
+
+  // Restore temp registers and return-from-leaf.
+  __ bind(ret);
+  __ ld(tmp2, -16, R1_SP);
+  __ ld(addr, -8, R1_SP);
+  __ blr();
+
+  __ bind(refill);
+  const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
+  __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+  __ mflr(R0);
+  __ std(R0, _abi(lr), R1_SP);
+  __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
+  __ pop_frame();
+  __ ld(R0, _abi(lr), R1_SP);
+  __ mtlr(R0);
+  __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+  __ b(restart);
+}
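
Note: the post-barrier stub filters twice before dirtying a card: young cards are skipped outright, and a StoreLoad fence plus re-read skips cards that are already dirty (needed to cooperate with concurrent cleaning). The filter, modeled standalone with illustrative constants:

    #include <cstdint>

    const uint8_t g1_young_card = 2;  // illustrative values only
    const uint8_t dirty_card    = 0;

    // Returns true when the card must be dirtied and enqueued.
    bool needs_post_barrier(volatile uint8_t* card) {
      if (*card == g1_young_card) return false;  // young regions: no mark needed
      // membar(Assembler::StoreLoad) sits here in the stub so the re-read
      // observes concurrent updates to the card.
      if (*card == dirty_card) return false;     // already dirty: nothing to do
      *card = dirty_card;                        // dirty it, then enqueue
      return true;
    }

    int main() {
      uint8_t c = 7;  // some non-young, non-dirty value
      return needs_post_barrier(&c) ? 0 : 1;
    }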
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.hpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.hpp	Wed May 02 09:16:10 2018 -0700
@@ -28,6 +28,12 @@
 
 #include "asm/macroAssembler.hpp"
 #include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
 
 class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
 protected:
@@ -45,6 +51,14 @@
                             Register tmp1, Register tmp2, Register tmp3, bool needs_frame);
 
 public:
+#ifdef COMPILER1
+  void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+  void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+  void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Register base, RegisterOrConstant ind_or_offs, Register dst,
                        Register tmp1, Register tmp2, bool needs_frame, Label *is_null = NULL);
--- a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp	Wed May 02 09:16:10 2018 -0700
@@ -50,7 +50,7 @@
 
   Label Lskip_loop, Lstore_loop;
 
-  if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
+  if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
 
   __ sldi_(count, count, LogBytesPerHeapOop);
   __ beq(CCR0, Lskip_loop); // zero length
@@ -75,11 +75,13 @@
 void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
                                                     jbyte* byte_map_base,
                                                     Register tmp, Register obj) {
+  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
+  CardTable* ct = ctbs->card_table();
   assert_different_registers(obj, tmp, R0);
   __ load_const_optimized(tmp, (address)byte_map_base, R0);
   __ srdi(obj, obj, CardTable::card_shift);
   __ li(R0, CardTable::dirty_card_val());
-  if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
+  if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
   __ stbx(R0, tmp, obj);
 }
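
Note: scanned_concurrently() generalizes the old UseConcMarkSweepGC test; the StoreStore fence guarantees that a concurrent scanner which observes the dirty card also observes the reference store the card covers. A standalone analogy of the required ordering:

    #include <atomic>
    #include <cstdint>

    void* heap_slot = nullptr;      // the updated reference (illustrative)
    std::atomic<uint8_t> card{1};   // 1 = clean, 0 = dirty (illustrative)

    void post_write(void* new_ref) {
      heap_slot = new_ref;                                  // reference store first
      std::atomic_thread_fence(std::memory_order_release);  // ~ membar(StoreStore)
      card.store(0, std::memory_order_relaxed);             // then the card mark
    }

    int main() { int x; post_write(&x); return 0; }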
 
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp	Wed May 02 09:16:10 2018 -0700
@@ -34,9 +34,6 @@
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_s390.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #define __ ce->masm()->
 #undef  CHECK_BAILOUT
@@ -453,46 +450,4 @@
   __ branch_optimized(Assembler::bcondAlways, _continuation);
 }
 
-
-///////////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
-  // At this point we know that marking is in progress.
-  // If do_load() is true then we have to emit the
-  // load of the previous value; otherwise it has already
-  // been loaded into _pre_val.
-  __ bind(_entry);
-  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
-  assert(pre_val()->is_register(), "Precondition.");
-
-  Register pre_val_reg = pre_val()->as_register();
-
-  if (do_load()) {
-    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
-  }
-
-  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
-  __ branch_optimized(Assembler::bcondZero, _continuation);
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
-  CHECK_BAILOUT();
-  __ branch_optimized(Assembler::bcondAlways, _continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
-  __ bind(_entry);
-  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
-  assert(addr()->is_register(), "Precondition.");
-  assert(new_val()->is_register(), "Precondition.");
-  Register new_val_reg = new_val()->as_register();
-  __ z_ltgr(new_val_reg, new_val_reg);
-  __ branch_optimized(Assembler::bcondZero, _continuation);
-  __ z_lgr(Z_R1_scratch, addr()->as_pointer_register());
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
-  CHECK_BAILOUT();
-  __ branch_optimized(Assembler::bcondAlways, _continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-
 #undef __
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Wed May 02 09:16:10 2018 -0700
@@ -572,82 +572,145 @@
 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
   assert(src->is_constant(), "should not call otherwise");
   assert(dest->is_address(), "should not call otherwise");
-  // See special case in LIRGenerator::do_StoreIndexed.
-  // T_BYTE: Special case for card mark store.
-  assert(type == T_BYTE || !dest->as_address_ptr()->index()->is_valid(), "not supported");
+
   LIR_Const* c = src->as_constant_ptr();
   Address addr = as_Address(dest->as_address_ptr());
 
   int store_offset = -1;
-  unsigned int lmem = 0;
-  unsigned int lcon = 0;
-  int64_t cbits = 0;
-  switch (type) {
-    case T_INT:    // fall through
-    case T_FLOAT:
-      lmem = 4; lcon = 4; cbits = c->as_jint_bits();
-      break;
-
-    case T_ADDRESS:
-      lmem = 8; lcon = 4; cbits = c->as_jint_bits();
-      break;
-
-    case T_OBJECT:  // fall through
-    case T_ARRAY:
-      if (c->as_jobject() == NULL) {
-        if (UseCompressedOops && !wide) {
-          store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
+
+  if (dest->as_address_ptr()->index()->is_valid()) {
+    switch (type) {
+      case T_INT:    // fall through
+      case T_FLOAT:
+        __ load_const_optimized(Z_R0_scratch, c->as_jint_bits());
+        store_offset = __ offset();
+        if (Immediate::is_uimm12(addr.disp())) {
+          __ z_st(Z_R0_scratch, addr);
         } else {
-          store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
+          __ z_sty(Z_R0_scratch, addr);
         }
-      } else {
-        jobject2reg(c->as_jobject(), Z_R1_scratch);
-        if (UseCompressedOops && !wide) {
-          __ encode_heap_oop(Z_R1_scratch);
-          store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+        break;
+
+      case T_ADDRESS:
+        __ load_const_optimized(Z_R1_scratch, c->as_jint_bits());
+        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+        break;
+
+      case T_OBJECT:  // fall through
+      case T_ARRAY:
+        if (c->as_jobject() == NULL) {
+          if (UseCompressedOops && !wide) {
+            __ clear_reg(Z_R1_scratch, false);
+            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+          } else {
+            __ clear_reg(Z_R1_scratch, true);
+            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+          }
         } else {
-          store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+          jobject2reg(c->as_jobject(), Z_R1_scratch);
+          if (UseCompressedOops && !wide) {
+            __ encode_heap_oop(Z_R1_scratch);
+            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+          } else {
+            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+          }
         }
-      }
+        assert(store_offset >= 0, "check");
+        break;
+
+      case T_LONG:    // fall through
+      case T_DOUBLE:
+        __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
+        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+        break;
+
+      case T_BOOLEAN: // fall through
+      case T_BYTE:
+        __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
+        store_offset = __ offset();
+        if (Immediate::is_uimm12(addr.disp())) {
+          __ z_stc(Z_R0_scratch, addr);
+        } else {
+          __ z_stcy(Z_R0_scratch, addr);
+        }
+        break;
+
+      case T_CHAR:    // fall through
+      case T_SHORT:
+        __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
+        store_offset = __ offset();
+        if (Immediate::is_uimm12(addr.disp())) {
+          __ z_sth(Z_R0_scratch, addr);
+        } else {
+          __ z_sthy(Z_R0_scratch, addr);
+        }
+        break;
+
+      default:
+        ShouldNotReachHere();
+    }
+
+  } else { // no index
+
+    unsigned int lmem = 0;
+    unsigned int lcon = 0;
+    int64_t cbits = 0;
+
+    switch (type) {
+      case T_INT:    // fall through
+      case T_FLOAT:
+        lmem = 4; lcon = 4; cbits = c->as_jint_bits();
+        break;
+
+      case T_ADDRESS:
+        lmem = 8; lcon = 4; cbits = c->as_jint_bits();
+        break;
+
+      case T_OBJECT:  // fall through
+      case T_ARRAY:
+        if (c->as_jobject() == NULL) {
+          if (UseCompressedOops && !wide) {
+            store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
+          } else {
+            store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
+          }
+        } else {
+          jobject2reg(c->as_jobject(), Z_R1_scratch);
+          if (UseCompressedOops && !wide) {
+            __ encode_heap_oop(Z_R1_scratch);
+            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+          } else {
+            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+          }
+        }
+        assert(store_offset >= 0, "check");
+        break;
+
+      case T_LONG:    // fall through
+      case T_DOUBLE:
+        lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
+        break;
+
+      case T_BOOLEAN: // fall through
+      case T_BYTE:
+        lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
+        break;
+
+      case T_CHAR:    // fall through
+      case T_SHORT:
+        lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
+        break;
+
+      default:
+        ShouldNotReachHere();
+    }
+
+    if (store_offset == -1) {
+      store_offset = __ store_const(addr, cbits, lmem, lcon);
       assert(store_offset >= 0, "check");
-      break;
-
-    case T_LONG:    // fall through
-    case T_DOUBLE:
-      lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
-      break;
-
-    case T_BOOLEAN: // fall through
-    case T_BYTE:
-      lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
-      break;
-
-    case T_CHAR:    // fall through
-    case T_SHORT:
-      lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
-      break;
-
-    default:
-      ShouldNotReachHere();
-  };
-
-  // Index register is normally not supported, but for
-  // LIRGenerator::CardTableBarrierSet_post_barrier we make an exception.
-  if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) {
-    __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
-    store_offset = __ offset();
-    if (Immediate::is_uimm12(addr.disp())) {
-      __ z_stc(Z_R0_scratch, addr);
-    } else {
-      __ z_stcy(Z_R0_scratch, addr);
     }
   }
 
-  if (store_offset == -1) {
-    store_offset = __ store_const(addr, cbits, lmem, lcon);
-    assert(store_offset >= 0, "check");
-  }
-
   if (info != NULL) {
     add_debug_info_for_null_check(store_offset, info);
   }
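
Note: the new indexed path selects between base and long-displacement instruction forms (z_st vs z_sty, z_stc vs z_stcy, z_sth vs z_sthy) with a 12-bit unsigned range test on the displacement. The predicate, modeled standalone:

    #include <cstdio>

    // Models Immediate::is_uimm12: base-form z/Architecture stores encode
    // only a 12-bit unsigned displacement; larger (or negative) values
    // need the long-displacement (...y) form.
    bool is_uimm12(long disp) { return disp >= 0 && disp < (1L << 12); }

    int main() {
      printf("%d %d\n", (int)is_uimm12(4095), (int)is_uimm12(4096));  // 1 0
      return 0;
    }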
--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp	Wed May 02 09:16:10 2018 -0700
@@ -140,7 +140,13 @@
                                             int shift, int disp, BasicType type) {
   assert(base->is_register(), "must be");
   if (index->is_constant()) {
-    intptr_t large_disp = ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp;
+    intx large_disp = disp;
+    LIR_Const *constant = index->as_constant_ptr();
+    if (constant->type() == T_LONG) {
+      large_disp += constant->as_jlong() << shift;
+    } else {
+      large_disp += (intx)(constant->as_jint()) << shift;
+    }
     if (Displacement::is_validDisp(large_disp)) {
       return new LIR_Address(base, large_disp, type);
     }
@@ -159,7 +165,7 @@
 }
 
 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
-                                              BasicType type, bool needs_card_mark) {
+                                              BasicType type) {
   int elem_size = type2aelembytes(type);
   int shift = exact_log2(elem_size);
   int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
@@ -181,16 +187,7 @@
                            index_opr,
                            offset_in_bytes, type);
   }
-  if (needs_card_mark) {
-    // This store will need a precise card mark, so go ahead and
-    // compute the full address up front instead of computing it once
-    // for the store and again for the card mark.
-    LIR_Opr tmp = new_pointer_register();
-    __ leal(LIR_OprFact::address(addr), tmp);
-    return new LIR_Address(tmp, type);
-  } else {
-    return addr;
-  }
+  return addr;
 }
 
 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@@ -252,86 +249,11 @@
 //             visitor functions
 //----------------------------------------------------------------------
 
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
-  assert(x->is_pinned(),"");
-  bool needs_range_check = x->compute_needs_range_check();
-  bool use_length = x->length() != NULL;
-  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
-  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
-                                         !get_jobject_constant(x->value())->is_null_object() ||
-                                         x->should_profile());
-
-  LIRItem array(x->array(), this);
-  LIRItem index(x->index(), this);
-  LIRItem value(x->value(), this);
-  LIRItem length(this);
-
-  array.load_item();
-  index.load_nonconstant(20);
-
-  if (use_length && needs_range_check) {
-    length.set_instruction(x->length());
-    length.load_item();
-  }
-  if (needs_store_check || x->check_boolean()) {
-    value.load_item();
-  } else {
-    value.load_for_store(x->elt_type());
-  }
-
-  set_no_result(x);
-
-  // The CodeEmitInfo must be duplicated for each different
-  // LIR-instruction because spilling can occur anywhere between two
-  // instructions and so the debug information must be different.
-  CodeEmitInfo* range_check_info = state_for (x);
-  CodeEmitInfo* null_check_info = NULL;
-  if (x->needs_null_check()) {
-    null_check_info = new CodeEmitInfo(range_check_info);
-  }
-
-  // Emit array address setup early so it schedules better.
-  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-  if (value.result()->is_constant() && array_addr->index()->is_valid()) {
-    // Constants cannot be stored with index register on ZARCH_64 (see LIR_Assembler::const2mem()).
-    LIR_Opr tmp = new_pointer_register();
-    __ leal(LIR_OprFact::address(array_addr), tmp);
-    array_addr = new LIR_Address(tmp, x->elt_type());
-  }
-
-  if (GenerateRangeChecks && needs_range_check) {
-    if (use_length) {
-      __ cmp(lir_cond_belowEqual, length.result(), index.result());
-      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
-    } else {
-      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
-      // Range_check also does the null check.
-      null_check_info = NULL;
-    }
-  }
-
-  if (GenerateArrayStoreCheck && needs_store_check) {
-    LIR_Opr tmp1 = new_register(objectType);
-    LIR_Opr tmp2 = new_register(objectType);
-    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-
-    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
-    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
-  }
-
-  if (obj_store) {
-    // Needs GC write barriers.
-    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-  }
-
-  LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
-  __ move(result, array_addr, null_check_info);
-
-  if (obj_store) {
-    // Precise card mark
-    post_barrier(LIR_OprFact::address(array_addr), value.result());
-  }
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+  LIR_Opr tmp1 = new_register(objectType);
+  LIR_Opr tmp2 = new_register(objectType);
+  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
+  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 }
 
 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
@@ -665,59 +587,42 @@
   }
 }
 
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
-  assert(x->number_of_arguments() == 4, "wrong type");
-  LIRItem obj   (x->argument_at(0), this);  // object
-  LIRItem offset(x->argument_at(1), this);  // offset of field
-  LIRItem cmp   (x->argument_at(2), this);  // Value to compare with field.
-  LIRItem val   (x->argument_at(3), this);  // Replace field with val if matches cmp.
-
-  // Get address of field.
-  obj.load_item();
-  offset.load_nonconstant(20);
-  cmp.load_item();
-  val.load_item();
-
-  LIR_Opr addr = new_pointer_register();
-  LIR_Address* a;
-  if (offset.result()->is_constant()) {
-    assert(Immediate::is_simm20(offset.result()->as_jlong()), "should have been loaded into register");
-    a = new LIR_Address(obj.result(),
-                        offset.result()->as_jlong(),
-                        as_BasicType(type));
-  } else {
-    a = new LIR_Address(obj.result(),
-                        offset.result(),
-                        0,
-                        as_BasicType(type));
-  }
-  __ leal(LIR_OprFact::address(a), addr);
-
-  if (type == objectType) {  // Write-barrier needed for Object fields.
-    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-  }
-
-  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
-  if (type == objectType) {
-    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_OBJECT), new_register(T_OBJECT));
-  } else if (type == intType) {
-    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
-  } else if (type == longType) {
-    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+  LIR_Opr t1 = LIR_OprFact::illegalOpr;
+  LIR_Opr t2 = LIR_OprFact::illegalOpr;
+  cmp_value.load_item();
+  new_value.load_item();
+  if (type == T_OBJECT) {
+    if (UseCompressedOops) {
+      t1 = new_register(T_OBJECT);
+      t2 = new_register(T_OBJECT);
+    }
+    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+  } else if (type == T_INT) {
+    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+  } else if (type == T_LONG) {
+    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
   } else {
     ShouldNotReachHere();
   }
   // Generate conditional move of boolean result.
-  LIR_Opr result = rlock_result(x);
+  LIR_Opr result = new_register(T_INT);
   __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
-           result, as_BasicType(type));
-  if (type == objectType) {  // Write-barrier needed for Object fields.
-    // Precise card mark since could either be object or array
-    post_barrier(addr, val.result());
-  }
+           result, type);
+  return result;
 }
 
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+  Unimplemented(); // Currently not supported on this platform.
+  return LIR_OprFact::illegalOpr;
+}
+
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+  LIR_Opr result = new_register(type);
+  value.load_item();
+  __ xadd(addr, value.result(), result, LIR_OprFact::illegalOpr);
+  return result;
+}
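
Note: leaving atomic_xchg as Unimplemented() is consistent with the code removed below: the old do_UnsafeGetAndSetObject asserted x->is_add(), so s390 C1 only ever supported the add flavor of this intrinsic.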
 
 void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
   switch (x->id()) {
@@ -970,7 +875,7 @@
   LIRItem obj(x->obj(), this);
 
   CodeEmitInfo* patching_info = NULL;
-  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
+  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
     // Must do this before locking the destination register as an oop register,
     // and before the obj is loaded (the latter is for deoptimization).
     patching_info = state_for (x, x->state_before());
@@ -1104,57 +1009,6 @@
   __ load(address, result, info);
 }
 
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
-                                     BasicType type, bool is_volatile) {
-  LIR_Address* addr = new LIR_Address(src, offset, type);
-  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-  if (is_obj) {
-    // Do the pre-write barrier, if any.
-    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
-                true /* do_load */, false /* patch */, NULL);
-    __ move(data, addr);
-    assert(src->is_register(), "must be register");
-    // Seems to be a precise address.
-    post_barrier(LIR_OprFact::address(addr), data);
-  } else {
-    __ move(data, addr);
-  }
-}
-
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
-                                     BasicType type, bool is_volatile) {
-  LIR_Address* addr = new LIR_Address(src, offset, type);
-  __ load(addr, dst);
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
-  BasicType type = x->basic_type();
-  assert (x->is_add() && type != T_ARRAY && type != T_OBJECT, "not supported");
-  LIRItem src(x->object(), this);
-  LIRItem off(x->offset(), this);
-  LIRItem value(x->value(), this);
-
-  src.load_item();
-  value.load_item();
-  off.load_nonconstant(20);
-
-  LIR_Opr dst = rlock_result(x, type);
-  LIR_Opr data = value.result();
-  LIR_Opr offset = off.result();
-
-  LIR_Address* addr;
-  if (offset->is_constant()) {
-    assert(Immediate::is_simm20(offset->as_jlong()), "should have been loaded into register");
-    addr = new LIR_Address(src.result(), offset->as_jlong(), type);
-  } else {
-    addr = new LIR_Address(src.result(), offset, type);
-  }
-
-  __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
-}
-
 void LIRGenerator::do_update_CRC32(Intrinsic* x) {
   assert(UseCRC32Intrinsics, "or should not be here");
   LIR_Opr result = rlock_result(x);
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp	Wed May 02 09:16:10 2018 -0700
@@ -42,11 +42,6 @@
 #include "utilities/macros.hpp"
 #include "vmreg_s390.inline.hpp"
 #include "registerSaver_s390.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
 
 // Implementation of StubAssembler
 
@@ -190,15 +185,6 @@
   return RegisterSaver::save_live_registers(sasm, reg_set);
 }
 
-static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
-  __ block_comment("save_volatile_registers");
-  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
-  int frame_size_in_slots =
-    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
-  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
-  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
-}
-
 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
   __ block_comment("restore_live_registers");
   RegisterSaver::RegisterSet reg_set =
@@ -214,12 +200,6 @@
   RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
 }
 
-static void restore_volatile_registers(StubAssembler* sasm) {
-  __ block_comment("restore_volatile_registers");
-  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
-  RegisterSaver::restore_live_registers(sasm, reg_set);
-}
-
 void Runtime1::initialize_pd() {
   // Nothing to do.
 }
@@ -764,160 +744,6 @@
       break;
 #endif // TODO
 
-#if INCLUDE_ALL_GCS
-    case g1_pre_barrier_slow_id:
-      { // Z_R1_scratch: previous value of memory
-
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          __ should_not_reach_here(FILE_AND_LINE);
-          break;
-        }
-
-        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
-
-        Register pre_val = Z_R1_scratch;
-        Register tmp  = Z_R6; // Must be non-volatile because it is used to save pre_val.
-        Register tmp2 = Z_R7;
-
-        Label refill, restart, marking_not_active;
-        int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
-        int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
-        int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
-
-        // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
-        __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-        __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-
-        // Is marking still active?
-        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
-          __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
-        } else {
-          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
-          __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
-        }
-        __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
-
-        __ bind(restart);
-        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
-        // size_t so ld_ptr is appropriate.
-        __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
-
-        // index == 0?
-        __ z_brz(refill);
-
-        __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
-        __ add2reg(tmp, -oopSize);
-
-        __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := pre_val
-        __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
-
-        __ bind(marking_not_active);
-        // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
-        __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-        __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-        __ z_br(Z_R14);
-
-        __ bind(refill);
-        save_volatile_registers(sasm);
-        __ z_lgr(tmp, pre_val); // save pre_val
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
-                        Z_thread);
-        __ z_lgr(pre_val, tmp); // restore pre_val
-        restore_volatile_registers(sasm);
-        __ z_bru(restart);
-      }
-      break;
-
-    case g1_post_barrier_slow_id:
-      { // Z_R1_scratch: oop address, address of updated memory slot
-        BarrierSet* bs = BarrierSet::barrier_set();
-        if (bs->kind() != BarrierSet::G1BarrierSet) {
-          __ should_not_reach_here(FILE_AND_LINE);
-          break;
-        }
-
-        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
-
-        Register addr_oop  = Z_R1_scratch;
-        Register addr_card = Z_R1_scratch;
-        Register r1        = Z_R6; // Must be saved/restored.
-        Register r2        = Z_R7; // Must be saved/restored.
-        Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
-        jbyte* byte_map_base = ci_card_table_address();
-
-        // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
-        __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-
-        Label not_already_dirty, restart, refill, young_card;
-
-        // Calculate address of card corresponding to the updated oop slot.
-        AddressLiteral rs(byte_map_base);
-        __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
-        addr_oop = noreg; // dead now
-        __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
-        __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
-
-        __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
-        __ z_bre(young_card);
-
-        __ z_sync(); // Required to support concurrent cleaning.
-
-        __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
-        __ z_brne(not_already_dirty);
-
-        __ bind(young_card);
-        // We didn't take the branch, so we're already dirty: restore
-        // used registers and return.
-        __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-        __ z_br(Z_R14);
-
-        // Not dirty.
-        __ bind(not_already_dirty);
-
-        // First, dirty it: [addr_card] := 0
-        __ z_mvi(0, addr_card, CardTable::dirty_card_val());
-
-        Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
-        Register buf = r2;
-        cardtable = noreg; // now dead
-
-        // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
-        __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-
-        ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
-        ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();
-
-        __ bind(restart);
-
-        // Get the index into the update buffer. DirtyCardQueue::_index is
-        // a size_t so z_ltg is appropriate here.
-        __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
-
-        // index == 0?
-        __ z_brz(refill);
-
-        __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
-        __ add2reg(idx, -oopSize);
-
-        __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
-        __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
-        // Restore killed registers and return.
-        __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-        __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-        __ z_br(Z_R14);
-
-        __ bind(refill);
-        save_volatile_registers(sasm);
-        __ z_lgr(idx, addr_card); // Save addr_card, tmp3 must be non-volatile.
-        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
-                                         Z_thread);
-        __ z_lgr(addr_card, idx);
-        restore_volatile_registers(sasm); // Restore addr_card.
-        __ z_bru(restart);
-      }
-      break;
-#endif // INCLUDE_ALL_GCS
     case predicate_failed_trap_id:
       {
         __ set_info("predicate_failed_trap", dont_gc_arguments);
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp	Wed May 02 09:16:10 2018 -0700
@@ -33,6 +33,11 @@
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "runtime/sharedRuntime.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
 
 #define __ masm->
 
@@ -406,4 +411,209 @@
   __ bind(Ldone);
 }
 
+#ifdef COMPILER1
+
 #undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  // At this point we know that marking is in progress.
+  // If do_load() is true then we have to emit the
+  // load of the previous value; otherwise it has already
+  // been loaded into _pre_val.
+  __ bind(*stub->entry());
+  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
+  assert(stub->pre_val()->is_register(), "Precondition.");
+
+  Register pre_val_reg = stub->pre_val()->as_register();
+
+  if (stub->do_load()) {
+    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+  }
+
+  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to the g1_pre_barrier_slow_id runtime stub.
+  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
+  ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
+  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+  __ bind(*stub->entry());
+  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
+  assert(stub->addr()->is_register(), "Precondition.");
+  assert(stub->new_val()->is_register(), "Precondition.");
+  Register new_val_reg = stub->new_val()->as_register();
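+  // Storing NULL cannot create a cross-region reference, so the post barrier can be skipped.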
+  __ z_ltgr(new_val_reg, new_val_reg);
+  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
+  __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
+  ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
+  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
+}
+
+#undef __
+
+#define __ sasm->
+
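+// The runtime calls made from the refill paths below clobber all volatile
+// registers; these helpers spill and reload them around call_VM_leaf.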
+static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
+  __ block_comment("save_volatile_registers");
+  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
+  int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
+  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
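+  // Save all volatile registers and return an OopMap describing the resulting frame.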
+  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
+}
+
+static void restore_volatile_registers(StubAssembler* sasm) {
+  __ block_comment("restore_volatile_registers");
+  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
+  RegisterSaver::restore_live_registers(sasm, reg_set);
+}
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+  // Z_R1_scratch: previous value of memory
+
+  BarrierSet* bs = BarrierSet::barrier_set();
+  __ set_info("g1_pre_barrier_slow_id", false);
+
+  Register pre_val = Z_R1_scratch;
+  Register tmp  = Z_R6; // Must be non-volatile because it is used to save pre_val.
+  Register tmp2 = Z_R7;
+
+  Label refill, restart, marking_not_active;
+  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
+  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+
+  // Save tmp registers (see check_reserved_argument_area(16) in gen_pre_barrier_stub()).
+  __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+  __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+
+  // Is marking still active?
+  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+    __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
+  } else {
+    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+    __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
+  }
+  __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
+
+  __ bind(restart);
+  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
+  // size_t, so z_ltg is appropriate here.
+  __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
+
+  // index == 0?
+  __ z_brz(refill);
+
+  __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
+  __ add2reg(tmp, -oopSize);
+
+  __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <pre_val>
+  __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
+
+  __ bind(marking_not_active);
+  // Restore tmp registers (see check_reserved_argument_area(16) in gen_pre_barrier_stub()).
+  __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+  __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+  __ z_br(Z_R14);
+
+  __ bind(refill);
+  save_volatile_registers(sasm);
+  __ z_lgr(tmp, pre_val); // save pre_val
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
+                  Z_thread);
+  __ z_lgr(pre_val, tmp); // restore pre_val
+  restore_volatile_registers(sasm);
+  __ z_bru(restart);
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+  // Z_R1_scratch: oop address, address of updated memory slot
+
+  BarrierSet* bs = BarrierSet::barrier_set();
+  __ set_info("g1_post_barrier_slow_id", false);
+
+  Register addr_oop  = Z_R1_scratch;
+  Register addr_card = Z_R1_scratch;
+  Register r1        = Z_R6; // Must be saved/restored.
+  Register r2        = Z_R7; // Must be saved/restored.
+  Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
+  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+  CardTable* ct = ctbs->card_table();
+  jbyte* byte_map_base = ct->byte_map_base();
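+  // byte_map_base is biased so that (address >> card_shift) indexes the matching card byte directly.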
+
+  // Save registers used below (see check_reserved_argument_area(16) in gen_post_barrier_stub()).
+  __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+
+  Label not_already_dirty, restart, refill, young_card;
+
+  // Calculate address of card corresponding to the updated oop slot.
+  AddressLiteral rs(byte_map_base);
+  __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
+  addr_oop = noreg; // dead now
+  __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
+  __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
+
+  __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
+  __ z_bre(young_card);
+
+  __ z_sync(); // Required to support concurrent cleaning.
+
+  __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
+  __ z_brne(not_already_dirty);
+
+  __ bind(young_card);
+  // The card is young or already dirty: no enqueue is needed.
+  // Restore used registers and return.
+  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+  __ z_br(Z_R14);
+
+  // Not dirty.
+  __ bind(not_already_dirty);
+
+  // First, dirty it: [addr_card] := 0
+  __ z_mvi(0, addr_card, CardTable::dirty_card_val());
+
+  Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
+  Register buf = r2;
+  cardtable = noreg; // now dead
+
+  // Save registers used below (see check_reserved_argument_area(16) in gen_post_barrier_stub()).
+  __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+
+  ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
+  ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();
+
+  __ bind(restart);
+
+  // Get the index into the update buffer. DirtyCardQueue::_index is
+  // a size_t so z_ltg is appropriate here.
+  __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
+
+  // index == 0?
+  __ z_brz(refill);
+
+  __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
+  __ add2reg(idx, -oopSize);
+
+  __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
+  __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
+  // Restore killed registers and return.
+  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+  __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+  __ z_br(Z_R14);
+
+  __ bind(refill);
+  save_volatile_registers(sasm);
+  __ z_lgr(idx, addr_card); // Save addr_card; idx must be non-volatile.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
+                                   Z_thread);
+  __ z_lgr(addr_card, idx);
+  restore_volatile_registers(sasm); // Restore addr_card.
+  __ z_bru(restart);
+}
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp	Wed May 02 09:16:10 2018 -0700
@@ -28,6 +28,12 @@
 
 #include "asm/macroAssembler.hpp"
 #include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
 
 class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
  protected:
@@ -50,6 +56,14 @@
                             const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3);
 
  public:
+#ifdef COMPILER1
+  void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+  void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+  void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+  void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        const Address& src, Register dst, Register tmp1, Register tmp2, Label *is_null = NULL);
 
--- a/src/hotspot/cpu/s390/s390.ad	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/s390/s390.ad	Wed May 02 09:16:10 2018 -0700
@@ -8401,16 +8401,6 @@
   ins_pipe(pipe_class_dummy);
 %}
 
-instruct compU_reg_imm0(flagsReg cr, iRegI op1, immI_0 zero) %{
-  match(Set cr (CmpU op1 zero));
-  ins_cost(DEFAULT_COST_LOW);
-  size(2);
-  format %{ "LTR     $op1,$op1\t # unsigned" %}
-  opcode(LTR_ZOPC);
-  ins_encode(z_rrform(op1, op1));
-  ins_pipe(pipe_class_dummy);
-%}
-
 instruct compU_reg_mem(flagsReg cr, iRegI op1, memory op2)%{
   match(Set cr (CmpU op1 (LoadI op2)));
   ins_cost(MEMORY_REF_COST);
--- a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp	Wed May 02 15:11:54 2018 +0530
+++ b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp	Wed May 02 09:16:10 2018 -0700
@@ -32,9 +32,6 @@
 #include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_sparc.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
 
 #define __ ce->masm()->
 
@@ -454,63 +451,4 @@
   __ delayed()->nop();
 }
 
-
-///////////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
-  // At this point we know that marking is in progress.
-  // If do_load() is true then we have to emit the
-  // load of the previous value; otherwise it has already
-  // been loaded into _pre_val.
-
-  __ bind(_entry);
-
-  assert(pre_val()->is_register(), "Precondition.");
-  Register pre_val_reg = pre_val()->as_register();
-
-  if (do_load()) {
-    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
-  }
-
-  if (__ is_in_wdisp16_range(_continuation)) {
-    __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
-  } else {
-    __ cmp(pre_val_reg, G0);
-    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
-  }
-  __ delayed()->nop();
-
-  __ call(Runtime1::entry_for(Runtime1::Runti