changeset 13400:23d7878acfb7 nestmates

Automatic merge with default
author stsmirno
date Thu, 10 Aug 2017 04:49:27 -0700
parents 5bd16e6e1135 6ad02163b738
children 073ad89cfa90
files make/templates/gpl-cp-header make/templates/gpl-header src/cpu/aarch64/vm/debug_aarch64.cpp src/cpu/arm/vm/debug_arm.cpp src/cpu/ppc/vm/debug_ppc.cpp src/cpu/ppc/vm/globals_ppc.hpp src/cpu/s390/vm/debug_s390.cpp src/cpu/sparc/vm/debug_sparc.cpp src/cpu/sparc/vm/globals_sparc.hpp src/cpu/x86/vm/abstractInterpreter_x86.cpp src/cpu/x86/vm/debug_x86.cpp src/cpu/x86/vm/frame_x86.cpp src/cpu/x86/vm/globals_x86.hpp src/cpu/x86/vm/interpreterRT_x86.hpp src/cpu/x86/vm/interpreterRT_x86_32.cpp src/cpu/x86/vm/interpreterRT_x86_64.cpp src/cpu/x86/vm/sharedRuntime_x86_32.cpp src/cpu/x86/vm/sharedRuntime_x86_64.cpp src/cpu/x86/vm/templateTable_x86.cpp src/cpu/zero/vm/debug_zero.cpp src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64AddressLowering.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64SuitesProvider.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64SuitesProvider.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/LocationIdentity.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.sparc/src/org/graalvm/compiler/core/sparc/SPARCSuitesProvider.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTest1.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTest2.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTest3.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTest4.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTest5.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTest6.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTestInterception01.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/MethodMetricsTestInterception02.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/debug/VerifyMethodMetricsTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/GraalDebugInitializationParticipant.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug.test/src/org/graalvm/compiler/debug/test/DebugHistogramTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug.test/src/org/graalvm/compiler/debug/test/DebugTimerTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/Debug.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugConfigCustomizer.java 
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugConfigScope.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugCounter.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugEnvironment.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugHistogram.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugInitializationParticipant.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugMethodMetrics.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugRetryableTask.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugTimer.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DebugValueFactory.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DelegatingDebugConfig.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/Fingerprint.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/GraalDebugConfig.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/TopLevelDebugConfig.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/AccumulatedDebugValue.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/CloseableCounterImpl.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/CounterImpl.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/DebugHistogramAsciiPrinter.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/DebugHistogramImpl.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/DebugHistogramRPrinter.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/DebugScope.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/DebugValue.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/DebugValueMap.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/DebugValuesPrinter.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/KeyRegistry.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/MemUseTrackerImpl.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/TimerImpl.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/method/MethodMetricsImpl.java 
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/method/MethodMetricsInlineeScopeInfo.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/method/MethodMetricsPrinter.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/internal/method/MethodMetricsRootScopeInfo.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotLIRKindTool.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRKindTool.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotSuitesProvider.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.sparc/src/org/graalvm/compiler/hotspot/sparc/SPARCHotSpotLIRKindTool.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/RetryableCompilationTest.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/FingerprintUtil.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/nodes/CompressionNode.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/nodes/type/HotSpotLIRKindTool.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/nodes/type/NarrowOopStamp.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/DefaultSuitesProvider.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceIntervalWalker.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.options/src/org/graalvm/compiler/options/UniquePathUtilities.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/DominatorConditionalEliminationPhase.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/NewConditionalEliminationPhase.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/GraalDebugConfigCustomizer.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/WordOperationPlugin.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/AtomicUnsigned.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/AtomicWord.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/ComparableWord.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/Pointer.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/PointerBase.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/PointerUtils.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/Signed.java 
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/Unsigned.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/UnsignedUtils.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/WordBase.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.word/src/org/graalvm/compiler/word/nodes/WordCastNode.java src/os/aix/vm/interfaceSupport_aix.hpp src/os/bsd/vm/interfaceSupport_bsd.hpp src/os/bsd/vm/stubRoutines_bsd.cpp src/os/linux/vm/interfaceSupport_linux.hpp src/os/linux/vm/stubRoutines_linux.cpp src/os/solaris/vm/interfaceSupport_solaris.hpp src/os/solaris/vm/stubRoutines_solaris.cpp src/os/windows/vm/interfaceSupport_windows.hpp src/os/windows/vm/stubRoutines_windows.cpp src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp src/share/vm/ci/bcEscapeAnalyzer.cpp src/share/vm/ci/ciEnv.cpp src/share/vm/ci/ciInstance.cpp src/share/vm/ci/ciInstanceKlass.cpp src/share/vm/ci/ciInstanceKlass.hpp src/share/vm/ci/ciMethod.cpp src/share/vm/ci/ciMethodBlocks.cpp src/share/vm/ci/ciReplay.cpp src/share/vm/ci/ciTypeFlow.cpp src/share/vm/classfile/classFileParser.cpp src/share/vm/classfile/classFileParser.hpp src/share/vm/classfile/classLoader.cpp src/share/vm/classfile/javaClasses.cpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/classfile/systemDictionary.hpp src/share/vm/classfile/vmSymbols.cpp src/share/vm/classfile/vmSymbols.hpp src/share/vm/code/codeBlob.cpp src/share/vm/code/codeBlob.hpp src/share/vm/compiler/compileBroker.cpp src/share/vm/gc/parallel/psCompactionManager.cpp src/share/vm/gc/parallel/psParallelCompact.cpp src/share/vm/gc/parallel/psPromotionManager.cpp src/share/vm/interpreter/abstractInterpreter.cpp src/share/vm/interpreter/abstractInterpreter.hpp src/share/vm/interpreter/bytecode.hpp src/share/vm/interpreter/bytecodeTracer.cpp src/share/vm/interpreter/bytecodes.cpp src/share/vm/interpreter/interpreterRuntime.cpp src/share/vm/interpreter/interpreterRuntime.hpp src/share/vm/interpreter/linkResolver.cpp src/share/vm/interpreter/oopMapCache.cpp src/share/vm/interpreter/rewriter.cpp src/share/vm/jvmci/jvmciCompilerToVM.cpp src/share/vm/logging/logStream.inline.hpp src/share/vm/memory/freeBlockDictionary.cpp src/share/vm/memory/freeBlockDictionary.hpp src/share/vm/oops/arrayOop.hpp src/share/vm/oops/cpCache.cpp src/share/vm/oops/cpCache.hpp src/share/vm/oops/generateOopMap.cpp src/share/vm/oops/generateOopMap.hpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/instanceKlass.hpp src/share/vm/oops/klass.hpp src/share/vm/oops/klassVtable.cpp src/share/vm/oops/klassVtable.hpp src/share/vm/oops/method.cpp src/share/vm/oops/methodData.cpp src/share/vm/oops/oop.hpp src/share/vm/oops/oop.inline.hpp src/share/vm/opto/buildOopMap.cpp src/share/vm/opto/cfgnode.cpp src/share/vm/opto/chaitin.cpp src/share/vm/opto/classes.hpp src/share/vm/opto/compile.cpp src/share/vm/opto/compile.hpp src/share/vm/opto/escape.cpp src/share/vm/opto/graphKit.cpp src/share/vm/opto/graphKit.hpp src/share/vm/opto/lcm.cpp src/share/vm/opto/library_call.cpp src/share/vm/opto/live.cpp src/share/vm/opto/loopopts.cpp src/share/vm/opto/machnode.cpp src/share/vm/opto/macro.cpp src/share/vm/opto/macroArrayCopy.cpp src/share/vm/opto/matcher.cpp src/share/vm/opto/memnode.cpp src/share/vm/opto/parse1.cpp src/share/vm/opto/parse2.cpp src/share/vm/opto/parse3.cpp src/share/vm/opto/parseHelper.cpp src/share/vm/opto/runtime.cpp src/share/vm/opto/type.cpp 
src/share/vm/opto/type.hpp src/share/vm/prims/jni.cpp src/share/vm/prims/jvm.cpp src/share/vm/prims/jvm.h src/share/vm/prims/whitebox.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/deoptimization.cpp src/share/vm/runtime/deoptimization.hpp src/share/vm/runtime/frame.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/handles.hpp src/share/vm/runtime/javaCalls.cpp src/share/vm/runtime/reflection.cpp src/share/vm/runtime/sharedRuntime.cpp src/share/vm/runtime/sharedRuntime.hpp src/share/vm/utilities/globalDefinitions.hpp test/TEST.groups test/compiler/cpuflags/predicate/AESSupportPredicate.java test/compiler/rtm/cli/TestRTMAbortRatioOptionOnSupportedConfig.java test/compiler/rtm/cli/TestRTMAbortRatioOptionOnUnsupportedConfig.java test/compiler/rtm/cli/TestRTMTotalCountIncrRateOptionOnUnsupportedConfig.java test/compiler/testlibrary/rtm/predicate/SupportedCPU.java test/compiler/testlibrary/rtm/predicate/SupportedOS.java test/compiler/testlibrary/rtm/predicate/SupportedVM.java test/runtime/modules/JVMAddModulePackage.java
diffstat 1923 files changed, 49364 insertions(+), 37703 deletions(-)
--- a/.hgtags	Mon May 15 14:13:57 2017 -0400
+++ b/.hgtags	Thu Aug 10 04:49:27 2017 -0700
@@ -576,3 +576,19 @@
 8295ca08f5cb09c090eb048bbdd338d7e270c8bf jdk-10+4
 7b5ca2ff1f78873ca3ee99b6589d3cb4dde2e454 jdk-10+5
 762465099d938fd96cd1efda193bc1fa23d070d3 jdk-10+6
+1ca7ed1b17b5776930d641d1379834f3140a74e4 jdk-9+167
+fbb9c802649585d19f6d7e81b4a519d44806225a jdk-9+168
+16d692be099c5c38eb48cc9aca78b0c900910d5b jdk-9+169
+38a240fd58a287acb1963920b92ed4d9c2fd39e3 jdk-9+170
+9d4746eca95aec3e5a344bf2520745dcc1d17eed jdk-10+7
+f5ded0cf954c770deeecb80f2ba1ba6a05cd979b jdk-10+8
+233647e3d3800e76d7612014b745b37a88098f63 jdk-10+9
+d53171650a2cc6c6f699c966c533b914ca9c0602 jdk-9+171
+c6cd3ec8d46b034e57c86399380ffcf7f25706e4 jdk-10+10
+1ae9e84f68b359420d2d153ecfe5ee2903e33a2e jdk-9+172
+7f14e550f1e8abea41c223e5fdad2261e99ba929 jdk-10+11
+e64b1cb48d6e7703928a9d1da106fc27f8cb65fd jdk-9+173
+944791f8160185bffa13fbb821fc09b6198f1f25 jdk-9+174
+070aa7a2eb14c4645f7eb31384cba0a2ba72a4b5 jdk-10+12
+8f04d457168b9f1f4a1b2c37f49e0513ca9d33a7 jdk-9+175
+a9da03357f190807591177fe9846d6e68ad64fc0 jdk-10+13
--- a/.jcheck/conf	Mon May 15 14:13:57 2017 -0400
+++ b/.jcheck/conf	Thu Aug 10 04:49:27 2017 -0700
@@ -1,1 +1,2 @@
 project=jdk10
+bugids=dup
--- a/.mx.jvmci/.pydevproject	Mon May 15 14:13:57 2017 -0400
+++ b/.mx.jvmci/.pydevproject	Thu Aug 10 04:49:27 2017 -0700
@@ -3,7 +3,7 @@
 <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
 <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
 <pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/.mx.jvmci</path>
+<path>/mx.jvmci</path>
 </pydev_pathproperty>
 <pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
 <path>/mx</path>
--- a/.mx.jvmci/mx_jvmci.py	Mon May 15 14:13:57 2017 -0400
+++ b/.mx.jvmci/mx_jvmci.py	Thu Aug 10 04:49:27 2017 -0700
@@ -303,9 +303,9 @@
                         out.close('link')
 
                     out.open('link')
-                    out.element('name', data='generated')
+                    out.element('name', data='gensrc')
                     out.element('type', data='2')
-                    generated = join(_get_hotspot_build_dir(jvmVariant, debugLevel), 'generated')
+                    generated = join(_get_hotspot_build_dir(jvmVariant, debugLevel), 'gensrc')
                     out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(generated, eclProjectDir))
                     out.close('link')
 
@@ -620,18 +620,12 @@
 def _get_hotspot_build_dir(jvmVariant=None, debugLevel=None):
     """
     Gets the directory in which a particular HotSpot configuration is built
-    (e.g., <JDK_REPO_ROOT>/build/macosx-x86_64-normal-server-release/hotspot/bsd_amd64_compiler2)
+    (e.g., <JDK_REPO_ROOT>/build/macosx-x86_64-normal-server-release/hotspot/variant-<variant>)
     """
     if jvmVariant is None:
         jvmVariant = _vm.jvmVariant
 
-    os = mx.get_os()
-    if os == 'darwin':
-        os = 'bsd'
-    arch = mx.get_arch()
-    buildname = {'client': 'compiler1', 'server': 'compiler2'}.get(jvmVariant, jvmVariant)
-
-    name = '{}_{}_{}'.format(os, arch, buildname)
+    name = 'variant-{}'.format(jvmVariant)
     return join(_get_jdk_build_dir(debugLevel=debugLevel), 'hotspot', name)
 
 class JVMCI9JDKConfig(mx.JDKConfig):
--- a/make/CompileTools.gmk	Mon May 15 14:13:57 2017 -0400
+++ b/make/CompileTools.gmk	Thu Aug 10 04:49:27 2017 -0700
@@ -47,6 +47,7 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
+          $(SRC_DIR)/org.graalvm.word/src \
           $(SRC_DIR)/org.graalvm.compiler.core/src \
           $(SRC_DIR)/org.graalvm.compiler.core.common/src \
           $(SRC_DIR)/org.graalvm.compiler.core.match.processor/src \
@@ -114,6 +115,7 @@
   $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \
       SETUP := GENERATE_OLDBYTECODE, \
       SRC := \
+          $(SRC_DIR)/org.graalvm.word/src \
           $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
           $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
           $(SRC_DIR)/org.graalvm.compiler.code/src \
--- a/make/gensrc/GensrcAdlc.gmk	Mon May 15 14:13:57 2017 -0400
+++ b/make/gensrc/GensrcAdlc.gmk	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,7 @@
       OUTPUT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/adlc, \
       PROGRAM := adlc, \
       DEBUG_SYMBOLS := false, \
-      DISABLED_WARNINGS_clang := parentheses tautological-compare, \
+      DISABLED_WARNINGS_clang := tautological-compare, \
       DISABLED_WARNINGS_solstudio := notemsource, \
   ))
 
--- a/make/lib/CompileDtracePostJvm.gmk	Mon May 15 14:13:57 2017 -0400
+++ b/make/lib/CompileDtracePostJvm.gmk	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@
         CXX := $(BUILD_CXX), \
         LDEXE := $(BUILD_CXX), \
         generateJvmOffsets.cpp_CXXFLAGS := $(JVM_CFLAGS) -mt -xnolib -norunpath, \
-        generateJvmOffsetsMain.c_CFLAGS := -library=%none -mt -m64 -norunpath -z nodefs, \
+        generateJvmOffsetsMain.c_CFLAGS := -mt -m64 -norunpath -z nodefs, \
         LDFLAGS := -m64, \
         LIBS := -lc, \
         OBJECT_DIR := $(JVM_VARIANT_OUTPUTDIR)/tools/dtrace-gen-offsets/objs, \
--- a/make/lib/CompileGtest.gmk	Mon May 15 14:13:57 2017 -0400
+++ b/make/lib/CompileGtest.gmk	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,8 @@
 	$(call create-mapfile)
 endif
 
-# Disabling switch warning for clang because of test source.
+# Disabling undef, switch, format-nonliteral and tautological-undefined-compare
+# warnings for clang because of test source.
 
 # Note: On AIX, the gtest test classes linked into the libjvm.so push the TOC
 # size beyond 64k, so we need to link with bigtoc. However, this means that
--- a/make/lib/CompileJvm.gmk	Mon May 15 14:13:57 2017 -0400
+++ b/make/lib/CompileJvm.gmk	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,7 @@
     -DTARGET_ARCH_$(HOTSPOT_TARGET_CPU_ARCH) \
     -DINCLUDE_SUFFIX_OS=_$(HOTSPOT_TARGET_OS) \
     -DINCLUDE_SUFFIX_CPU=_$(HOTSPOT_TARGET_CPU_ARCH) \
+    -DINCLUDE_SUFFIX_COMPILER=_$(HOTSPOT_TOOLCHAIN_TYPE) \
     -DTARGET_COMPILER_$(HOTSPOT_TOOLCHAIN_TYPE) \
     -D$(HOTSPOT_TARGET_CPU_DEFINE) \
     -DHOTSPOT_LIB_ARCH='"$(OPENJDK_TARGET_CPU_LEGACY_LIB)"' \
@@ -217,9 +218,7 @@
     CFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
     CXXFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
     vm_version.cpp_CXXFLAGS := $(CFLAGS_VM_VERSION), \
-    DISABLED_WARNINGS_clang := delete-non-virtual-dtor dynamic-class-memaccess \
-        empty-body format logical-op-parentheses parentheses \
-        parentheses-equality switch tautological-compare, \
+    DISABLED_WARNINGS_clang := tautological-compare, \
     DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 \
         1540-1088 1500-010, \
     ASFLAGS := $(JVM_ASFLAGS), \
--- a/make/symbols/symbols-unix	Mon May 15 14:13:57 2017 -0400
+++ b/make/symbols/symbols-unix	Thu Aug 10 04:49:27 2017 -0700
@@ -188,7 +188,6 @@
 JVM_AddModuleExports
 JVM_AddModuleExportsToAll
 JVM_AddModuleExportsToAllUnnamed
-JVM_AddModulePackage
 JVM_AddReadsModule
 JVM_DefineModule
 JVM_SetBootLoaderUnnamedModule
--- a/make/templates/gpl-cp-header	Mon May 15 14:13:57 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,22 +0,0 @@
-Copyright (c) %YEARS%, Oracle and/or its affiliates. All rights reserved.
-DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
-This code is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License version 2 only, as
-published by the Free Software Foundation.  Oracle designates this
-particular file as subject to the "Classpath" exception as provided
-by Oracle in the LICENSE file that accompanied this code.
-
-This code is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-version 2 for more details (a copy is included in the LICENSE file that
-accompanied this code).
-
-You should have received a copy of the GNU General Public License version
-2 along with this work; if not, write to the Free Software Foundation,
-Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-or visit www.oracle.com if you need additional information or have any
-questions.
--- a/make/templates/gpl-header	Mon May 15 14:13:57 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-Copyright (c) %YEARS%, Oracle and/or its affiliates. All rights reserved.
-DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-
-This code is free software; you can redistribute it and/or modify it
-under the terms of the GNU General Public License version 2 only, as
-published by the Free Software Foundation.
-
-This code is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-version 2 for more details (a copy is included in the LICENSE file that
-accompanied this code).
-
-You should have received a copy of the GNU General Public License version
-2 along with this work; if not, write to the Free Software Foundation,
-Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-or visit www.oracle.com if you need additional information or have any
-questions.
--- a/make/test/JtregNative.gmk	Mon May 15 14:13:57 2017 -0400
+++ b/make/test/JtregNative.gmk	Thu Aug 10 04:49:27 2017 -0700
@@ -45,6 +45,7 @@
 BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
     $(HOTSPOT_TOPDIR)/test/gc/g1/TestJNIWeakG1 \
     $(HOTSPOT_TOPDIR)/test/gc/stress/gclocker \
+    $(HOTSPOT_TOPDIR)/test/gc/cslocker \
     $(HOTSPOT_TOPDIR)/test/native_sanity \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \
     $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \
@@ -61,6 +62,7 @@
     $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
     $(HOTSPOT_TOPDIR)/test/compiler/calls \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
+    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/IsModifiableModule \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleReads \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleExportsAndOpens \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleUsesAndProvides \
@@ -91,6 +93,7 @@
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libIsModifiableModuleTest := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libAddModuleReadsTest := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libAddModuleExportsAndOpensTest := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libAddModuleUsesAndProvidesTest := -lc
--- a/src/cpu/aarch64/vm/aarch64.ad	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/aarch64.ad	Thu Aug 10 04:49:27 2017 -0700
@@ -577,7 +577,7 @@
     R26
  /* R27, */                     // heapbase
  /* R28, */                     // thread
- /* R29, */                     // fp
+    R29,                        // fp
  /* R30, */                     // lr
  /* R31 */                      // sp
 );
@@ -646,7 +646,7 @@
     R26, R26_H,
  /* R27, R27_H, */              // heapbase
  /* R28, R28_H, */              // thread
- /* R29, R29_H, */              // fp
+    R29, R29_H,                 // fp
  /* R30, R30_H, */              // lr
  /* R31, R31_H */               // sp
 );
@@ -5218,7 +5218,7 @@
   // ppc port uses 0 but we definitely need to allow for fixed_slots
   // which folds in the space used for monitors
   return_addr(STACK - 2 +
-              round_to((Compile::current()->in_preserve_stack_slots() +
+              align_up((Compile::current()->in_preserve_stack_slots() +
                         Compile::current()->fixed_slots()),
                        stack_alignment_in_slots()));
 
@@ -5343,6 +5343,17 @@
   interface(CONST_INTER);
 %}
 
+// Shift values for add/sub extension shift
+operand immIExt()
+%{
+  predicate(0 <= n->get_int() && (n->get_int() <= 4));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 operand immI_le_4()
 %{
   predicate(n->get_int() <= 4);
@@ -5423,6 +5434,16 @@
   interface(CONST_INTER);
 %}
 
+operand immI_63()
+%{
+  predicate(n->get_int() == 63);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 operand immI_64()
 %{
   predicate(n->get_int() == 64);
@@ -5453,20 +5474,10 @@
   interface(CONST_INTER);
 %}
 
-operand immL_63()
-%{
-  predicate(n->get_int() == 63);
-  match(ConI);
-
-  op_cost(0);
-  format %{ %}
-  interface(CONST_INTER);
-%}
-
 operand immL_255()
 %{
-  predicate(n->get_int() == 255);
-  match(ConI);
+  predicate(n->get_long() == 255L);
+  match(ConL);
 
   op_cost(0);
   format %{ %}
@@ -10951,7 +10962,7 @@
 
 // Long Negation
 
-instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
+instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
   match(Set dst (SubL zero src));
 
   ins_cost(INSN_COST);
@@ -11146,7 +11157,7 @@
   ins_pipe(ldiv_reg_reg);
 %}
 
-instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
+instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{
   match(Set dst (URShiftL (RShiftL src1 div1) div2));
   ins_cost(INSN_COST);
   format %{ "lsr $dst, $src1, $div1" %}
@@ -11156,7 +11167,7 @@
   ins_pipe(ialu_reg_shift);
 %}
 
-instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
+instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{
   match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
   ins_cost(INSN_COST);
   format %{ "add $dst, $src, $div1" %}
@@ -12789,7 +12800,7 @@
 %{
   match(Set dst (AddL src1 (ConvI2L src2)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, sxtw $src2" %}
+  format %{ "add  $dst, $src1, $src2, sxtw" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12802,7 +12813,7 @@
 %{
   match(Set dst (SubL src1 (ConvI2L src2)));
   ins_cost(INSN_COST);
-  format %{ "sub  $dst, $src1, sxtw $src2" %}
+  format %{ "sub  $dst, $src1, $src2, sxtw" %}
 
    ins_encode %{
      __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12816,7 +12827,7 @@
 %{
   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, sxth $src2" %}
+  format %{ "add  $dst, $src1, $src2, sxth" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12829,7 +12840,7 @@
 %{
   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, sxtb $src2" %}
+  format %{ "add  $dst, $src1, $src2, sxtb" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12842,7 +12853,7 @@
 %{
   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, uxtb $src2" %}
+  format %{ "add  $dst, $src1, $src2, uxtb" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12855,7 +12866,7 @@
 %{
   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, sxth $src2" %}
+  format %{ "add  $dst, $src1, $src2, sxth" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12868,7 +12879,7 @@
 %{
   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, sxtw $src2" %}
+  format %{ "add  $dst, $src1, $src2, sxtw" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12881,7 +12892,7 @@
 %{
   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, sxtb $src2" %}
+  format %{ "add  $dst, $src1, $src2, sxtb" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -12894,7 +12905,7 @@
 %{
   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
   ins_cost(INSN_COST);
-  format %{ "add  $dst, $src1, uxtb $src2" %}
+  format %{ "add  $dst, $src1, $src2, uxtb" %}
 
    ins_encode %{
      __ add(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -13034,6 +13045,294 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+
+instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}
+
+   ins_encode %{
+     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}
+
+   ins_encode %{
+     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}
+
+   ins_encode %{
+     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
+%{
+  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}
+
+   ins_encode %{
+     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+
+instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%};
+
+instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%};
+
+
+instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}
+
+   ins_encode %{
+     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}
+
+   ins_encode %{
+     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}
+
+   ins_encode %{
+     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}
+
+   ins_encode %{
+     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}
 // END This section of the file is automatically generated. Do not edit --------------
 
 // ============================================================================
@@ -14021,10 +14320,12 @@
   ins_pipe(pipe_class_memory);
 %}
 
-instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 tmp, Universe dummy, rFlagsReg cr)
-%{
+instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
+%{
+  predicate((u_int64_t)n->in(2)->get_long()
+            < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
   match(Set dummy (ClearArray cnt base));
-  effect(USE_KILL base, TEMP tmp);
+  effect(USE_KILL base);
 
   ins_cost(4 * INSN_COST);
   format %{ "ClearArray $cnt, $base" %}
@@ -15385,9 +15686,9 @@
   format %{ "ShouldNotReachHere" %}
 
   ins_encode %{
-    // TODO
-    // implement proper trap call here
-    __ brk(999);
+    // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
+    // return true
+    __ dpcs1(0xdead + 1);
   %}
 
   ins_pipe(pipe_class_default);
@@ -16775,6 +17076,48 @@
   ins_pipe(vmla128);
 %}
 
+// dst + src1 * src2
+instruct vmla2F(vecD dst, vecD src1, vecD src2) %{
+  predicate(UseFMA && n->as_Vector()->length() == 2);
+  match(Set dst (FmaVF  dst (Binary src1 src2)));
+  format %{ "fmla  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ fmla(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(vmuldiv_fp64);
+%}
+
+// dst + src1 * src2
+instruct vmla4F(vecX dst, vecX src1, vecX src2) %{
+  predicate(UseFMA && n->as_Vector()->length() == 4);
+  match(Set dst (FmaVF  dst (Binary src1 src2)));
+  format %{ "fmla  $dst,$src1,$src2\t# vector (4S)" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ fmla(as_FloatRegister($dst$$reg), __ T4S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(vmuldiv_fp128);
+%}
+
+// dst + src1 * src2
+instruct vmla2D(vecX dst, vecX src1, vecX src2) %{
+  predicate(UseFMA && n->as_Vector()->length() == 2);
+  match(Set dst (FmaVD  dst (Binary src1 src2)));
+  format %{ "fmla  $dst,$src1,$src2\t# vector (2D)" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ fmla(as_FloatRegister($dst$$reg), __ T2D,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(vmuldiv_fp128);
+%}
+
 // --------------------------------- MLS --------------------------------------
 
 instruct vmls4S(vecD dst, vecD src1, vecD src2)
@@ -16834,6 +17177,51 @@
   ins_pipe(vmla128);
 %}
 
+// dst - src1 * src2
+instruct vmls2F(vecD dst, vecD src1, vecD src2) %{
+  predicate(UseFMA && n->as_Vector()->length() == 2);
+  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
+  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
+  format %{ "fmls  $dst,$src1,$src2\t# vector (2S)" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ fmls(as_FloatRegister($dst$$reg), __ T2S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(vmuldiv_fp64);
+%}
+
+// dst - src1 * src2
+instruct vmls4F(vecX dst, vecX src1, vecX src2) %{
+  predicate(UseFMA && n->as_Vector()->length() == 4);
+  match(Set dst (FmaVF  dst (Binary (NegVF src1) src2)));
+  match(Set dst (FmaVF  dst (Binary src1 (NegVF src2))));
+  format %{ "fmls  $dst,$src1,$src2\t# vector (4S)" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ fmls(as_FloatRegister($dst$$reg), __ T4S,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(vmuldiv_fp128);
+%}
+
+// dst - src1 * src2
+instruct vmls2D(vecX dst, vecX src1, vecX src2) %{
+  predicate(UseFMA && n->as_Vector()->length() == 2);
+  match(Set dst (FmaVD  dst (Binary (NegVD src1) src2)));
+  match(Set dst (FmaVD  dst (Binary src1 (NegVD src2))));
+  format %{ "fmls  $dst,$src1,$src2\t# vector (2D)" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ fmls(as_FloatRegister($dst$$reg), __ T2D,
+            as_FloatRegister($src1$$reg),
+            as_FloatRegister($src2$$reg));
+  %}
+  ins_pipe(vmuldiv_fp128);
+%}
+
 // --------------------------------- DIV --------------------------------------
 
 instruct vdiv2F(vecD dst, vecD src1, vecD src2)
--- a/src/cpu/aarch64/vm/aarch64_ad.m4	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/aarch64_ad.m4	Thu Aug 10 04:49:27 2017 -0700
@@ -268,21 +268,21 @@
   ins_pipe(ialu_reg_reg_vshift);
 %}')dnl
 define(ROL_INSN, `
-instruct $3$1_rReg_Var_C$2(iRegLNoSp dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
+instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 %{
   match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
 
   expand %{
-    $3L_rReg(dst, src, shift, cr);
+    $3$1_rReg(dst, src, shift, cr);
   %}
 %}')dnl
 define(ROR_INSN, `
-instruct $3$1_rReg_Var_C$2(iRegLNoSp dst, iRegL src, iRegI shift, immI$2 c$2, rFlagsReg cr)
+instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 %{
   match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
 
   expand %{
-    $3L_rReg(dst, src, shift, cr);
+    $3$1_rReg(dst, src, shift, cr);
   %}
 %}')dnl
 ROL_EXPAND(L, rol, rorv)
@@ -305,7 +305,7 @@
 %{
   match(Set dst ($3$2 src1 (ConvI2L src2)));
   ins_cost(INSN_COST);
-  format %{ "$4  $dst, $src1, $5 $src2" %}
+  format %{ "$4  $dst, $src1, $src2, $5" %}
 
    ins_encode %{
      __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -321,7 +321,7 @@
 %{
   match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
   ins_cost(INSN_COST);
-  format %{ "$5  $dst, $src1, $6 $src2" %}
+  format %{ "$5  $dst, $src1, $src2, $6" %}
 
    ins_encode %{
      __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
@@ -363,5 +363,82 @@
 ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
 ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
 ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
+dnl
+dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
+define(`ADD_SUB_EXTENDED_SHIFT', `
+instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
+%{
+  match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "$5  $dst, $src1, $src2, $6 #lshift2" %}
 
+   ins_encode %{
+     __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}')
+dnl                   $1 $2 $3   $4   $5   $6  $7
+ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
+ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
+ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
+dnl
+ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
+ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
+ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
+dnl
+ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
+ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
+dnl
+ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
+ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
+dnl
+dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
+define(`ADD_SUB_CONV_SHIFT', `
+instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "$3  $dst, $src1, $src2, $4 #lshift" %}
+
+   ins_encode %{
+     __ $3(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::$4, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}')
+dnl
+ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
+ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
+dnl
+dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, ext type)
+define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
+instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
+%{
+  match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "$4  $dst, $src1, $src2, $5 #lshift" %}
+
+   ins_encode %{
+     __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::$5, ($lshift$$constant));
+   %}
+  ins_pipe(ialu_reg_reg_shift);
+%}')
+dnl
+dnl                       $1 $2  $3  $4  $5
+ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
+dnl
+ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
+ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
+dnl
+ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
+dnl
+ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
+ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
+dnl
 // END This section of the file is automatically generated. Do not edit --------------
--- a/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/abstractInterpreter_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,6 +28,7 @@
 #include "oops/constMethod.hpp"
 #include "oops/method.hpp"
 #include "runtime/frame.inline.hpp"
+#include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
 
@@ -53,27 +54,6 @@
   return i;
 }
 
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    : // fall thru
-    case Interpreter::java_lang_math_pow     : // fall thru
-    case Interpreter::java_lang_math_exp     : // fall thru
-    case Interpreter::java_lang_math_fmaD    : // fall thru
-    case Interpreter::java_lang_math_fmaF    :
-      return false;
-    default:
-      return true;
-  }
-}
-
 // How much stack a method activation needs in words.
 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
   const int entry_size = frame::interpreter_frame_monitor_size();
@@ -109,13 +89,19 @@
   // for the callee's params we only need to account for the extra
   // locals.
   int size = overhead +
-         (callee_locals - callee_params)*Interpreter::stackElementWords +
+         (callee_locals - callee_params) +
          monitors * frame::interpreter_frame_monitor_size() +
-         temps* Interpreter::stackElementWords + extra_args;
+         // On the top frame, at all times SP <= ESP, and SP is
+         // 16-aligned.  We ensure this by adjusting SP on method
+         // entry and re-entry to allow room for the maximum size of
+         // the expression stack.  When we call another method we bump
+         // SP so that no stack space is wasted.  So, only on the top
+         // frame do we need to allow max_stack words.
+         (is_top_frame ? max_stack : temps + extra_args);
 
   // On AArch64 we always keep the stack pointer 16-aligned, so we
   // must round up here.
-  size = round_to(size, 2);
+  size = align_up(size, 2);
 
   return size;
 }
--- a/src/cpu/aarch64/vm/assembler_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/assembler_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1,8 +1,7 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * reserved.  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE
- * HEADER.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
@@ -21,7 +20,6 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */
 
 #include <stdio.h>
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -2201,6 +2201,8 @@
   INSN(fdiv, 1, 0, 0b111111);
   INSN(fmul, 1, 0, 0b110111);
   INSN(fsub, 0, 1, 0b110101);
+  INSN(fmla, 0, 0, 0b110011);
+  INSN(fmls, 0, 1, 0b110011);
 
 #undef INSN
 
--- a/src/cpu/aarch64/vm/bytes_aarch64.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/bytes_aarch64.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -30,12 +30,6 @@
 
 class Bytes: AllStatic {
  public:
-  // Returns true if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine. For example, this is true for Intel x86, but false for Solaris
-  // on Sparc.
-  static inline bool is_Java_byte_ordering_different(){ return true; }
-
-
   // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
   // (no special code is needed since x86 CPUs can access unaligned data)
   static inline u2   get_native_u2(address p)         { return *(u2*)p; }
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -2740,8 +2740,7 @@
         // set already but no need to check.
         __ cbz(rscratch1, next);
 
-        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
-        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
 
         if (TypeEntries::is_type_none(current_klass)) {
           __ cbz(rscratch2, none);
@@ -2761,8 +2760,7 @@
                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
 
         __ ldr(tmp, mdo_addr);
-        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
-        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
       }
 
       // different than before. Cannot keep accurate profile.
@@ -2812,8 +2810,7 @@
                ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
 
         __ ldr(tmp, mdo_addr);
-        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
-        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
+        __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
 
         __ orr(tmp, tmp, TypeEntries::type_unknown);
         __ str(tmp, mdo_addr);
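
The three replacements above collapse an andr/cbnz pair into a single tbnz. Since TypeEntries::type_unknown is a single-bit flag, testing bit exact_log2(flag) is the same predicate as masking and comparing with zero; a sketch of the equivalence, with illustrative helper names:

    #include <cassert>
    #include <cstdint>

    // Stand-in for HotSpot's exact_log2: index of the single set bit.
    static int exact_log2_sketch(uint64_t x) {
      assert(x != 0 && (x & (x - 1)) == 0 && "power of two expected");
      int n = 0;
      while ((x >>= 1) != 0) n++;
      return n;
    }

    // The predicate tbnz evaluates: is bit exact_log2(flag) of word set?
    static bool tbnz_predicate(uint64_t word, uint64_t flag) {
      return (word >> exact_log2_sketch(flag)) & 1;
    }
    // For any single-bit flag this equals ((word & flag) != 0), which is
    // what the removed andr/cbnz sequence computed via rscratch1.
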
--- a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -598,12 +598,12 @@
   } else {
     assert (x->op() == Bytecodes::_imul, "expect imul");
     if (right.is_constant()) {
-      int c = right.get_jint_constant();
-      if (! is_power_of_2(c) && ! is_power_of_2(c + 1) && ! is_power_of_2(c - 1)) {
+      jint c = right.get_jint_constant();
+      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
+        right_arg->dont_load_item();
+      } else {
         // Cannot use constant op.
-        right.load_item();
-      } else {
-        right.dont_load_item();
+        right_arg->load_item();
       }
     } else {
       right.load_item();
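
The rewritten constant test guards the strength reduction C1 applies to imul: multiplies by 2^k, 2^k - 1 or 2^k + 1 become a shift plus at most one add or sub. A hedged sketch of that transformation (helper name and shape are illustrative, not C1's code):

    #include <cstdint>

    // Computes x * ((1 << k) + delta) for delta in {-1, 0, +1} with no
    // multiply instruction, which is what dont_load_item() enables.
    static int32_t mul_by_near_pow2(int32_t x, int k, int delta) {
      int32_t shifted = (int32_t)((uint32_t)x << k);  // shift replaces mul
      if (delta == -1) return shifted - x;   // c == 2^k - 1, e.g. 7
      if (delta ==  1) return shifted + x;   // c == 2^k + 1, e.g. 9
      return shifted;                        // c == 2^k
    }
    // The new c > 0 && c < max_jint guard matters because c + 1 would
    // wrap for c == max_jint, making the is_power_of_2(c + 1) test lie.
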
@@ -1221,12 +1221,19 @@
   obj.load_item();
 
   // info for exceptions
-  CodeEmitInfo* info_for_exception = state_for(x);
+  CodeEmitInfo* info_for_exception =
+      (x->needs_exception_state() ? state_for(x) :
+                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));
 
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == NULL, "can't patch this");
     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+  } else if (x->is_invokespecial_receiver_check()) {
+    assert(patching_info == NULL, "can't patch this");
+    stub = new DeoptimizeStub(info_for_exception,
+                              Deoptimization::Reason_class_check,
+                              Deoptimization::Action_none);
   } else {
     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
@@ -1340,6 +1347,16 @@
 
 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                        CodeEmitInfo* info) {
+  // 8179954: We need to make sure that the code generated for
+  // volatile accesses forms a sequentially-consistent set of
+  // operations when combined with STLR and LDAR.  Without a leading
+  // membar it's possible for a simple Dekker test to fail if loads
+  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
+  // the stores in one method and C1 compiles the loads in another.
+  if (! UseBarriersForVolatile) {
+    __ membar();
+  }
+
   __ volatile_load_mem_reg(address, result, info);
 }
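
A minimal sketch of the Dekker shape the 8179954 comment describes, using std::atomic as an illustration rather than anything in this changeset:

    #include <atomic>

    static std::atomic<int> flag_x{0}, flag_y{0};

    static int dekker_left() {
      flag_x.store(1, std::memory_order_seq_cst);    // STLR on AArch64
      return flag_y.load(std::memory_order_seq_cst); // must not pass the store
    }

    static int dekker_right() {
      flag_y.store(1, std::memory_order_seq_cst);
      return flag_x.load(std::memory_order_seq_cst);
    }
    // Sequential consistency forbids both functions returning 0.  If C2
    // compiles the stores as STLR but C1 emits plain LDR;DMB loads,
    // nothing orders an STLR against a later LDR, so both sides could
    // read 0; the leading membar added above closes that window.
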
 
--- a/src/cpu/aarch64/vm/debug_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/codeCache.hpp"
-#include "code/nmethod.hpp"
-#include "runtime/frame.hpp"
-#include "runtime/init.hpp"
-#include "runtime/os.hpp"
-#include "utilities/debug.hpp"
-
-void pd_ps(frame f) {}
--- a/src/cpu/aarch64/vm/frame_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/frame_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -784,6 +784,8 @@
 frame::frame(void* sp, void* fp, void* pc) {
   init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
 }
+
+void frame::pd_ps() {}
 #endif
 
 void JavaFrameAnchor::make_walkable(JavaThread* thread) {
--- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1754,8 +1754,7 @@
     // Load the offset of the area within the MDO used for
     // parameters. If it's negative we're not profiling any parameters
     ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
-    cmp(tmp1, 0u);
-    br(Assembler::LT, profile_continue);
+    tbnz(tmp1, 63, profile_continue);  // i.e. sign bit set
 
     // Compute a pointer to the area for parameters from the offset
     // and move the pointer to the slot for the last
--- a/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/interpreterRT_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -369,7 +369,7 @@
   }
 
  public:
-  SlowSignatureHandler(methodHandle method, address from, intptr_t* to)
+  SlowSignatureHandler(const methodHandle& method, address from, intptr_t* to)
     : NativeSignatureIterator(method)
   {
     _from = from;
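
The signature changes in this file (and the matching ones in interpreterRT_aarch64.hpp and sharedRuntime_aarch64.cpp below) pass methodHandle by const reference. Copying a methodHandle re-registers the wrapped Method* with the current thread, so taking it by value costs a handle per call; an illustrative sketch, not HotSpot's Handle machinery:

    class Method;

    class methodHandle_sketch {
      Method* _m;
     public:
      explicit methodHandle_sketch(Method* m) : _m(m) { /* register with thread */ }
      methodHandle_sketch(const methodHandle_sketch& h) : _m(h._m) { /* register again */ }
      Method* raw() const { return _m; }
    };

    static void by_ref(const methodHandle_sketch&) { /* no copy, no re-registration */ }
    static void by_val(methodHandle_sketch)        { /* one copy per call */ }
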
--- a/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/interpreterRT_aarch64.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -47,7 +47,7 @@
 
  public:
   // Creation
-  SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+  SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
     _masm = new MacroAssembler(buffer);
     _num_int_args = (method->is_static() ? 1 : 0);
     _num_fp_args = 0;
--- a/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -76,8 +76,7 @@
           SafepointSynchronize::safepoint_counter_addr(), offset);
   Address safepoint_counter_addr(rcounter_addr, offset);
   __ ldrw(rcounter, safepoint_counter_addr);
-  __ andw(rscratch1, rcounter, 1);
-  __ cbnzw(rscratch1, slow);
+  __ tbnz(rcounter, 0, slow);
   __ eor(robj, c_rarg1, rcounter);
   __ eor(robj, robj, rcounter);               // obj, since
                                               // robj ^ rcounter ^ rcounter == robj
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,6 +38,7 @@
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
 #include "opto/node.hpp"
+#include "prims/jvm.h"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -698,6 +699,7 @@
 // trampolines won't be emitted.
 
 address MacroAssembler::trampoline_call(Address entry, CodeBuffer *cbuf) {
+  assert(JavaThread::current()->is_Compiler_thread(), "just checking");
   assert(entry.rspec().type() == relocInfo::runtime_call_type
          || entry.rspec().type() == relocInfo::opt_virtual_call_type
          || entry.rspec().type() == relocInfo::static_call_type
@@ -2010,6 +2012,12 @@
   hlt(0);
 }
 
+void MacroAssembler::unimplemented(const char* what) {
+  char* b = new char[1024];
+  jio_snprintf(b, 1024, "unimplemented: %s", what);
+  stop(b);
+}
+
 // If a constant does not fit in an immediate field, generate some
 // number of MOV instructions and then perform the operation.
 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, unsigned imm,
@@ -4950,34 +4958,67 @@
 }
 
 
-// base:     Address of a buffer to be zeroed, 8 bytes aligned.
-// cnt:      Count in HeapWords.
-// is_large: True when 'cnt' is known to be >= BlockZeroingLowLimit.
-void MacroAssembler::zero_words(Register base, Register cnt)
+// The size of the blocks erased by the zero_blocks stub.  We must
+// handle anything smaller than this ourselves in zero_words().
+const int MacroAssembler::zero_words_block_size = 8;
+
+// zero_words() is used by C2 ClearArray patterns.  It is as small as
+// possible, handling small word counts locally and delegating
+// anything larger to the zero_blocks stub.  It is expanded many times
+// in compiled code, so it is important to keep it short.
+
+// ptr:   Address of a buffer to be zeroed.
+// cnt:   Count in HeapWords.
+//
+// ptr, cnt, rscratch1, and rscratch2 are clobbered.
+void MacroAssembler::zero_words(Register ptr, Register cnt)
 {
-  if (UseBlockZeroing) {
-    block_zero(base, cnt);
-  } else {
-    fill_words(base, cnt, zr);
+  assert(is_power_of_2(zero_words_block_size), "adjust this");
+  assert(ptr == r10 && cnt == r11, "mismatch in register usage");
+
+  BLOCK_COMMENT("zero_words {");
+  cmp(cnt, zero_words_block_size);
+  Label around, done, done16;
+  br(LO, around);
+  {
+    RuntimeAddress zero_blocks =  RuntimeAddress(StubRoutines::aarch64::zero_blocks());
+    assert(zero_blocks.target() != NULL, "zero_blocks stub has not been generated");
+    if (StubRoutines::aarch64::complete()) {
+      trampoline_call(zero_blocks);
+    } else {
+      bl(zero_blocks);
+    }
   }
+  bind(around);
+  for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) {
+    Label l;
+    tbz(cnt, exact_log2(i), l);
+    for (int j = 0; j < i; j += 2) {
+      stp(zr, zr, post(ptr, 16));
+    }
+    bind(l);
+  }
+  {
+    Label l;
+    tbz(cnt, 0, l);
+    str(zr, Address(ptr));
+    bind(l);
+  }
+  BLOCK_COMMENT("} zero_words");
 }
 
-// r10 = base:   Address of a buffer to be zeroed, 8 bytes aligned.
+// base:         Address of a buffer to be zeroed, 8 bytes aligned.
 // cnt:          Immediate count in HeapWords.
-// r11 = tmp:    For use as cnt if we need to call out
-#define ShortArraySize (18 * BytesPerLong)
+#define SmallArraySize (18 * BytesPerLong)
 void MacroAssembler::zero_words(Register base, u_int64_t cnt)
 {
-  Register tmp = r11;
+  BLOCK_COMMENT("zero_words {");
   int i = cnt & 1;  // store any odd word to start
   if (i) str(zr, Address(base));
 
-  if (cnt <= ShortArraySize / BytesPerLong) {
+  if (cnt <= SmallArraySize / BytesPerLong) {
     for (; i < (int)cnt; i += 2)
       stp(zr, zr, Address(base, i * wordSize));
-  } else if (UseBlockZeroing && cnt >= (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord)) {
-    mov(tmp, cnt);
-    block_zero(base, tmp, true);
   } else {
     const int unroll = 4; // Number of stp(zr, zr) instructions we'll unroll
     int remainder = cnt % (2 * unroll);
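
The tbz ladder in the new register-count zero_words() above clears the sub-block tail without a loop: each set bit of cnt selects one fixed-size run of stp stores. The same logic in plain C++, as a sketch with illustrative names:

    #include <cstdint>

    // cnt < zero_words_block_size (8) on entry, per the stub contract.
    static void zero_tail_sketch(uint64_t* ptr, unsigned cnt /* 0..7 */) {
      for (unsigned i = 4; i > 1; i >>= 1) {   // i = 4, then 2
        if (cnt & i) {                         // tbz(cnt, exact_log2(i), l)
          for (unsigned j = 0; j < i; j += 2) {
            ptr[0] = 0; ptr[1] = 0;            // stp(zr, zr, post(ptr, 16))
            ptr += 2;
          }
        }
      }
      if (cnt & 1) *ptr = 0;                   // the final odd word
    }
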
@@ -4998,6 +5039,51 @@
     stp(zr, zr, Address(pre(loop_base, 2 * unroll * wordSize)));
     cbnz(cnt_reg, loop);
   }
+  BLOCK_COMMENT("} zero_words");
+}
+
+// Zero blocks of memory by using DC ZVA.
+//
+// Aligns the base address first sufficiently for DC ZVA, then uses
+// DC ZVA repeatedly for every full block.  cnt is the size to be
+// zeroed in HeapWords.  Returns the count of words left to be zeroed
+// in cnt.
+//
+// NOTE: This is intended to be used in the zero_blocks() stub.  If
+// you want to use it elsewhere, note that cnt must be >= 2*zva_length.
+void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) {
+  Register tmp = rscratch1;
+  Register tmp2 = rscratch2;
+  int zva_length = VM_Version::zva_length();
+  Label initial_table_end, loop_zva;
+  Label fini;
+
+  // Base must be 16 byte aligned. If not just return and let caller handle it
+  tst(base, 0x0f);
+  br(Assembler::NE, fini);
+  // Align base with ZVA length.
+  neg(tmp, base);
+  andr(tmp, tmp, zva_length - 1);
+
+  // tmp: the number of bytes to be filled to align the base with ZVA length.
+  add(base, base, tmp);
+  sub(cnt, cnt, tmp, Assembler::ASR, 3);
+  adr(tmp2, initial_table_end);
+  sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
+  br(tmp2);
+
+  for (int i = -zva_length + 16; i < 0; i += 16)
+    stp(zr, zr, Address(base, i));
+  bind(initial_table_end);
+
+  sub(cnt, cnt, zva_length >> 3);
+  bind(loop_zva);
+  dc(Assembler::ZVA, base);
+  subs(cnt, cnt, zva_length >> 3);
+  add(base, base, zva_length);
+  br(Assembler::GE, loop_zva);
+  add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
+  bind(fini);
 }
 
 // base:   Address of a buffer to be filled, 8 bytes aligned.
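
In zero_dcache_blocks() above, the neg/andr pair computes how many bytes separate base from the next ZVA boundary, which the computed branch into the stp table then pre-zeroes. The arithmetic as a sketch (helper name illustrative):

    #include <cstdint>

    // zva_length is a power of two; the result is the byte distance from
    // base up to the next zva_length-aligned address.
    static uint64_t bytes_to_zva_boundary(uint64_t base, unsigned zva_length) {
      return (0 - base) & (uint64_t)(zva_length - 1);
    }
    // e.g. base == 0x1010, zva_length == 64 -> 0x30: six words are stored
    // by the stp table before the loop issues one DC ZVA per 64-byte block.
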
@@ -5058,69 +5144,6 @@
   bind(fini);
 }
 
-// Use DC ZVA to do fast zeroing.
-// base:   Address of a buffer to be zeroed, 8 bytes aligned.
-// cnt:    Count in HeapWords.
-// is_large: True when 'cnt' is known to be >= BlockZeroingLowLimit.
-void MacroAssembler::block_zero(Register base, Register cnt, bool is_large)
-{
-  Label small;
-  Label store_pair, loop_store_pair, done;
-  Label base_aligned;
-
-  assert_different_registers(base, cnt, rscratch1);
-  guarantee(base == r10 && cnt == r11, "fix register usage");
-
-  Register tmp = rscratch1;
-  Register tmp2 = rscratch2;
-  int zva_length = VM_Version::zva_length();
-
-  // Ensure ZVA length can be divided by 16. This is required by
-  // the subsequent operations.
-  assert (zva_length % 16 == 0, "Unexpected ZVA Length");
-
-  if (!is_large) cbz(cnt, done);
-  tbz(base, 3, base_aligned);
-  str(zr, Address(post(base, 8)));
-  sub(cnt, cnt, 1);
-  bind(base_aligned);
-
-  // Ensure count >= zva_length * 2 so that it still deserves a zva after
-  // alignment.
-  if (!is_large || !(BlockZeroingLowLimit >= zva_length * 2)) {
-    int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);
-    subs(tmp, cnt, low_limit >> 3);
-    br(Assembler::LT, small);
-  }
-
-  far_call(StubRoutines::aarch64::get_zero_longs());
-
-  bind(small);
-
-  const int unroll = 8; // Number of stp instructions we'll unroll
-  Label small_loop, small_table_end;
-
-  andr(tmp, cnt, (unroll-1) * 2);
-  sub(cnt, cnt, tmp);
-  add(base, base, tmp, Assembler::LSL, 3);
-  adr(tmp2, small_table_end);
-  sub(tmp2, tmp2, tmp, Assembler::LSL, 1);
-  br(tmp2);
-
-  bind(small_loop);
-  add(base, base, unroll * 16);
-  for (int i = -unroll; i < 0; i++)
-    stp(zr, zr, Address(base, i * 16));
-  bind(small_table_end);
-  subs(cnt, cnt, unroll * 2);
-  br(Assembler::GE, small_loop);
-
-  tbz(cnt, 0, done);
-  str(zr, Address(post(base, 8)));
-
-  bind(done);
-}
-
 // Intrinsic for sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray and
 // java/lang/StringUTF16.compress.
 void MacroAssembler::encode_iso_array(Register src, Register dst,
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -169,6 +169,7 @@
 
   template<class T>
   inline void cmpw(Register Rd, T imm)  { subsw(zr, Rd, imm); }
+  // imm is limited to 12 bits.
   inline void cmp(Register Rd, unsigned imm)  { subs(zr, Rd, imm); }
 
   inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
@@ -941,7 +942,7 @@
 
   void untested()                                { stop("untested"); }
 
-  void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
+  void unimplemented(const char* what = "");
 
   void should_not_reach_here()                   { stop("should not reach here"); }
 
@@ -949,8 +950,8 @@
   void bang_stack_with_offset(int offset) {
     // stack grows down, caller passes positive offset
     assert(offset > 0, "must bang with negative offset");
-    mov(rscratch2, -offset);
-    str(zr, Address(sp, rscratch2));
+    sub(rscratch2, sp, offset);
+    str(zr, Address(rscratch2));
   }
 
   // Writes to stack successive pages until offset reached to check for
@@ -1215,8 +1216,10 @@
 
   void fill_words(Register base, Register cnt, Register value);
   void zero_words(Register base, u_int64_t cnt);
-  void zero_words(Register base, Register cnt);
-  void block_zero(Register base, Register cnt, bool is_large = false);
+  void zero_words(Register ptr, Register cnt);
+  void zero_dcache_blocks(Register base, Register cnt);
+
+  static const int zero_words_block_size;
 
   void byte_array_inflate(Register src, Register dst, Register len,
                           FloatRegister vtmp1, FloatRegister vtmp2,
--- a/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -137,8 +137,9 @@
   __ verify_oop(method_temp);
   __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
   __ verify_oop(method_temp);
-  // the following assumes that a Method* is normally compressed in the vmtarget field:
-  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));
+  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
+  __ verify_oop(method_temp);
+  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
@@ -282,7 +283,8 @@
 
     Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
     Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
-    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));
+    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()));
+    Address vmtarget_method( rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()));
 
     Register temp1_recv_klass = temp1;
     if (iid != vmIntrinsics::_linkToStatic) {
@@ -335,14 +337,16 @@
       if (VerifyMethodHandles) {
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
       }
-      __ ldr(rmethod, member_vmtarget);
+      __ load_heap_oop(rmethod, member_vmtarget);
+      __ ldr(rmethod, vmtarget_method);
       break;
 
     case vmIntrinsics::_linkToStatic:
       if (VerifyMethodHandles) {
         verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
       }
-      __ ldr(rmethod, member_vmtarget);
+      __ load_heap_oop(rmethod, member_vmtarget);
+      __ ldr(rmethod, vmtarget_method);
       break;
 
     case vmIntrinsics::_linkToVirtual:
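
The methodHandles changes in this file follow the new MemberName layout: the Method* no longer sits in MemberName.vmtarget but behind a ResolvedMethodName indirection, hence the extra load_heap_oop before the final ldr. A hedged structural sketch; the field layout here is illustrative, not the real object shapes:

    class Method;
    struct ResolvedMethodName_sketch { Method* vmtarget; };
    struct MemberName_sketch { ResolvedMethodName_sketch* method; };

    static Method* vmtarget_of(const MemberName_sketch* mn) {
      // was: one ldr from MemberName.vmtarget
      // now: load_heap_oop(MemberName.method), then ldr(ResolvedMethodName.vmtarget)
      return mn->method->vmtarget;
    }
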
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -36,6 +36,7 @@
 #include "oops/compiledICHolder.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
+#include "utilities/align.hpp"
 #include "vmreg_aarch64.inline.hpp"
 #ifdef COMPILER1
 #include "c1/c1_Runtime1.hpp"
@@ -123,7 +124,7 @@
   assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
 #endif
 
-  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
+  int frame_size_in_bytes = align_up(additional_frame_words*wordSize +
                                      reg_save_size*BytesPerInt, 16);
   // OopMap frame size is in compiler stack slots (jint's) not bytes or words
   int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
@@ -190,7 +191,7 @@
   __ ldr(r0, Address(sp, r0_offset_in_bytes()));
 
   // Pop all of the register save area off the stack
-  __ add(sp, sp, round_to(return_offset_in_bytes(), 16));
+  __ add(sp, sp, align_up(return_offset_in_bytes(), 16));
 }
 
 // Is vector's size (in bytes) bigger than a size saved by default?
@@ -317,7 +318,7 @@
     }
   }
 
-  return round_to(stk_args, 2);
+  return align_up(stk_args, 2);
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
@@ -375,7 +376,7 @@
   __ mov(r13, sp);
 
   // stack is aligned, keep it that way
-  extraspace = round_to(extraspace, 2*wordSize);
+  extraspace = align_up(extraspace, 2*wordSize);
 
   if (extraspace)
     __ sub(sp, sp, extraspace);
@@ -547,7 +548,7 @@
   }
 
   // Cut-out for having no stack args.
-  int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+  int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
   if (comp_args_on_stack) {
     __ sub(rscratch1, sp, comp_words_on_stack * wordSize);
     __ andr(sp, rscratch1, -16);
@@ -1206,7 +1207,7 @@
 }
 
 static void verify_oop_args(MacroAssembler* masm,
-                            methodHandle method,
+                            const methodHandle& method,
                             const BasicType* sig_bt,
                             const VMRegPair* regs) {
   Register temp_reg = r19;  // not part of any compiled calling seq
@@ -1228,7 +1229,7 @@
 }
 
 static void gen_special_dispatch(MacroAssembler* masm,
-                                 methodHandle method,
+                                 const methodHandle& method,
                                  const BasicType* sig_bt,
                                  const VMRegPair* regs) {
   verify_oop_args(masm, method, sig_bt, regs);
@@ -1486,7 +1487,7 @@
     total_save_slots = double_slots * 2 + single_slots;
     // align the save area
     if (double_slots != 0) {
-      stack_slots = round_to(stack_slots, 2);
+      stack_slots = align_up(stack_slots, 2);
     }
   }
 
@@ -1543,7 +1544,7 @@
 
   // Now compute actual number of stack words we need rounding to make
   // stack properly aligned.
-  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
+  stack_slots = align_up(stack_slots, StackAlignmentInSlots);
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
@@ -2293,7 +2294,7 @@
     return 0;                   // No adjustment for negative locals
   int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
   // diff is counted in stack words
-  return round_to(diff, 2);
+  return align_up(diff, 2);
 }
 
 
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -39,6 +39,7 @@
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
+#include "utilities/align.hpp"
 #ifdef COMPILER2
 #include "opto/runtime.hpp"
 #endif
@@ -619,19 +620,21 @@
 
   // Generate code for an array write pre barrier
   //
-  //     addr    -  starting address
-  //     count   -  element count
-  //     tmp     - scratch register
+  //     addr       - starting address
+  //     count      - element count
+  //     tmp        - scratch register
+  //     saved_regs - registers to be saved before calling static_write_ref_array_pre
   //
-  //     Destroy no registers except rscratch1 and rscratch2
+  //     Callers must specify which registers to preserve in saved_regs.
+  //     Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
   //
-  void  gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
+  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) {
     BarrierSet* bs = Universe::heap()->barrier_set();
     switch (bs->kind()) {
     case BarrierSet::G1SATBCTLogging:
       // With G1, don't generate the call if we statically know that the target is uninitialized
       if (!dest_uninitialized) {
-        __ push_call_clobbered_registers();
+        __ push(saved_regs, sp);
         if (count == c_rarg0) {
           if (addr == c_rarg1) {
             // exactly backwards!!
@@ -647,7 +650,7 @@
           __ mov(c_rarg1, count);
         }
         __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
-        __ pop_call_clobbered_registers();
+        __ pop(saved_regs, sp);
         break;
       case BarrierSet::CardTableForRS:
       case BarrierSet::CardTableExtension:
@@ -664,20 +667,23 @@
   // Generate code for an array write post barrier
   //
   //  Input:
-  //     start    - register containing starting address of destination array
-  //     end      - register containing ending address of destination array
-  //     scratch  - scratch register
+  //     start      - register containing starting address of destination array
+  //     end        - register containing ending address of destination array
+  //     scratch    - scratch register
+  //     saved_regs - registers to be saved before calling static_write_ref_array_post
   //
   //  The input registers are overwritten.
   //  The ending address is inclusive.
-  void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
+  //  Callers must specify which registers to preserve in saved_regs.
+  //  Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
+  void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch, RegSet saved_regs) {
     assert_different_registers(start, end, scratch);
     BarrierSet* bs = Universe::heap()->barrier_set();
     switch (bs->kind()) {
       case BarrierSet::G1SATBCTLogging:
 
         {
-          __ push_call_clobbered_registers();
+          __ push(saved_regs, sp);
           // must compute element count unless barrier set interface is changed (other platforms supply count)
           assert_different_registers(start, end, scratch);
           __ lea(scratch, Address(end, BytesPerHeapOop));
@@ -686,7 +692,7 @@
           __ mov(c_rarg0, start);
           __ mov(c_rarg1, scratch);
           __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
-          __ pop_call_clobbered_registers();
+          __ pop(saved_regs, sp);
         }
         break;
       case BarrierSet::CardTableForRS:
@@ -719,48 +725,74 @@
     }
   }
 
-  address generate_zero_longs(Register base, Register cnt) {
-    Register tmp = rscratch1;
-    Register tmp2 = rscratch2;
-    int zva_length = VM_Version::zva_length();
-    Label initial_table_end, loop_zva;
-    Label fini;
+  // The inner part of zero_words().  This is the bulk operation,
+  // zeroing words in blocks, possibly using DC ZVA to do it.  The
+  // caller is responsible for zeroing the last few words.
+  //
+  // Inputs:
+  // r10: the HeapWord-aligned base address of an array to zero.
+  // r11: the count in HeapWords, r11 > 0.
+  //
+  // Returns r10 and r11, adjusted for the caller to clear.
+  // r10: the base address of the tail of words left to clear.
+  // r11: the number of words in the tail.
+  //      r11 < MacroAssembler::zero_words_block_size.
+
+  address generate_zero_blocks() {
+    Label store_pair, loop_store_pair, done;
+    Label base_aligned;
+
+    Register base = r10, cnt = r11;
 
     __ align(CodeEntryAlignment);
-    StubCodeMark mark(this, "StubRoutines", "zero_longs");
+    StubCodeMark mark(this, "StubRoutines", "zero_blocks");
     address start = __ pc();
 
-    // Base must be 16 byte aligned. If not just return and let caller handle it
-    __ tst(base, 0x0f);
-    __ br(Assembler::NE, fini);
-    // Align base with ZVA length.
-    __ neg(tmp, base);
-    __ andr(tmp, tmp, zva_length - 1);
-
-    // tmp: the number of bytes to be filled to align the base with ZVA length.
-    __ add(base, base, tmp);
-    __ sub(cnt, cnt, tmp, Assembler::ASR, 3);
-    __ adr(tmp2, initial_table_end);
-    __ sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
-    __ br(tmp2);
-
-    for (int i = -zva_length + 16; i < 0; i += 16)
-      __ stp(zr, zr, Address(base, i));
-    __ bind(initial_table_end);
-
-    __ sub(cnt, cnt, zva_length >> 3);
-    __ bind(loop_zva);
-    __ dc(Assembler::ZVA, base);
-    __ subs(cnt, cnt, zva_length >> 3);
-    __ add(base, base, zva_length);
-    __ br(Assembler::GE, loop_zva);
-    __ add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
-    __ bind(fini);
+    if (UseBlockZeroing) {
+      int zva_length = VM_Version::zva_length();
+
+      // Ensure ZVA length can be divided by 16. This is required by
+      // the subsequent operations.
+      assert (zva_length % 16 == 0, "Unexpected ZVA Length");
+
+      __ tbz(base, 3, base_aligned);
+      __ str(zr, Address(__ post(base, 8)));
+      __ sub(cnt, cnt, 1);
+      __ bind(base_aligned);
+
+      // Ensure count >= zva_length * 2 so that it still deserves a zva after
+      // alignment.
+      Label small;
+      int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);
+      __ subs(rscratch1, cnt, low_limit >> 3);
+      __ br(Assembler::LT, small);
+      __ zero_dcache_blocks(base, cnt);
+      __ bind(small);
+    }
+
+    {
+      // Number of stp instructions we'll unroll
+      const int unroll =
+        MacroAssembler::zero_words_block_size / 2;
+      // Clear the remaining blocks.
+      Label loop;
+      __ subs(cnt, cnt, unroll * 2);
+      __ br(Assembler::LT, done);
+      __ bind(loop);
+      for (int i = 0; i < unroll; i++)
+        __ stp(zr, zr, __ post(base, 16));
+      __ subs(cnt, cnt, unroll * 2);
+      __ br(Assembler::GE, loop);
+      __ bind(done);
+      __ add(cnt, cnt, unroll * 2);
+    }
+
     __ ret(lr);
 
     return start;
   }
 
+
   typedef enum {
     copy_forwards = 1,
     copy_backwards = -1
@@ -795,7 +827,7 @@
     Label again, drain;
     const char *stub_name;
     if (direction == copy_forwards)
-      stub_name = "foward_copy_longs";
+      stub_name = "forward_copy_longs";
     else
       stub_name = "backward_copy_longs";
     StubCodeMark mark(this, "StubRoutines", stub_name);
@@ -1412,6 +1444,7 @@
   address generate_disjoint_copy(size_t size, bool aligned, bool is_oop, address *entry,
                                   const char *name, bool dest_uninitialized = false) {
     Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
+    RegSet saved_reg = RegSet::of(s, d, count);
     __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
@@ -1424,9 +1457,9 @@
     }
 
     if (is_oop) {
+      gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg);
+      // save regs before copy_memory
       __ push(RegSet::of(d, count), sp);
-      // no registers are destroyed by this call
-      gen_write_ref_array_pre_barrier(d, count, dest_uninitialized);
     }
     copy_memory(aligned, s, d, count, rscratch1, size);
     if (is_oop) {
@@ -1435,7 +1468,7 @@
         verify_oop_array(size, d, count, r16);
       __ sub(count, count, 1); // make an inclusive end pointer
       __ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
-      gen_write_ref_array_post_barrier(d, count, rscratch1);
+      gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
     }
     __ leave();
     __ mov(r0, zr); // return 0
@@ -1468,7 +1501,7 @@
                                  address *entry, const char *name,
                                  bool dest_uninitialized = false) {
     Register s = c_rarg0, d = c_rarg1, count = c_rarg2;
-
+    RegSet saved_regs = RegSet::of(s, d, count);
     StubCodeMark mark(this, "StubRoutines", name);
     address start = __ pc();
     __ enter();
@@ -1485,9 +1518,9 @@
     __ br(Assembler::HS, nooverlap_target);
 
     if (is_oop) {
+      gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_regs);
+      // save regs before copy_memory
       __ push(RegSet::of(d, count), sp);
-      // no registers are destroyed by this call
-      gen_write_ref_array_pre_barrier(d, count, dest_uninitialized);
     }
     copy_memory(aligned, s, d, count, rscratch1, -size);
     if (is_oop) {
@@ -1496,7 +1529,7 @@
         verify_oop_array(size, d, count, r16);
       __ sub(count, count, 1); // make an inclusive end pointer
       __ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
-      gen_write_ref_array_post_barrier(d, count, rscratch1);
+      gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
     }
     __ leave();
     __ mov(r0, zr); // return 0
@@ -1778,6 +1811,9 @@
     const Register ckoff       = c_rarg3;   // super_check_offset
     const Register ckval       = c_rarg4;   // super_klass
 
+    RegSet wb_pre_saved_regs = RegSet::range(c_rarg0, c_rarg4);
+    RegSet wb_post_saved_regs = RegSet::of(count);
+
     // Registers used as temps (r18, r19, r20 are save-on-entry)
     const Register count_save  = r21;       // orig elementscount
     const Register start_to    = r20;       // destination array start address
@@ -1835,7 +1871,7 @@
     }
 #endif //ASSERT
 
-    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
+    gen_write_ref_array_pre_barrier(to, count, dest_uninitialized, wb_pre_saved_regs);
 
     // save the original count
     __ mov(count_save, count);
@@ -1879,7 +1915,7 @@
 
     __ BIND(L_do_card_marks);
     __ add(to, to, -heapOopSize);         // make an inclusive end pointer
-    gen_write_ref_array_post_barrier(start_to, to, rscratch1);
+    gen_write_ref_array_post_barrier(start_to, to, rscratch1, wb_post_saved_regs);
 
     __ bind(L_done_pop);
     __ pop(RegSet::of(r18, r19, r20, r21), sp);
@@ -2346,20 +2382,16 @@
     __ subw(count, count, cnt_words, Assembler::LSL, 3 - shift);
     if (UseBlockZeroing) {
       Label non_block_zeroing, rest;
-      Register tmp = rscratch1;
-      // count >= BlockZeroingLowLimit && value == 0
-      __ subs(tmp, cnt_words, BlockZeroingLowLimit >> 3);
-      __ ccmp(value, 0 /* comparing value */, 0 /* NZCV */, Assembler::GE);
-      __ br(Assembler::NE, non_block_zeroing);
+      // If the fill value is zero we can use the fast zero_words().
+      __ cbnz(value, non_block_zeroing);
       __ mov(bz_base, to);
-      __ block_zero(bz_base, cnt_words, true);
-      __ mov(to, bz_base);
+      __ add(to, to, cnt_words, Assembler::LSL, LogBytesPerWord);
+      __ zero_words(bz_base, cnt_words);
       __ b(rest);
       __ bind(non_block_zeroing);
       __ fill_words(to, cnt_words, value);
       __ bind(rest);
-    }
-    else {
+    } else {
       __ fill_words(to, cnt_words, value);
     }
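
With block_zero gone, the fill stub above only needs to test the fill value: zero_words() now copes with any count itself, so the old BlockZeroingLowLimit pre-check disappears. The dispatch in sketch form (memset stands in for the zero_words fast path):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static void fill_dispatch_sketch(uint64_t* to, size_t cnt_words, uint64_t value) {
      if (value == 0) {
        memset(to, 0, cnt_words * sizeof(uint64_t));          // ~ zero_words()
      } else {
        for (size_t i = 0; i < cnt_words; i++) to[i] = value; // ~ fill_words()
      }
    }
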
 
@@ -2420,7 +2452,7 @@
     generate_copy_longs(copy_f, r0, r1, rscratch2, copy_forwards);
     generate_copy_longs(copy_b, r0, r1, rscratch2, copy_backwards);
 
-    StubRoutines::aarch64::_zero_longs = generate_zero_longs(r10, r11);
+    StubRoutines::aarch64::_zero_blocks = generate_zero_blocks();
 
     //*** jbyte
     // Always need aligned and unaligned versions
@@ -4769,6 +4801,7 @@
                                                        &StubRoutines::_safefetchN_fault_pc,
                                                        &StubRoutines::_safefetchN_continuation_pc);
 #endif
+    StubRoutines::aarch64::set_completed();
   }
 
  public:
--- a/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/stubRoutines_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -43,7 +43,8 @@
 address StubRoutines::aarch64::_float_sign_flip = NULL;
 address StubRoutines::aarch64::_double_sign_mask = NULL;
 address StubRoutines::aarch64::_double_sign_flip = NULL;
-address StubRoutines::aarch64::_zero_longs = NULL;
+address StubRoutines::aarch64::_zero_blocks = NULL;
+bool StubRoutines::aarch64::_completed = false;
 
 /**
  *  crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h
--- a/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -61,7 +61,8 @@
   static address _double_sign_mask;
   static address _double_sign_flip;
 
-  static address _zero_longs;
+  static address _zero_blocks;
+  static bool _completed;
 
  public:
 
@@ -115,12 +116,19 @@
     return _double_sign_flip;
   }
 
-  static address get_zero_longs()
-  {
-    return _zero_longs;
+  static address zero_blocks() {
+    return _zero_blocks;
   }
 
- private:
+  static bool complete() {
+    return _completed;
+  }
+
+  static void set_completed() {
+    _completed = true;
+  }
+
+private:
   static juint    _crc_table[];
 
 };
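
The complete()/set_completed() pair added above is what zero_words() consults when choosing between trampoline_call and a plain bl: while the stubs are still being generated a trampoline is presumably not yet safe to emit, and the zero_blocks target is within direct branch range at that point anyway. The idiom in miniature, with illustrative names:

    // One-way flag flipped after every stub has been generated.
    struct StubState_sketch {
      static bool _completed;
      static bool complete()      { return _completed; }
      static void set_completed() { _completed = true; }
    };
    bool StubState_sketch::_completed = false;

    // Call-site shape, mirroring MacroAssembler::zero_words():
    //   if (StubState_sketch::complete()) trampoline_call(zero_blocks);
    //   else                              bl(zero_blocks);
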
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -246,8 +246,7 @@
       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
       __ movw(bc_reg, bc);
-      __ cmpw(temp_reg, (unsigned) 0);
-      __ br(Assembler::EQ, L_patch_done);  // don't patch
+      __ cbzw(temp_reg, L_patch_done);  // don't patch
     }
     break;
   default:
@@ -2389,17 +2388,31 @@
   const Register obj   = r4;
   const Register off   = r19;
   const Register flags = r0;
+  const Register raw_flags = r6;
   const Register bc    = r4; // uses same reg as obj, so don't mix them
 
   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
   jvmti_post_field_access(cache, index, is_static, false);
-  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
+  load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static);
 
   if (!is_static) {
     // obj is on the stack
     pop_and_check_object(obj);
   }
 
+  // 8179954: We need to make sure that the code generated for
+  // volatile accesses forms a sequentially-consistent set of
+  // operations when combined with STLR and LDAR.  Without a leading
+  // membar it's possible for a simple Dekker test to fail if loads
+  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
+  // the stores in one method and we interpret the loads in another.
+  if (! UseBarriersForVolatile) {
+    Label notVolatile;
+    __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+    __ membar(MacroAssembler::AnyAny);
+    __ bind(notVolatile);
+  }
+
   const Address field(obj, off);
 
   Label Done, notByte, notBool, notInt, notShort, notChar,
@@ -2407,7 +2420,8 @@
 
   // x86 uses a shift and mask or wings it with a shift plus assert
   // the mask is not needed. aarch64 just uses bitfield extract
-  __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift,  ConstantPoolCacheEntry::tos_state_bits);
+  __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift,
+           ConstantPoolCacheEntry::tos_state_bits);
 
   assert(btos == 0, "change code, btos != 0");
   __ cbnz(flags, notByte);
@@ -2529,9 +2543,11 @@
 #endif
 
   __ bind(Done);
-  // It's really not worth bothering to check whether this field
-  // really is volatile in the slow case.
+
+  Label notVolatile;
+  __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
   __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
+  __ bind(notVolatile);
 }
 
 
@@ -2979,6 +2995,19 @@
   __ null_check(r0);
   const Address field(r0, r1);
 
+  // 8179954: We need to make sure that the code generated for
+  // volatile accesses forms a sequentially-consistent set of
+  // operations when combined with STLR and LDAR.  Without a leading
+  // membar it's possible for a simple Dekker test to fail if loads
+  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
+  // the stores in one method and we interpret the loads in another.
+  if (! UseBarriersForVolatile) {
+    Label notVolatile;
+    __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+    __ membar(MacroAssembler::AnyAny);
+    __ bind(notVolatile);
+  }
+
   // access field
   switch (bytecode()) {
   case Bytecodes::_fast_agetfield:
@@ -3027,6 +3056,22 @@
   __ get_cache_and_index_at_bcp(r2, r3, 2);
   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
+
+  // 8179954: We need to make sure that the code generated for
+  // volatile accesses forms a sequentially-consistent set of
+  // operations when combined with STLR and LDAR.  Without a leading
+  // membar it's possible for a simple Dekker test to fail if loads
+  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
+  // the stores in one method and we interpret the loads in another.
+  if (! UseBarriersForVolatile) {
+    Label notVolatile;
+    __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
+                                     ConstantPoolCacheEntry::flags_offset())));
+    __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
+    __ membar(MacroAssembler::AnyAny);
+    __ bind(notVolatile);
+  }
+
   // make sure exception is reported in correct bcp range (getfield is
   // next instruction)
   __ increment(rbcp);
--- a/src/cpu/aarch64/vm/vtableStubs_aarch64.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/aarch64/vm/vtableStubs_aarch64.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -51,6 +51,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int aarch64_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(aarch64_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), aarch64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
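
The new NULL check above handles code-cache exhaustion: the placement allocation for a VtableStub can fail, and the old code would have initialized through a NULL pointer. A generic sketch of the guard; the types and the nothrow allocation are illustrative stand-ins for the code-cache allocator:

    #include <cstddef>
    #include <new>

    struct Stub_sketch { int code_length; };

    static Stub_sketch* allocate_stub_sketch(size_t code_length) {
      void* p = ::operator new(code_length, std::nothrow); // may return NULL
      if (p == NULL) {
        return NULL;  // propagate: caller must cope with a full code cache
      }
      return new (p) Stub_sketch();
    }
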
--- a/src/cpu/arm/vm/abstractInterpreter_arm.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/arm/vm/abstractInterpreter_arm.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -32,6 +32,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/synchronizer.hpp"
+#include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
 int AbstractInterpreter::BasicType_as_index(BasicType type) {
@@ -68,23 +69,6 @@
   return i;
 }
 
-// These should never be compiled since the interpreter will prefer
-// the compiled version to the intrinsic version.
-bool AbstractInterpreter::can_be_compiled(methodHandle m) {
-  switch (method_kind(m)) {
-    case Interpreter::java_lang_math_sin     : // fall thru
-    case Interpreter::java_lang_math_cos     : // fall thru
-    case Interpreter::java_lang_math_tan     : // fall thru
-    case Interpreter::java_lang_math_abs     : // fall thru
-    case Interpreter::java_lang_math_log     : // fall thru
-    case Interpreter::java_lang_math_log10   : // fall thru
-    case Interpreter::java_lang_math_sqrt    :
-      return false;
-    default:
-      return true;
-  }
-}
-
 // How much stack a method activation needs in words.
 int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
   const int stub_code = AARCH64_ONLY(24) NOT_AARCH64(12);  // see generate_call_stub
@@ -125,7 +109,7 @@
          tempcount*Interpreter::stackElementWords + extra_args;
 
 #ifdef AARCH64
-  size = round_to(size, StackAlignmentInBytes/BytesPerWord);
+  size = align_up(size, StackAlignmentInBytes/BytesPerWord);
 #endif // AARCH64
 
   return size;
@@ -206,7 +190,7 @@
   }
   if (caller->is_interpreted_frame()) {
     intptr_t* locals_base = (locals - method->max_locals()*Interpreter::stackElementWords + 1);
-    locals_base = (intptr_t*)round_down((intptr_t)locals_base, StackAlignmentInBytes);
+    locals_base = align_down(locals_base, StackAlignmentInBytes);
     assert(interpreter_frame->sender_sp() <= locals_base, "interpreter-to-interpreter frame chaining");
 
   } else if (caller->is_compiled_frame()) {
@@ -244,7 +228,7 @@
   intptr_t* extended_sp = (intptr_t*) monbot  -
     (max_stack * Interpreter::stackElementWords) -
     popframe_extra_args;
-  extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
+  extended_sp = align_down(extended_sp, StackAlignmentInBytes);
   interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
 #else
   interpreter_frame->interpreter_frame_set_last_sp(stack_top);
@@ -256,7 +240,7 @@
 
 #ifdef AARCH64
   if (caller->is_interpreted_frame()) {
-    intptr_t* sender_sp = (intptr_t*)round_down((intptr_t)caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
+    intptr_t* sender_sp = align_down(caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
     interpreter_frame->set_interpreter_frame_sender_sp(sender_sp);
 
   } else {
--- a/src/cpu/arm/vm/arm.ad	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/arm/vm/arm.ad	Thu Aug 10 04:49:27 2017 -0700
@@ -1881,7 +1881,7 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 1*VMRegImpl::slots_per_word +
-              round_to((Compile::current()->in_preserve_stack_slots() +
+              align_up((Compile::current()->in_preserve_stack_slots() +
                         Compile::current()->fixed_slots()),
                        stack_alignment_in_slots()));
 
@@ -11752,9 +11752,13 @@
 
   size(4);
   // Use the following format syntax
-  format %{ "breakpoint   ; ShouldNotReachHere" %}
-  ins_encode %{
-    __ breakpoint();
+  format %{ "ShouldNotReachHere" %}
+  ins_encode %{
+#ifdef AARCH64
+    __ dpcs1(0xdead);
+#else
+    __ udf(0xdead);
+#endif
   %}
   ins_pipe(tail_call);
 %}
--- a/src/cpu/arm/vm/assembler_arm_32.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/arm/vm/assembler_arm_32.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -578,6 +578,11 @@
   F(bl, 0xb)
 #undef F
 
+  void udf(int imm_16) {
+    assert((imm_16 >> 16) == 0, "encoding constraint");
+    emit_int32(0xe7f000f0 | (imm_16 & 0xfff0) << 8 | (imm_16 & 0xf));
+  }
+
   // ARMv7 instructions
 
 #define F(mnemonic, wt) \
--- a/src/cpu/arm/vm/assembler_arm_64.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/arm/vm/assembler_arm_64.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1083,6 +1083,7 @@
 
   F(brk, 0b001, 0b000, 0b00)
   F(hlt, 0b010, 0b000, 0b00)
+  F(dpcs1, 0b101, 0b000, 0b01)
 #undef F
 
   enum SystemRegister { // o0<1> op1<3> CRn<4> CRm<4> op2<3>
--- a/src/cpu/arm/vm/bytes_arm.hpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/arm/vm/bytes_arm.hpp	Thu Aug 10 04:49:27 2017 -0700
@@ -35,12 +35,6 @@
 class Bytes: AllStatic {
 
  public:
-  // Returns true if the byte ordering used by Java is different from the native byte ordering
-  // of the underlying machine.
-  static inline bool is_Java_byte_ordering_different() {
-    return VM_LITTLE_ENDIAN != 0;
-  }
-
   static inline u2 get_Java_u2(address p) {
     return (u2(p[0]) << 8) | u2(p[1]);
   }
--- a/src/cpu/arm/vm/c1_LIRAssembler_arm.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/arm/vm/c1_LIRAssembler_arm.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1453,10 +1453,11 @@
       ciKlass* k = op->klass();
       assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);
 
+      if (stub->is_simple_exception_stub()) {
       // TODO: ARM - Late binding is used to prevent confusion of register allocator
       assert(stub->is_exception_throw_stub(), "must be");
       ((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
-
+      }
       ciMethodData* md;
       ciProfileData* data;
       int mdo_offset_bias = 0;
--- a/src/cpu/arm/vm/c1_LIRGenerator_arm.cpp	Mon May 15 14:13:57 2017 -0400
+++ b/src/cpu/arm/vm/c1_LIRGenerator_arm.cpp	Thu Aug 10 04:49:27 2017 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -923,8 +923,8 @@
   } else {
     left_arg->load_item();
     if (x->op() == Bytecodes::_imul && right_arg->is_constant()) {
-      int c = right_arg->get_jint_constant();
-      if (c > 0 && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
+      jint c = right_arg->get_jint_constant();
+      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
         right_arg->dont_load_item();
       } else {
         right_arg->load_item();
@@ -1412,12 +1412,20 @@
 
   obj.load_item();
 
-  CodeEmitInfo* info_for_exception = state_for(x);
+  CodeEmitInfo* info_for_exception =
+    (x->needs_exception_state() ? state_for(x) :
+                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));
+
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == NULL, "can't patch this");
     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);
+  } else if (x->is_invokespecial_receiver_check()) {
+    assert(patching_info == NULL, "can't patch this");
+    stub = new DeoptimizeStub(info_for_exception,
+                              Deoptimization::Reason_class_check,
+                              Deoptimization::Action_none);
   } else {
     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);