changeset 11787:943bf73b49c3 (tag: jdk-9+131)
description Merge
author amurillo
date Fri, 05 Aug 2016 09:50:25 -0700
parents 5c61b454a1cf f2f9c77799cd
children 25390dd1ae8c 3cfddcb268e3 28e24d221653
files src/cpu/ppc/vm/interp_masm_ppc_64.hpp src/cpu/ppc/vm/ppc_64.ad src/cpu/ppc/vm/stubRoutines_ppc_64.hpp src/cpu/ppc/vm/templateTable_ppc_64.hpp src/cpu/x86/vm/stubRoutines_x86_32.hpp src/cpu/x86/vm/stubRoutines_x86_64.hpp src/jdk.hotspot.agent/doc/ReadMe-JavaScript.text src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ArrayReferenceImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ArrayTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/BaseLineInfo.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/BooleanTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/BooleanValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ByteTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ByteValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/CharTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/CharValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ClassLoaderReferenceImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ClassObjectReferenceImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ClassTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ConcreteMethodImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ConnectorImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/DoubleTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/DoubleValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/FieldImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/FloatTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/FloatValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/IntegerTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/IntegerValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/InterfaceTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/JNITypeParser.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/JVMTIThreadState.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/LineInfo.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/LocalVariableImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/LocationImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/LongTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/LongValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/MethodImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/MirrorImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/MonitorInfoImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/NonConcreteMethodImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ObjectReferenceImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/PrimitiveTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/PrimitiveValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/SACoreAttachingConnector.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/SADebugServer.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/SADebugServerAttachingConnector.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/SAJDIClassLoader.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/SAPIDAttachingConnector.java 
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/SDE.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ShortTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ShortValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/StackFrameImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/StratumLineInfo.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/StringReferenceImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ThreadGroupReferenceImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ThreadReferenceImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/TypeComponentImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/TypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/VMModifiers.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ValueContainer.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ValueImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/VirtualMachineImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/VoidTypeImpl.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/VoidValueImpl.java src/jdk.hotspot.agent/test/jdi/README.jjh src/jdk.hotspot.agent/test/jdi/SASanityChecker.java src/jdk.hotspot.agent/test/jdi/TEST.ROOT src/jdk.hotspot.agent/test/jdi/TargetAdapter.java src/jdk.hotspot.agent/test/jdi/TargetListener.java src/jdk.hotspot.agent/test/jdi/TestScaffold.java src/jdk.hotspot.agent/test/jdi/VMConnection.java src/jdk.hotspot.agent/test/jdi/jstack.sh src/jdk.hotspot.agent/test/jdi/jstack64.sh src/jdk.hotspot.agent/test/jdi/multivm.java src/jdk.hotspot.agent/test/jdi/multivm.sh src/jdk.hotspot.agent/test/jdi/runjdb.sh src/jdk.hotspot.agent/test/jdi/runjpda.sh src/jdk.hotspot.agent/test/jdi/runsa.sh src/jdk.hotspot.agent/test/jdi/sagclient.java src/jdk.hotspot.agent/test/jdi/sagdoit.java src/jdk.hotspot.agent/test/jdi/sagtarg.java src/jdk.hotspot.agent/test/jdi/sagtest.java src/jdk.hotspot.agent/test/jdi/sasanity.sh src/jdk.hotspot.agent/test/jdi/serialvm.java src/jdk.hotspot.agent/test/jdi/serialvm.sh src/os/aix/vm/mutex_aix.inline.hpp src/os/bsd/vm/mutex_bsd.cpp src/os/bsd/vm/mutex_bsd.inline.hpp src/os/linux/vm/mutex_linux.cpp src/os/linux/vm/mutex_linux.inline.hpp src/os/solaris/vm/mutex_solaris.cpp src/os/solaris/vm/mutex_solaris.inline.hpp src/os/windows/vm/mutex_windows.cpp src/os/windows/vm/mutex_windows.inline.hpp test/compiler/c1/6478991/NullCheckTest.java test/compiler/c1/6579789/Test6579789.java test/compiler/c1/6756768/Test6756768.java test/compiler/c1/6756768/Test6756768_2.java test/compiler/c1/6757316/Test6757316.java test/compiler/c1/6758234/Test6758234.java test/compiler/c1/6769124/TestArrayCopy6769124.java test/compiler/c1/6769124/TestDeoptInt6769124.java test/compiler/c1/6769124/TestUnalignedLoad6769124.java test/compiler/c1/6795465/Test6795465.java test/compiler/c1/6849574/Test.java test/compiler/c1/6855215/Test6855215.java test/compiler/c1/6932496/Test6932496.java test/compiler/c1/7042153/Test7042153.java test/compiler/c1/7090976/Test7090976.java test/compiler/c1/7103261/Test7103261.java test/compiler/c1/7123108/Test7123108.java test/compiler/c1/8004051/Test8004051.java test/compiler/c1/8011706/Test8011706.java test/compiler/c1/8011771/Test8011771.java test/compiler/c2/5057225/Test5057225.java test/compiler/c2/5091921/Test5091921.java test/compiler/c2/5091921/Test6186134.java test/compiler/c2/5091921/Test6196102.java test/compiler/c2/5091921/Test6357214.java 
test/compiler/c2/5091921/Test6559156.java test/compiler/c2/5091921/Test6753639.java test/compiler/c2/5091921/Test6850611.java test/compiler/c2/5091921/Test6890943.java test/compiler/c2/5091921/Test6897150.java test/compiler/c2/5091921/Test6905845.java test/compiler/c2/5091921/Test6931567.java test/compiler/c2/5091921/Test6935022.java test/compiler/c2/5091921/Test6959129.java test/compiler/c2/5091921/Test6985295.java test/compiler/c2/5091921/Test6992759.java test/compiler/c2/5091921/Test7005594.java test/compiler/c2/5091921/Test7005594.sh test/compiler/c2/5091921/Test7020614.java test/compiler/c2/5091921/input6890943.txt test/compiler/c2/5091921/output6890943.txt test/compiler/c2/6340864/TestByteVect.java test/compiler/c2/6340864/TestDoubleVect.java test/compiler/c2/6340864/TestFloatVect.java test/compiler/c2/6340864/TestIntVect.java test/compiler/c2/6340864/TestLongVect.java test/compiler/c2/6340864/TestShortVect.java test/compiler/c2/6443505/Test6443505.java test/compiler/c2/6589834/InlinedArrayCloneTestCase.java test/compiler/c2/6589834/Test_ia32.java test/compiler/c2/6603011/Test.java test/compiler/c2/6636138/Test1.java test/compiler/c2/6636138/Test2.java test/compiler/c2/6646019/Test.java test/compiler/c2/6646020/Tester.java test/compiler/c2/6661247/Test.java test/compiler/c2/6663621/IVTest.java test/compiler/c2/6663848/Tester.java test/compiler/c2/6663854/Test6663854.java test/compiler/c2/6695810/Test.java test/compiler/c2/6700047/Test6700047.java test/compiler/c2/6711100/Test.java test/compiler/c2/6711117/Test.java test/compiler/c2/6712835/Test6712835.java test/compiler/c2/6714694/Tester.java test/compiler/c2/6724218/Test.java test/compiler/c2/6732154/Test6732154.java test/compiler/c2/6741738/Tester.java test/compiler/c2/6772683/InterruptedTest.java test/compiler/c2/6792161/Test6792161.java test/compiler/c2/6795362/Test6795362.java test/compiler/c2/6796786/Test6796786.java test/compiler/c2/6799693/Test.java test/compiler/c2/6800154/Test6800154.java test/compiler/c2/6805724/Test6805724.java test/compiler/c2/6823453/Test.java test/compiler/c2/6832293/Test.java test/compiler/c2/6837011/Test6837011.java test/compiler/c2/6837094/Test.java test/compiler/c2/6843752/Test.java test/compiler/c2/6851282/Test.java test/compiler/c2/6852078/Test6852078.java test/compiler/c2/6857159/Test6857159.java test/compiler/c2/6863155/Test6863155.java test/compiler/c2/6865031/Test.java test/compiler/c2/6866651/Test.java test/compiler/c2/6877254/Test.java test/compiler/c2/6880034/Test6880034.java test/compiler/c2/6885584/Test6885584.java test/compiler/c2/6894807/IsInstanceTest.java test/compiler/c2/6901572/Test.java test/compiler/c2/6910484/Test.java test/compiler/c2/6910605/Test.java test/compiler/c2/6910618/Test.java test/compiler/c2/6912517/Test.java test/compiler/c2/6916644/Test6916644.java test/compiler/c2/6921969/TestMultiplyLongHiZero.java test/compiler/c2/6930043/Test6930043.java test/compiler/c2/6946040/TestCharShortByteSwap.java test/compiler/c2/6956668/Test6956668.java test/compiler/c2/6958485/Test.java test/compiler/c2/6968348/Test6968348.java test/compiler/c2/6973329/Test.java test/compiler/c2/7002666/Test7002666.java test/compiler/c2/7009359/Test7009359.java test/compiler/c2/7017746/Test.java test/compiler/c2/7024475/Test7024475.java test/compiler/c2/7029152/Test.java test/compiler/c2/7041100/Test7041100.java test/compiler/c2/7046096/Test7046096.java test/compiler/c2/7047069/Test7047069.java test/compiler/c2/7048332/Test7048332.java test/compiler/c2/7068051/Test7068051.java 
test/compiler/c2/7070134/Stemmer.java test/compiler/c2/7070134/words test/compiler/c2/7110586/Test7110586.java test/compiler/c2/7125879/Test7125879.java test/compiler/c2/7160610/Test7160610.java test/compiler/c2/7169782/Test7169782.java test/compiler/c2/7174363/Test7174363.java test/compiler/c2/7177917/Test7177917.java test/compiler/c2/7179138/Test7179138_1.java test/compiler/c2/7179138/Test7179138_2.java test/compiler/c2/7190310/Test7190310.java test/compiler/c2/7190310/Test7190310_unsafe.java test/compiler/c2/7192963/TestByteVect.java test/compiler/c2/7192963/TestDoubleVect.java test/compiler/c2/7192963/TestFloatVect.java test/compiler/c2/7192963/TestIntVect.java test/compiler/c2/7192963/TestLongVect.java test/compiler/c2/7192963/TestShortVect.java test/compiler/c2/7199742/Test7199742.java test/compiler/c2/7200264/Test7200264.sh test/compiler/c2/7200264/TestIntVect.java test/compiler/c2/8000805/Test8000805.java test/compiler/c2/8002069/Test8002069.java test/compiler/c2/8004741/Test8004741.java test/compiler/c2/8004867/TestIntAtomicCAS.java test/compiler/c2/8004867/TestIntAtomicOrdered.java test/compiler/c2/8004867/TestIntAtomicVolatile.java test/compiler/c2/8004867/TestIntUnsafeCAS.java test/compiler/c2/8004867/TestIntUnsafeOrdered.java test/compiler/c2/8004867/TestIntUnsafeVolatile.java test/compiler/c2/8005956/PolynomialRoot.java test/compiler/c2/8007294/Test8007294.java test/compiler/c2/8007722/Test8007722.java test/compiler/codegen/6378821/Test6378821.java test/compiler/codegen/6431242/Test.java test/compiler/codegen/6797305/Test6797305.java test/compiler/codegen/6814842/Test6814842.java test/compiler/codegen/6823354/Test6823354.java test/compiler/codegen/6875866/Test.java test/compiler/codegen/6879902/Test6879902.java test/compiler/codegen/6896617/Test6896617.java test/compiler/codegen/6909839/Test6909839.java test/compiler/codegen/6935535/Test.java test/compiler/codegen/6942326/Test.java test/compiler/codegen/7009231/Test7009231.java test/compiler/codegen/7088419/CRCTest.java test/compiler/codegen/7100757/Test7100757.java test/compiler/codegen/7119644/TestBooleanVect.java test/compiler/codegen/7119644/TestByteDoubleVect.java test/compiler/codegen/7119644/TestByteFloatVect.java test/compiler/codegen/7119644/TestByteIntVect.java test/compiler/codegen/7119644/TestByteLongVect.java test/compiler/codegen/7119644/TestByteShortVect.java test/compiler/codegen/7119644/TestByteVect.java test/compiler/codegen/7119644/TestCharShortVect.java test/compiler/codegen/7119644/TestCharVect.java test/compiler/codegen/7119644/TestDoubleVect.java test/compiler/codegen/7119644/TestFloatDoubleVect.java test/compiler/codegen/7119644/TestFloatVect.java test/compiler/codegen/7119644/TestIntDoubleVect.java test/compiler/codegen/7119644/TestIntFloatVect.java test/compiler/codegen/7119644/TestIntLongVect.java test/compiler/codegen/7119644/TestIntVect.java test/compiler/codegen/7119644/TestLongDoubleVect.java test/compiler/codegen/7119644/TestLongFloatVect.java test/compiler/codegen/7119644/TestLongVect.java test/compiler/codegen/7119644/TestShortDoubleVect.java test/compiler/codegen/7119644/TestShortFloatVect.java test/compiler/codegen/7119644/TestShortIntVect.java test/compiler/codegen/7119644/TestShortLongVect.java test/compiler/codegen/7119644/TestShortVect.java test/compiler/codegen/7184394/TestAESBase.java test/compiler/codegen/7184394/TestAESDecode.java test/compiler/codegen/7184394/TestAESEncode.java test/compiler/codegen/7184394/TestAESMain.java test/compiler/codegen/8001183/TestCharVect.java 
test/compiler/codegen/8005033/Test8005033.java test/compiler/codegen/8011901/Test8011901.java test/compiler/codegen/8144028/BitTests.java test/compiler/eliminateAutobox/6934604/TestByteBoxing.java test/compiler/eliminateAutobox/6934604/TestDoubleBoxing.java test/compiler/eliminateAutobox/6934604/TestFloatBoxing.java test/compiler/eliminateAutobox/6934604/TestIntBoxing.java test/compiler/eliminateAutobox/6934604/TestLongBoxing.java test/compiler/eliminateAutobox/6934604/TestShortBoxing.java test/compiler/escapeAnalysis/6689060/Test.java test/compiler/escapeAnalysis/6716441/Tester.java test/compiler/escapeAnalysis/6726999/Test.java test/compiler/escapeAnalysis/6775880/Test.java test/compiler/escapeAnalysis/6795161/Test.java test/compiler/escapeAnalysis/6895383/Test.java test/compiler/escapeAnalysis/6896727/Test.java test/compiler/interpreter/6539464/Test.java test/compiler/interpreter/6833129/Test.java test/compiler/interpreter/7116216/LargeFrame.java test/compiler/interpreter/7116216/StackOverflow.java test/compiler/intrinsics/6982370/Test6982370.java test/compiler/intrinsics/8005419/Test8005419.java test/compiler/intrinsics/adler32/TestAdler32.java test/compiler/intrinsics/class/TestClassIsPrimitive.java test/compiler/intrinsics/classcast/NullCheckDroppingsTest.java test/compiler/intrinsics/clone/TestObjectClone.java test/compiler/intrinsics/crc32/TestCRC32.java test/compiler/intrinsics/crc32c/TestCRC32C.java test/compiler/intrinsics/hashcode/TestHashCode.java test/compiler/intrinsics/montgomerymultiply/MontgomeryMultiplyTest.java test/compiler/intrinsics/muladd/TestMulAdd.java test/compiler/intrinsics/multiplytolen/TestMultiplyToLen.java test/compiler/intrinsics/multiplytolen/TestMultiplyToLenReturnProfile.java test/compiler/intrinsics/squaretolen/TestSquareToLen.java test/compiler/intrinsics/stringequals/TestStringEqualsBadLength.java test/compiler/jsr292/6990212/Test6990212.java test/compiler/jsr292/7082949/Test7082949.java test/compiler/loopopts/6659207/Test.java test/compiler/loopopts/6855164/Test.java test/compiler/loopopts/6860469/Test.java test/compiler/loopopts/7044738/Test7044738.java test/compiler/loopopts/7052494/Test7052494.java test/compiler/native/TestDirtyInt.java test/compiler/native/libTestDirtyInt.c test/compiler/runtime/6778657/Test.java test/compiler/runtime/6826736/Test.java test/compiler/runtime/6859338/Test6859338.java test/compiler/runtime/6863420/Test.java test/compiler/runtime/6865265/StackOverflowBug.java test/compiler/runtime/6891750/Test6891750.java test/compiler/runtime/6892265/Test.java test/compiler/runtime/7088020/Test7088020.java test/compiler/runtime/7141637/SpreadNullArg.java test/compiler/runtime/7196199/Test7196199.java test/compiler/runtime/8010927/Test8010927.java test/compiler/runtime/8015436/Test8015436.java test/compiler/uncommontrap/8009761/Test8009761.java test/runtime/7107135/Test.java test/runtime/7107135/Test7107135.sh test/runtime/7107135/TestMT.java test/runtime/7107135/test.c test/runtime/StackGuardPages/invoke.c test/runtime/Unsafe/GetUnsafe.java test/runtime/jsig/Test8017498.sh test/runtime/jsig/TestJNI.c test/serviceability/sa/TestClassLoaderStats.java test/serviceability/sa/TestStackTrace.java
diffstat 1597 files changed, 180884 insertions(+), 189396 deletions(-)
--- a/.mx.jvmci/mx_jvmci.py	Thu Aug 04 15:52:14 2016 -0700
+++ b/.mx.jvmci/mx_jvmci.py	Fri Aug 05 09:50:25 2016 -0700
@@ -64,7 +64,7 @@
 _jdkDebugLevels = ['release', 'fastdebug', 'slowdebug']
 
 # TODO: add client once/if it can be built on 64-bit platforms
-_jdkJvmVariants = ['server']
+_jdkJvmVariants = ['server', 'client']
 
 """
 Translation table from mx_jvmci:8 --vmbuild values to mx_jvmci:9 --jdk-debug-level values.
--- a/make/gensrc/GensrcAdlc.gmk	Thu Aug 04 15:52:14 2016 -0700
+++ b/make/gensrc/GensrcAdlc.gmk	Fri Aug 05 09:50:25 2016 -0700
@@ -51,6 +51,9 @@
     ADLC_CFLAGS_WARNINGS := -W3 -D_CRT_SECURE_NO_WARNINGS
   endif
 
+  # Set the C++ standard if supported
+  ADLC_CFLAGS += $(CXXSTD_CXXFLAG)
+  
   # NOTE: The old build didn't set -DASSERT for windows but it doesn't seem to
   # hurt.
   ADLC_CFLAGS += -DASSERT
@@ -153,10 +156,10 @@
 	$(call MakeDir, $(@D))
 	$(call ExecuteWithLog, $(ADLC_SUPPORT_DIR)/adlc_run, \
 	    $(FIXPATH) $(ADLC_TOOL) $(ADLCFLAGS) $(SINGLE_AD_SRCFILE) \
-	        -c$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU).cpp \
-	        -h$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU).hpp \
-	        -a$(ADLC_SUPPORT_DIR)/dfa_$(HOTSPOT_TARGET_CPU).cpp \
-	        -v$(ADLC_SUPPORT_DIR)/adGlobals_$(HOTSPOT_TARGET_CPU).hpp)
+	        -c$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
+	        -h$(ADLC_SUPPORT_DIR)/ad_$(HOTSPOT_TARGET_CPU_ARCH).hpp \
+	        -a$(ADLC_SUPPORT_DIR)/dfa_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
+	        -v$(ADLC_SUPPORT_DIR)/adGlobals_$(HOTSPOT_TARGET_CPU_ARCH).hpp)
 	$(TOUCH) $@
 
   ##############################################################################
@@ -164,17 +167,17 @@
   # and postprocess them by fixing dummy #line directives.
 
   ADLC_GENERATED_FILES := $(addprefix $(JVM_VARIANT_OUTPUTDIR)/gensrc/adfiles/, \
-      ad_$(HOTSPOT_TARGET_CPU).cpp \
-      ad_$(HOTSPOT_TARGET_CPU).hpp \
-      ad_$(HOTSPOT_TARGET_CPU)_clone.cpp \
-      ad_$(HOTSPOT_TARGET_CPU)_expand.cpp \
-      ad_$(HOTSPOT_TARGET_CPU)_format.cpp \
-      ad_$(HOTSPOT_TARGET_CPU)_gen.cpp \
-      ad_$(HOTSPOT_TARGET_CPU)_misc.cpp \
-      ad_$(HOTSPOT_TARGET_CPU)_peephole.cpp \
-      ad_$(HOTSPOT_TARGET_CPU)_pipeline.cpp \
-      adGlobals_$(HOTSPOT_TARGET_CPU).hpp \
-      dfa_$(HOTSPOT_TARGET_CPU).cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH).hpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH)_clone.cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH)_expand.cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH)_format.cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH)_gen.cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH)_misc.cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH)_peephole.cpp \
+      ad_$(HOTSPOT_TARGET_CPU_ARCH)_pipeline.cpp \
+      adGlobals_$(HOTSPOT_TARGET_CPU_ARCH).hpp \
+      dfa_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
   )
 
   $(JVM_VARIANT_OUTPUTDIR)/gensrc/adfiles/%: $(ADLC_RUN_MARKER)
--- a/make/lib/CompileGtest.gmk	Thu Aug 04 15:52:14 2016 -0700
+++ b/make/lib/CompileGtest.gmk	Fri Aug 05 09:50:25 2016 -0700
@@ -104,7 +104,7 @@
         -I$(GTEST_FRAMEWORK_SRC)/include, \
     CFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
     CXXFLAGS_DEBUG_SYMBOLS := $(JVM_CFLAGS_SYMBOLS), \
-    LDFLAGS := $(LDFLAGS_TESTEXE), \
+    LDFLAGS := $(LDFLAGS_JDKEXE), \
     LDFLAGS_unix := -L$(JVM_OUTPUTDIR)/gtest $(call SET_SHARED_LIBRARY_ORIGIN), \
     LDFLAGS_solaris := -library=stlport4, \
     LIBS_unix := -ljvm, \
--- a/make/lib/CompileJvm.gmk	Thu Aug 04 15:52:14 2016 -0700
+++ b/make/lib/CompileJvm.gmk	Fri Aug 05 09:50:25 2016 -0700
@@ -60,12 +60,15 @@
     -I$(HOTSPOT_TOPDIR)/src/share/vm/prims \
     #
 
+# INCLUDE_SUFFIX_* is only meant for including the proper
+# platform files. Don't use it to guard code. Use the value of
+# HOTSPOT_TARGET_CPU_DEFINE etc. instead.
+# Remaining TARGET_ARCH_* is needed to distinguish closed and open
+# 64-bit ARM ports (also called AARCH64).
 JVM_CFLAGS_TARGET_DEFINES += \
-    -DTARGET_OS_FAMILY_$(HOTSPOT_TARGET_OS) \
-    -DTARGET_ARCH_MODEL_$(HOTSPOT_TARGET_CPU) \
     -DTARGET_ARCH_$(HOTSPOT_TARGET_CPU_ARCH) \
-    -DTARGET_OS_ARCH_MODEL_$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU) \
-    -DTARGET_OS_ARCH_$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH) \
+    -DINCLUDE_SUFFIX_OS=_$(HOTSPOT_TARGET_OS) \
+    -DINCLUDE_SUFFIX_CPU=_$(HOTSPOT_TARGET_CPU_ARCH) \
     -DTARGET_COMPILER_$(HOTSPOT_TOOLCHAIN_TYPE) \
     -D$(HOTSPOT_TARGET_CPU_DEFINE) \
     -DHOTSPOT_LIB_ARCH='"$(OPENJDK_TARGET_CPU_LEGACY_LIB)"' \
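Note on the new INCLUDE_SUFFIX_* defines above: they only feed the header-name macros that the platform-specific sources now use (see, for example, the bytes_aarch64.hpp and copy_aarch64.hpp hunks further down, which replace #ifdef TARGET_OS_ARCH_linux_aarch64 include blocks with #include OS_CPU_HEADER_INLINE(bytes) and OS_CPU_HEADER_INLINE(copy)). The macro definitions themselves live in HotSpot's utilities/macros.hpp and are not part of this diff; the following is only a minimal sketch, under the assumption that helpers such as PASTE_TOKENS, XSTR and OS_CPU_HEADER_STEM are defined roughly like this:

    // Sketch only -- the real definitions are in utilities/macros.hpp, which
    // is not in this changeset.  With -DINCLUDE_SUFFIX_OS=_linux and
    // -DINCLUDE_SUFFIX_CPU=_aarch64 on the compile line (as set above),
    // token pasting plus stringization yields the platform header name.
    #define PASTE_TOKENS_AUX(x, y) x ## y
    #define PASTE_TOKENS(x, y) PASTE_TOKENS_AUX(x, y)   // expand args, then paste
    #define STR(s)  #s
    #define XSTR(s) STR(s)                              // expand, then stringize

    #define OS_CPU_HEADER_STEM(basename) \
      PASTE_TOKENS(basename, PASTE_TOKENS(INCLUDE_SUFFIX_OS, INCLUDE_SUFFIX_CPU))
    #define OS_CPU_HEADER_INLINE(basename) \
      XSTR(OS_CPU_HEADER_STEM(basename).inline.hpp)

    // #include OS_CPU_HEADER_INLINE(bytes)
    // therefore expands to
    // #include "bytes_linux_aarch64.inline.hpp"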
--- a/make/test/JtregNative.gmk	Thu Aug 04 15:52:14 2016 -0700
+++ b/make/test/JtregNative.gmk	Fri Aug 05 09:50:25 2016 -0700
@@ -53,6 +53,8 @@
     $(HOTSPOT_TOPDIR)/test/compiler/native \
     $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
     $(HOTSPOT_TOPDIR)/test/testlibrary/jvmti \
+    $(HOTSPOT_TOPDIR)/test/compiler/jvmci/jdk.vm.ci.code.test \
+    $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetModulesInfo \
     #
 
 # Add conditional directories here when needed.
@@ -62,12 +64,26 @@
     $(HOTSPOT_TOPDIR)/test/runtime/ThreadSignalMask
 endif
 
+ifeq ($(OPENJDK_TARGET_OS), linux)
+BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
+    $(HOTSPOT_TOPDIR)/test/runtime/execstack \
+    $(HOTSPOT_TOPDIR)/test/runtime/jsig \
+    $(HOTSPOT_TOPDIR)/test/runtime/StackGuardPages
+endif
+
 ifeq ($(TOOLCHAIN_TYPE), solstudio)
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_liboverflow := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libSimpleClassFileLoadHook := -lc
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libGetNamedModuleTest := -lc
 endif
 
+ifeq ($(OPENJDK_TARGET_OS), linux)
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rw := -z noexecstack
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rwx := -z execstack
+    BUILD_HOTSPOT_JTREG_EXECUTABLES_LDFLAGS_exeinvoke := -ljvm -lpthread
+    BUILD_TEST_invoke_exeinvoke.c_OPTIMIZATION := NONE
+endif
+
 BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native
 
 BUILD_HOTSPOT_JTREG_IMAGE_DIR := $(TEST_IMAGE_DIR)/hotspot/jtreg
--- a/src/cpu/aarch64/vm/aarch64.ad	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/aarch64.ad	Fri Aug 05 09:50:25 2016 -0700
@@ -1942,11 +1942,34 @@
 
   bool is_CAS(int opcode)
   {
-    return (opcode == Op_CompareAndSwapI ||
-	    opcode == Op_CompareAndSwapL ||
-	    opcode == Op_CompareAndSwapN ||
-	    opcode == Op_CompareAndSwapP);
-  }
+    switch(opcode) {
+      // We handle these
+    case Op_CompareAndSwapI:
+    case Op_CompareAndSwapL:
+    case Op_CompareAndSwapP:
+    case Op_CompareAndSwapN:
+ // case Op_CompareAndSwapB:
+ // case Op_CompareAndSwapS:
+      return true;
+      // These are TBD
+    case Op_WeakCompareAndSwapB:
+    case Op_WeakCompareAndSwapS:
+    case Op_WeakCompareAndSwapI:
+    case Op_WeakCompareAndSwapL:
+    case Op_WeakCompareAndSwapP:
+    case Op_WeakCompareAndSwapN:
+    case Op_CompareAndExchangeB:
+    case Op_CompareAndExchangeS:
+    case Op_CompareAndExchangeI:
+    case Op_CompareAndExchangeL:
+    case Op_CompareAndExchangeP:
+    case Op_CompareAndExchangeN:
+      return false;
+    default:
+      return false;
+    }
+  }
+
 
   // leading_to_trailing
   //
@@ -3330,9 +3353,6 @@
 const bool Matcher::match_rule_supported(int opcode) {
 
   switch (opcode) {
-  case Op_StrComp:
-    if (CompactStrings)  return false;
-    break;
   default:
     break;
   }
@@ -4241,14 +4261,16 @@
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               Assembler::xword, /*acquire*/ false, /*release*/ true);
+               Assembler::xword, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, noreg);
   %}
 
   enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               Assembler::word, /*acquire*/ false, /*release*/ true);
+               Assembler::word, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, noreg);
   %}
 
 
@@ -4260,14 +4282,16 @@
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               Assembler::xword, /*acquire*/ true, /*release*/ true);
+               Assembler::xword, /*acquire*/ true, /*release*/ true,
+               /*weak*/ false, noreg);
   %}
 
   enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
     MacroAssembler _masm(&cbuf);
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
-               Assembler::word, /*acquire*/ true, /*release*/ true);
+               Assembler::word, /*acquire*/ true, /*release*/ true,
+               /*weak*/ false, noreg);
   %}
 
 
@@ -5806,6 +5830,7 @@
 %{
   constraint(ALLOC_IN_RC(no_special_reg));
   match(RegL);
+  match(iRegL_R0);
   format %{ %}
   interface(REG_INTER);
 %}
@@ -5927,6 +5952,39 @@
   interface(REG_INTER);
 %}
 
+// Long 64 bit Register R0 only
+operand iRegL_R0()
+%{
+  constraint(ALLOC_IN_RC(r0_reg));
+  match(RegL);
+  match(iRegLNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Long 64 bit Register R2 only
+operand iRegL_R2()
+%{
+  constraint(ALLOC_IN_RC(r2_reg));
+  match(RegL);
+  match(iRegLNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Long 64 bit Register R3 only
+operand iRegL_R3()
+%{
+  constraint(ALLOC_IN_RC(r3_reg));
+  match(RegL);
+  match(iRegLNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Long 64 bit Register R11 only
 operand iRegL_R11()
 %{
@@ -5983,7 +6041,7 @@
 %}
 
 
-// Register R2 only
+// Register R4 only
 operand iRegI_R4()
 %{
   constraint(ALLOC_IN_RC(int_r4_reg));
@@ -6007,6 +6065,33 @@
   interface(REG_INTER);
 %}
 
+operand iRegN_R0()
+%{
+  constraint(ALLOC_IN_RC(r0_reg));
+  match(iRegN);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand iRegN_R2()
+%{
+  constraint(ALLOC_IN_RC(r2_reg));
+  match(iRegN);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand iRegN_R3()
+%{
+  constraint(ALLOC_IN_RC(r3_reg));
+  match(iRegN);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
 // Integer 64 bit Register not Special
 operand iRegNNoSp()
 %{
@@ -9393,12 +9478,12 @@
   ins_pipe(pipe_slow);
 %}
 
+// standard CompareAndSwapX when we are using barriers
+// these have higher priority than the rules selected by a predicate
+
 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 // can't match them
 
-// standard CompareAndSwapX when we are using barriers
-// these have higher priority than the rules selected by a predicate
-
 instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
 
   match(Set res (CompareAndSwapI mem (Binary oldval newval)));
@@ -9550,6 +9635,216 @@
 %}
 
 
+// ---------------------------------------------------------------------
+// Sundry CAS operations.  Note that release is always true,
+// regardless of the memory ordering of the CAS.  This is because we
+// need the volatile case to be sequentially consistent but there is
+// no trailing StoreLoad barrier emitted by C2.  Unfortunately we
+// can't check the type of memory ordering here, so we always emit a
+// STLXR.
+
+// This section is generated from aarch64_ad_cas.m4
+
+
+instruct compareAndExchangeB(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ uxtbw(rscratch2, $oldval$$Register);
+    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
+               Assembler::byte, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, $res$$Register);
+    __ sxtbw($res$$Register, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeS(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ uxthw(rscratch2, $oldval$$Register);
+    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
+               Assembler::halfword, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, $res$$Register);
+    __ sxthw($res$$Register, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeI(iRegI_R0 res, indirect mem, iRegI_R2 oldval, iRegI_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::word, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeL(iRegL_R0 res, indirect mem, iRegL_R2 oldval, iRegL_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::xword, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeN(iRegN_R0 res, indirect mem, iRegN_R2 oldval, iRegN_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::word, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct compareAndExchangeP(iRegP_R0 res, indirect mem, iRegP_R2 oldval, iRegP_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::xword, /*acquire*/ false, /*release*/ true,
+               /*weak*/ false, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ uxtbw(rscratch2, $oldval$$Register);
+    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
+               Assembler::byte, /*acquire*/ false, /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ uxthw(rscratch2, $oldval$$Register);
+    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
+               Assembler::halfword, /*acquire*/ false, /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::word, /*acquire*/ false, /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::xword, /*acquire*/ false, /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::word, /*acquire*/ false, /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+  ins_cost(2 * VOLATILE_REF_COST);
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::xword, /*acquire*/ false, /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}
+// ---------------------------------------------------------------------
+
 instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
   match(Set prev (GetAndSetI mem newv));
   format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
@@ -14988,11 +15283,61 @@
   format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
   ins_encode %{
     // Count is in 8-bit bytes; non-Compact chars are 16 bits.
-    __ asrw($cnt1$$Register, $cnt1$$Register, 1);
-    __ asrw($cnt2$$Register, $cnt2$$Register, 1);
     __ string_compare($str1$$Register, $str2$$Register,
                       $cnt1$$Register, $cnt2$$Register, $result$$Register,
-                      $tmp1$$Register);
+                      $tmp1$$Register,
+                      fnoreg, fnoreg, StrIntrinsicNode::UU);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
+                        iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr)
+%{
+  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
+
+  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
+  ins_encode %{
+    __ string_compare($str1$$Register, $str2$$Register,
+                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
+                      $tmp1$$Register,
+                      fnoreg, fnoreg, StrIntrinsicNode::LL);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
+                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
+%{
+  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);
+
+  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
+  ins_encode %{
+    __ string_compare($str1$$Register, $str2$$Register,
+                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
+                      $tmp1$$Register,
+                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
+                        iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr)
+%{
+  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr);
+
+  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1" %}
+  ins_encode %{
+    __ string_compare($str1$$Register, $str2$$Register,
+                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
+                      $tmp1$$Register,
+                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU);
   %}
   ins_pipe(pipe_class_memory);
 %}
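Note on the new CompareAndExchange*/WeakCompareAndSwap* rules in the aarch64.ad hunk above: as the comment at the top of that section says, the weak forms do not retry, so a failure may be spurious (the store-exclusive simply lost its reservation). These C2 nodes back the JDK 9 Unsafe/VarHandle weak compare-and-set intrinsics (background, not shown in this diff), which is why callers are expected to loop. A minimal, self-contained C++ illustration of that contract, using std::atomic only as a stand-in for the generated code:

    #include <atomic>

    // Illustration of the "weak CAS may fail spuriously" contract described
    // in the comment above the new CAS rules.  compare_exchange_weak has the
    // same shape as the LDXR/STLXR pair those rules emit without a retry
    // loop: it can fail even when the expected value matched, so the caller
    // retries.
    int fetch_and_increment(std::atomic<int>& counter) {
      int old = counter.load(std::memory_order_relaxed);
      while (!counter.compare_exchange_weak(old, old + 1,
                                            std::memory_order_seq_cst)) {
        // on failure, 'old' is reloaded with the current value; just retry
      }
      return old;
    }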
--- a/src/cpu/aarch64/vm/bytes_aarch64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/bytes_aarch64.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -67,9 +67,6 @@
 
 
 // The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base]
-
-#ifdef TARGET_OS_ARCH_linux_aarch64
-# include "bytes_linux_aarch64.inline.hpp"
-#endif
+#include OS_CPU_HEADER_INLINE(bytes)
 
 #endif // CPU_AARCH64_VM_BYTES_AARCH64_HPP
--- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1556,13 +1556,13 @@
 }
 
 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
-  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
+  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
   __ cset(rscratch1, Assembler::NE);
   __ membar(__ AnyAny);
 }
 
 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
-  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
+  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
   __ cset(rscratch1, Assembler::NE);
   __ membar(__ AnyAny);
 }
--- a/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -808,7 +808,6 @@
   } else {
     a = new LIR_Address(obj.result(),
                         offset.result(),
-                        LIR_Address::times_1,
                         0,
                         as_BasicType(type));
   }
@@ -1002,7 +1001,6 @@
 
       LIR_Address* a = new LIR_Address(base_op,
                                        index,
-                                       LIR_Address::times_1,
                                        offset,
                                        T_BYTE);
       BasicTypeList signature(3);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/aarch64/vm/c1_LIR_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/register.hpp"
+#include "c1/c1_LIR.hpp"
+
+FloatRegister LIR_OprDesc::as_float_reg() const {
+  return as_FloatRegister(fpu_regnr());
+}
+
+FloatRegister LIR_OprDesc::as_double_reg() const {
+  return as_FloatRegister(fpu_regnrLo());
+}
+
+// Reg2 unused.
+LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
+  assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform");
+  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+                             (reg1 << LIR_OprDesc::reg2_shift) |
+                             LIR_OprDesc::double_type          |
+                             LIR_OprDesc::fpu_register         |
+                             LIR_OprDesc::double_size);
+}
+
+#ifndef PRODUCT
+void LIR_Address::verify() const {
+  assert(base()->is_cpu_register(), "wrong base operand");
+  assert(index()->is_illegal() || index()->is_double_cpu() || index()->is_single_cpu(), "wrong index operand");
+  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
+         "wrong type for addresses");
+}
+#endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/aarch64/vm/cas.m4	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,109 @@
+// Sundry CAS operations.  Note that release is always true,
+// regardless of the memory ordering of the CAS.  This is because we
+// need the volatile case to be sequentially consistent but there is
+// no trailing StoreLoad barrier emitted by C2.  Unfortunately we
+// can't check the type of memory ordering here, so we always emit a
+// STLXR.
+
+define(`CAS_INSN',
+`
+instruct compareAndExchange$1$5(iReg$2_R0 res, indirect mem, iReg$2_R2 oldval, iReg$2_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
+  ifelse($5,Acq,'  predicate(needs_acquiring_load_exclusive(n));
+  ins_cost(VOLATILE_REF_COST);`,'  ins_cost(2 * VOLATILE_REF_COST);`)
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::$4, /*acquire*/ ifelse($5,Acq,true,false), /*release*/ true,
+               /*weak*/ false, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}')dnl
+define(`CAS_INSN4',
+`
+instruct compareAndExchange$1$7(iReg$2_R0 res, indirect mem, iReg$2_R2 oldval, iReg$2_R3 newval, rFlagsReg cr) %{
+  match(Set res (CompareAndExchange$1 mem (Binary oldval newval)));
+  ifelse($7,Acq,'  predicate(needs_acquiring_load_exclusive(n));
+  ins_cost(VOLATILE_REF_COST);`,'  ins_cost(2 * VOLATILE_REF_COST);`)
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
+  %}
+  ins_encode %{
+    __ $5(rscratch2, $oldval$$Register);
+    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
+               Assembler::$4, /*acquire*/ ifelse($5,Acq,true,false), /*release*/ true,
+               /*weak*/ false, $res$$Register);
+    __ $6($res$$Register, $res$$Register);
+  %}
+  ins_pipe(pipe_slow);
+%}')dnl
+CAS_INSN4(B,I,byte,byte,uxtbw,sxtbw)
+CAS_INSN4(S,I,short,halfword,uxthw,sxthw)
+CAS_INSN(I,I,int,word)
+CAS_INSN(L,L,long,xword)
+CAS_INSN(N,N,narrow oop,word)
+CAS_INSN(P,P,ptr,xword)
+dnl
+dnl CAS_INSN4(B,I,byte,byte,uxtbw,sxtbw,Acq)
+dnl CAS_INSN4(S,I,short,halfword,uxthw,sxthw,Acq)
+dnl CAS_INSN(I,I,int,word,Acq)
+dnl CAS_INSN(L,L,long,xword,Acq)
+dnl CAS_INSN(N,N,narrow oop,word,Acq)
+dnl CAS_INSN(P,P,ptr,xword,Acq)
+dnl
+define(`CAS_INSN2',
+`
+instruct weakCompareAndSwap$1$6(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
+  ifelse($6,Acq,'  predicate(needs_acquiring_load_exclusive(n));
+  ins_cost(VOLATILE_REF_COST);`,'  ins_cost(2 * VOLATILE_REF_COST);`)
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ uxt$5(rscratch2, $oldval$$Register);
+    __ cmpxchg($mem$$Register, rscratch2, $newval$$Register,
+               Assembler::$4, /*acquire*/ ifelse($6,Acq,true,false), /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}')dnl
+define(`CAS_INSN3',
+`
+instruct weakCompareAndSwap$1$5(iRegINoSp res, indirect mem, iReg$2 oldval, iReg$2 newval, rFlagsReg cr) %{
+  match(Set res (WeakCompareAndSwap$1 mem (Binary oldval newval)));
+  ifelse($5,Acq,'  predicate(needs_acquiring_load_exclusive(n));
+  ins_cost(VOLATILE_REF_COST);`,'  ins_cost(2 * VOLATILE_REF_COST);`)
+  effect(KILL cr);
+  format %{
+    "cmpxchg $res = $mem, $oldval, $newval\t# ($3, weak) if $mem == $oldval then $mem <-- $newval"
+    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+  %}
+  ins_encode %{
+    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
+               Assembler::$4, /*acquire*/ ifelse($5,Acq,true,false), /*release*/ true,
+               /*weak*/ true, noreg);
+    __ csetw($res$$Register, Assembler::EQ);
+  %}
+  ins_pipe(pipe_slow);
+%}')dnl
+CAS_INSN2(B,I,byte,byte,bw)
+CAS_INSN2(S,I,short,halfword,hw)
+CAS_INSN3(I,I,int,word)
+CAS_INSN3(L,L,long,xword)
+CAS_INSN3(N,N,narrow oop,word)
+CAS_INSN3(P,P,ptr,xword)
+dnl CAS_INSN2(B,I,byte,byte,bw,Acq)
+dnl CAS_INSN2(S,I,short,halfword,hw,Acq)
+dnl CAS_INSN3(I,I,int,word,Acq)
+dnl CAS_INSN3(L,L,long,xword,Acq)
+dnl CAS_INSN3(N,N,narrow oop,word,Acq)
+dnl CAS_INSN3(P,P,ptr,xword,Acq)
+dnl
--- a/src/cpu/aarch64/vm/copy_aarch64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/copy_aarch64.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,9 +29,7 @@
 // Inline functions for memory copy and fill.
 
 // Contains inline asm implementations
-#ifdef TARGET_OS_ARCH_linux_aarch64
-# include "copy_linux_aarch64.inline.hpp"
-#endif
+#include OS_CPU_HEADER_INLINE(copy)
 
 
 static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/globals_aarch64.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -70,11 +70,7 @@
 
 define_pd_global(uintx, TypeProfileLevel, 111);
 
-// No performance work done here yet.
-define_pd_global(bool, CompactStrings, false);
-
-// avoid biased locking while we are bootstrapping the aarch64 build
-define_pd_global(bool, UseBiasedLocking, false);
+define_pd_global(bool, CompactStrings, true);
 
 // Clear short arrays bigger than one word in an arch-specific way
 define_pd_global(intx, InitArrayShortSize, BytesPerLong);
@@ -118,6 +114,7 @@
 // Don't attempt to use Neon on builtin sim until builtin sim supports it
 #define UseCRC32 false
 #define UseSIMDForMemoryOps    false
+#define AvoidUnalignedAccesses false
 
 #else
 #define UseBuiltinSim           false
@@ -144,6 +141,8 @@
           "Use CRC32 instructions for CRC32 computation")               \
   product(bool, UseSIMDForMemoryOps, false,                             \
           "Use SIMD instructions in generated memory move code")        \
+  product(bool, AvoidUnalignedAccesses, false,                          \
+          "Avoid generating unaligned memory accesses")                 \
   product(bool, UseLSE, false,                                          \
           "Use LSE instructions")                                       \
   product(bool, UseBlockZeroing, true,                                  \
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -36,6 +36,7 @@
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "opto/compile.hpp"
+#include "opto/intrinsicnode.hpp"
 #include "opto/node.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/icache.hpp"
@@ -565,11 +566,6 @@
   br(Assembler::EQ, done);
 }
 
-
-// added to make this compile
-
-REGISTER_DEFINITION(Register, noreg);
-
 static void pass_arg0(MacroAssembler* masm, Register arg) {
   if (c_rarg0 != arg ) {
     masm->mov(c_rarg0, arg);
@@ -2145,30 +2141,40 @@
     b(*fail);
 }
 
-// A generic CAS; success or failure is in the EQ flag.
+// A generic CAS; success or failure is in the EQ flag.  A weak CAS
+// doesn't retry and may fail spuriously.  If the oldval is wanted,
+// Pass a register for the result, otherwise pass noreg.
+
+// Clobbers rscratch1
 void MacroAssembler::cmpxchg(Register addr, Register expected,
                              Register new_val,
                              enum operand_size size,
                              bool acquire, bool release,
-                             Register tmp) {
+                             bool weak,
+                             Register result) {
+  if (result == noreg)  result = rscratch1;
   if (UseLSE) {
-    mov(tmp, expected);
-    lse_cas(tmp, new_val, addr, size, acquire, release, /*not_pair*/ true);
-    cmp(tmp, expected);
+    mov(result, expected);
+    lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
+    cmp(result, expected);
   } else {
     BLOCK_COMMENT("cmpxchg {");
     Label retry_load, done;
     if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
       prfm(Address(addr), PSTL1STRM);
     bind(retry_load);
-    load_exclusive(tmp, addr, size, acquire);
+    load_exclusive(result, addr, size, acquire);
     if (size == xword)
-      cmp(tmp, expected);
+      cmp(result, expected);
     else
-      cmpw(tmp, expected);
+      cmpw(result, expected);
     br(Assembler::NE, done);
-    store_exclusive(tmp, new_val, addr, size, release);
-    cbnzw(tmp, retry_load);
+    store_exclusive(rscratch1, new_val, addr, size, release);
+    if (weak) {
+      cmpw(rscratch1, 0u);  // If the store fails, return NE to our caller.
+    } else {
+      cbnzw(rscratch1, retry_load);
+    }
     bind(done);
     BLOCK_COMMENT("} cmpxchg");
   }
@@ -4500,21 +4506,49 @@
   BIND(DONE);
 }
 
+typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
+typedef void (MacroAssembler::* uxt_insn)(Register Rd, Register Rn);
+
 // Compare strings.
 void MacroAssembler::string_compare(Register str1, Register str2,
                                     Register cnt1, Register cnt2, Register result,
-                                    Register tmp1) {
+                                    Register tmp1,
+                                    FloatRegister vtmp, FloatRegister vtmpZ, int ae) {
   Label LENGTH_DIFF, DONE, SHORT_LOOP, SHORT_STRING,
     NEXT_WORD, DIFFERENCE;
 
+  bool isLL = ae == StrIntrinsicNode::LL;
+  bool isLU = ae == StrIntrinsicNode::LU;
+  bool isUL = ae == StrIntrinsicNode::UL;
+
+  bool str1_isL = isLL || isLU;
+  bool str2_isL = isLL || isUL;
+
+  int str1_chr_shift = str1_isL ? 0 : 1;
+  int str2_chr_shift = str2_isL ? 0 : 1;
+  int str1_chr_size = str1_isL ? 1 : 2;
+  int str2_chr_size = str2_isL ? 1 : 2;
+
+  chr_insn str1_load_chr = str1_isL ? (chr_insn)&MacroAssembler::ldrb :
+                                      (chr_insn)&MacroAssembler::ldrh;
+  chr_insn str2_load_chr = str2_isL ? (chr_insn)&MacroAssembler::ldrb :
+                                      (chr_insn)&MacroAssembler::ldrh;
+  uxt_insn ext_chr = isLL ? (uxt_insn)&MacroAssembler::uxtbw :
+                            (uxt_insn)&MacroAssembler::uxthw;
+
   BLOCK_COMMENT("string_compare {");
 
+  // Bizarrely, the counts are passed in bytes, regardless of whether they
+  // are L or U strings; the result, however, is always in characters.
+  if (!str1_isL) asrw(cnt1, cnt1, 1);
+  if (!str2_isL) asrw(cnt2, cnt2, 1);
+
   // Compute the minimum of the string lengths and save the difference.
   subsw(tmp1, cnt1, cnt2);
   cselw(cnt2, cnt1, cnt2, Assembler::LE); // min
 
   // A very short string
-  cmpw(cnt2, 4);
+  cmpw(cnt2, isLL ? 8:4);
   br(Assembler::LT, SHORT_STRING);
 
   // Check if the strings start at the same location.
@@ -4523,20 +4557,37 @@
 
   // Compare longwords
   {
-    subw(cnt2, cnt2, 4); // The last longword is a special case
+    subw(cnt2, cnt2, isLL ? 8:4); // The last longword is a special case
 
     // Move both string pointers to the last longword of their
     // strings, negate the remaining count, and convert it to bytes.
-    lea(str1, Address(str1, cnt2, Address::uxtw(1)));
-    lea(str2, Address(str2, cnt2, Address::uxtw(1)));
-    sub(cnt2, zr, cnt2, LSL, 1);
+    lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
+    lea(str2, Address(str2, cnt2, Address::uxtw(str2_chr_shift)));
+    if (isLU || isUL) {
+      sub(cnt1, zr, cnt2, LSL, str1_chr_shift);
+      eor(vtmpZ, T16B, vtmpZ, vtmpZ);
+    }
+    sub(cnt2, zr, cnt2, LSL, str2_chr_shift);
 
     // Loop, loading longwords and comparing them into rscratch2.
     bind(NEXT_WORD);
-    ldr(result, Address(str1, cnt2));
-    ldr(cnt1, Address(str2, cnt2));
-    adds(cnt2, cnt2, wordSize);
-    eor(rscratch2, result, cnt1);
+    if (isLU) {
+      ldrs(vtmp, Address(str1, cnt1));
+      zip1(vtmp, T8B, vtmp, vtmpZ);
+      umov(result, vtmp, D, 0);
+    } else {
+      ldr(result, Address(str1, isUL ? cnt1:cnt2));
+    }
+    if (isUL) {
+      ldrs(vtmp, Address(str2, cnt2));
+      zip1(vtmp, T8B, vtmp, vtmpZ);
+      umov(rscratch1, vtmp, D, 0);
+    } else {
+      ldr(rscratch1, Address(str2, cnt2));
+    }
+    adds(cnt2, cnt2, isUL ? 4:8);
+    if (isLU || isUL) add(cnt1, cnt1, isLU ? 4:8);
+    eor(rscratch2, result, rscratch1);
     cbnz(rscratch2, DIFFERENCE);
     br(Assembler::LT, NEXT_WORD);
 
@@ -4544,9 +4595,21 @@
     // same longword twice, but that's still faster than another
     // conditional branch.
 
-    ldr(result, Address(str1));
-    ldr(cnt1, Address(str2));
-    eor(rscratch2, result, cnt1);
+    if (isLU) {
+      ldrs(vtmp, Address(str1));
+      zip1(vtmp, T8B, vtmp, vtmpZ);
+      umov(result, vtmp, D, 0);
+    } else {
+      ldr(result, Address(str1));
+    }
+    if (isUL) {
+      ldrs(vtmp, Address(str2));
+      zip1(vtmp, T8B, vtmp, vtmpZ);
+      umov(rscratch1, vtmp, D, 0);
+    } else {
+      ldr(rscratch1, Address(str2));
+    }
+    eor(rscratch2, result, rscratch1);
     cbz(rscratch2, LENGTH_DIFF);
 
     // Find the first different characters in the longwords and
@@ -4554,12 +4617,12 @@
     bind(DIFFERENCE);
     rev(rscratch2, rscratch2);
     clz(rscratch2, rscratch2);
-    andr(rscratch2, rscratch2, -16);
+    andr(rscratch2, rscratch2, isLL ? -8 : -16);
     lsrv(result, result, rscratch2);
-    uxthw(result, result);
-    lsrv(cnt1, cnt1, rscratch2);
-    uxthw(cnt1, cnt1);
-    subw(result, result, cnt1);
+    (this->*ext_chr)(result, result);
+    lsrv(rscratch1, rscratch1, rscratch2);
+    (this->*ext_chr)(rscratch1, rscratch1);
+    subw(result, result, rscratch1);
     b(DONE);
   }
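
The DIFFERENCE block above locates the first differing character inside a 64-bit chunk with a byte-reverse plus count-leading-zeros, rounds the bit index down to a character boundary, and shifts that character into the low bits. A scalar sketch using compiler builtins in place of rev/clz (assumes the XOR is non-zero, which the cbnz guard guarantees):

#include <cstdint>

int first_char_difference(uint64_t a, uint64_t b, bool isLL) {
  uint64_t x = a ^ b;                                // non-zero by assumption
  int bit = __builtin_clzll(__builtin_bswap64(x));   // rev + clz
  bit &= isLL ? -8 : -16;                            // round down to a char boundary
  uint64_t mask = isLL ? 0xffu : 0xffffu;            // uxtbw / uxthw
  return (int)((a >> bit) & mask) - (int)((b >> bit) & mask);
}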
 
@@ -4568,8 +4631,8 @@
   cbz(cnt2, LENGTH_DIFF);
 
   bind(SHORT_LOOP);
-  load_unsigned_short(result, Address(post(str1, 2)));
-  load_unsigned_short(cnt1, Address(post(str2, 2)));
+  (this->*str1_load_chr)(result, Address(post(str1, str1_chr_size)));
+  (this->*str2_load_chr)(cnt1, Address(post(str2, str2_chr_size)));
   subw(result, result, cnt1);
   cbnz(result, DONE);
   sub(cnt2, cnt2, 1);
@@ -4853,7 +4916,7 @@
   // alignment.
   if (!is_large || !(BlockZeroingLowLimit >= zva_length * 2)) {
     int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);
-    cmp(cnt, low_limit >> 3);
+    subs(tmp, cnt, low_limit >> 3);
     br(Assembler::LT, small);
   }
 
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -995,10 +995,11 @@
   }
 
   // A generic CAS; success or failure is in the EQ flag.
+  // Clobbers rscratch1
   void cmpxchg(Register addr, Register expected, Register new_val,
                enum operand_size size,
-               bool acquire, bool release,
-               Register tmp = rscratch1);
+               bool acquire, bool release, bool weak,
+               Register result);
 
   // Calls
 
@@ -1198,7 +1199,8 @@
 
   void string_compare(Register str1, Register str2,
                       Register cnt1, Register cnt2, Register result,
-                      Register tmp1);
+                      Register tmp1,
+                      FloatRegister vtmp, FloatRegister vtmpZ, int ae);
 
   void arrays_equals(Register a1, Register a2,
                      Register result, Register cnt1,
--- a/src/cpu/aarch64/vm/register_definitions_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/register_definitions_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -29,6 +29,8 @@
 #include "register_aarch64.hpp"
 # include "interp_masm_aarch64.hpp"
 
+REGISTER_DEFINITION(Register, noreg);
+
 REGISTER_DEFINITION(Register, r0);
 REGISTER_DEFINITION(Register, r1);
 REGISTER_DEFINITION(Register, r2);
@@ -62,6 +64,8 @@
 REGISTER_DEFINITION(Register, r30);
 REGISTER_DEFINITION(Register, sp);
 
+REGISTER_DEFINITION(FloatRegister, fnoreg);
+
 REGISTER_DEFINITION(FloatRegister, v0);
 REGISTER_DEFINITION(FloatRegister, v1);
 REGISTER_DEFINITION(FloatRegister, v2);
--- a/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -801,6 +801,12 @@
     StubCodeMark mark(this, "StubRoutines", stub_name);
     __ align(CodeEntryAlignment);
     __ bind(start);
+
+    Label unaligned_copy_long;
+    if (AvoidUnalignedAccesses) {
+      __ tbnz(d, 3, unaligned_copy_long);
+    }
+
     if (direction == copy_forwards) {
       __ sub(s, s, bias);
       __ sub(d, d, bias);
@@ -901,6 +907,198 @@
     }
 
     __ ret(lr);
+
+    if (AvoidUnalignedAccesses) {
+      Label drain, again;
+      // Register order for storing. Order is different for backward copy.
+
+      __ bind(unaligned_copy_long);
+
+      // source address is even aligned, target odd aligned
+      //
+      // when forward copying word pairs we read long pairs at offsets
+      // {0, 2, 4, 6} (in long words). when backwards copying we read
+      // long pairs at offsets {-2, -4, -6, -8}. We adjust the source
+      // address by -2 in the forwards case so we can compute the
+      // source offsets for both as {2, 4, 6, 8} * unit where unit = 1
+      // or -1.
+      //
+      // when forward copying we need to store 1 word, 3 pairs and
+      // then 1 word at offsets {0, 1, 3, 5, 7}. Rather than use a
+      // zero offset, we adjust the destination by -1, which means we
+      // have to use offsets {1, 2, 4, 6, 8} * unit for the stores.
+      //
+      // When backwards copying we need to store 1 word, 3 pairs and
+      // then 1 word at offsets {-1, -3, -5, -7, -8} i.e. we use
+      // offsets {1, 3, 5, 7, 8} * unit.
+
+      if (direction == copy_forwards) {
+        __ sub(s, s, 16);
+        __ sub(d, d, 8);
+      }
+
+      // Fill 8 registers
+      //
+      // for forwards copy s was offset by -16 from the original input
+      // value of s so the register contents are at these offsets
+      // relative to the 64 bit block addressed by that original input
+      // and so on for each successive 64 byte block when s is updated
+      //
+      // t0 at offset 0,  t1 at offset 8
+      // t2 at offset 16, t3 at offset 24
+      // t4 at offset 32, t5 at offset 40
+      // t6 at offset 48, t7 at offset 56
+
+      // for backwards copy s was not offset so the register contents
+      // are at these offsets into the preceding 64 byte block
+      // relative to that original input and so on for each successive
+      // preceding 64 byte block when s is updated. This explains the
+      // slightly counter-intuitive looking pattern of register usage
+      // in the stp instructions for backwards copy.
+      //
+      // t0 at offset -16, t1 at offset -8
+      // t2 at offset -32, t3 at offset -24
+      // t4 at offset -48, t5 at offset -40
+      // t6 at offset -64, t7 at offset -56
+
+      __ ldp(t0, t1, Address(s, 2 * unit));
+      __ ldp(t2, t3, Address(s, 4 * unit));
+      __ ldp(t4, t5, Address(s, 6 * unit));
+      __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
+
+      __ subs(count, count, 16);
+      __ br(Assembler::LO, drain);
+
+      int prefetch = PrefetchCopyIntervalInBytes;
+      bool use_stride = false;
+      if (direction == copy_backwards) {
+         use_stride = prefetch > 256;
+         prefetch = -prefetch;
+         if (use_stride) __ mov(stride, prefetch);
+      }
+
+      __ bind(again);
+
+      if (PrefetchCopyIntervalInBytes > 0)
+        __ prfm(use_stride ? Address(s, stride) : Address(s, prefetch), PLDL1KEEP);
+
+      if (direction == copy_forwards) {
+       // allowing for the offset of -8 the store instructions place
+       // registers into the target 64 bit block at the following
+       // offsets
+       //
+       // t0 at offset 0
+       // t1 at offset 8,  t2 at offset 16
+       // t3 at offset 24, t4 at offset 32
+       // t5 at offset 40, t6 at offset 48
+       // t7 at offset 56
+
+        __ str(t0, Address(d, 1 * unit));
+        __ stp(t1, t2, Address(d, 2 * unit));
+        __ ldp(t0, t1, Address(s, 2 * unit));
+        __ stp(t3, t4, Address(d, 4 * unit));
+        __ ldp(t2, t3, Address(s, 4 * unit));
+        __ stp(t5, t6, Address(d, 6 * unit));
+        __ ldp(t4, t5, Address(s, 6 * unit));
+        __ str(t7, Address(__ pre(d, 8 * unit)));
+        __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
+      } else {
+       // d was not offset when we started so the registers are
+       // written into the 64 bit block preceding d with the following
+       // offsets
+       //
+       // t1 at offset -8
+       // t3 at offset -24, t0 at offset -16
+       // t5 at offset -40, t2 at offset -32
+       // t7 at offset -56, t4 at offset -48
+       //                   t6 at offset -64
+       //
+       // note that this matches the offsets previously noted for the
+       // loads
+
+        __ str(t1, Address(d, 1 * unit));
+        __ stp(t3, t0, Address(d, 3 * unit));
+        __ ldp(t0, t1, Address(s, 2 * unit));
+        __ stp(t5, t2, Address(d, 5 * unit));
+        __ ldp(t2, t3, Address(s, 4 * unit));
+        __ stp(t7, t4, Address(d, 7 * unit));
+        __ ldp(t4, t5, Address(s, 6 * unit));
+        __ str(t6, Address(__ pre(d, 8 * unit)));
+        __ ldp(t6, t7, Address(__ pre(s, 8 * unit)));
+      }
+
+      __ subs(count, count, 8);
+      __ br(Assembler::HS, again);
+
+      // Drain
+      //
+      // this uses the same pattern of offsets and register arguments
+      // as above
+      __ bind(drain);
+      if (direction == copy_forwards) {
+        __ str(t0, Address(d, 1 * unit));
+        __ stp(t1, t2, Address(d, 2 * unit));
+        __ stp(t3, t4, Address(d, 4 * unit));
+        __ stp(t5, t6, Address(d, 6 * unit));
+        __ str(t7, Address(__ pre(d, 8 * unit)));
+      } else {
+        __ str(t1, Address(d, 1 * unit));
+        __ stp(t3, t0, Address(d, 3 * unit));
+        __ stp(t5, t2, Address(d, 5 * unit));
+        __ stp(t7, t4, Address(d, 7 * unit));
+        __ str(t6, Address(__ pre(d, 8 * unit)));
+      }
+      // now we need to copy any remaining part block, which may
+      // include a 4 word subblock and/or a 2 word subblock.
+      // bits 2 and 1 in the count are the tell-tale for whether we
+      // have each such subblock
+      {
+        Label L1, L2;
+        __ tbz(count, exact_log2(4), L1);
+       // this is the same as above but copying only 4 longs hence
+       // with only one intervening stp between the str instructions
+       // but note that the offsets and registers still follow the
+       // same pattern
+        __ ldp(t0, t1, Address(s, 2 * unit));
+        __ ldp(t2, t3, Address(__ pre(s, 4 * unit)));
+        if (direction == copy_forwards) {
+          __ str(t0, Address(d, 1 * unit));
+          __ stp(t1, t2, Address(d, 2 * unit));
+          __ str(t3, Address(__ pre(d, 4 * unit)));
+        } else {
+          __ str(t1, Address(d, 1 * unit));
+          __ stp(t3, t0, Address(d, 3 * unit));
+          __ str(t2, Address(__ pre(d, 4 * unit)));
+        }
+        __ bind(L1);
+
+        __ tbz(count, 1, L2);
+       // this is the same as above but copying only 2 longs hence
+       // there is no intervening stp between the str instructions
+       // but note that the offset and register patterns are still
+       // the same
+        __ ldp(t0, t1, Address(__ pre(s, 2 * unit)));
+        if (direction == copy_forwards) {
+          __ str(t0, Address(d, 1 * unit));
+          __ str(t1, Address(__ pre(d, 2 * unit)));
+        } else {
+          __ str(t1, Address(d, 1 * unit));
+          __ str(t0, Address(__ pre(d, 2 * unit)));
+        }
+        __ bind(L2);
+
+       // for forwards copy we need to re-adjust the offsets we
+       // applied so that s and d follow the last words written
+
+       if (direction == copy_forwards) {
+         __ add(s, s, 16);
+         __ add(d, d, 8);
+       }
+
+      }
+
+      __ ret(lr);
+      }
   }
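
The unaligned path above is a software-pipelined copy: eight temporaries are primed, each loop iteration stores the previous 64-byte block while loading the next, and a final drain stores the last block with nothing left to load. A forwards-only sketch that ignores the trailing 4-word/2-word subblocks and prefetching, with unit fixed at +1:

#include <cstddef>
#include <cstdint>

// Assumes count is a multiple of 8 words and count >= 8.
void copy_longs_pipelined(const uint64_t* s, uint64_t* d, size_t count) {
  uint64_t t[8];
  for (int i = 0; i < 8; i++) t[i] = s[i];      // prime the eight registers
  s += 8; count -= 8;
  while (count >= 8) {                          // steady state: store old block, load next
    for (int i = 0; i < 8; i++) d[i] = t[i];
    for (int i = 0; i < 8; i++) t[i] = s[i];
    s += 8; d += 8; count -= 8;
  }
  for (int i = 0; i < 8; i++) d[i] = t[i];      // drain the final block
}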
 
   // Small copy: less than 16 bytes.
@@ -1024,11 +1222,9 @@
     // (96 bytes if SIMD because we do 32 bytes per instruction)
     __ bind(copy80);
     if (UseSIMDForMemoryOps) {
-      __ ldpq(v0, v1, Address(s, 0));
-      __ ldpq(v2, v3, Address(s, 32));
+      __ ld4(v0, v1, v2, v3, __ T16B, Address(s, 0));
       __ ldpq(v4, v5, Address(send, -32));
-      __ stpq(v0, v1, Address(d, 0));
-      __ stpq(v2, v3, Address(d, 32));
+      __ st4(v0, v1, v2, v3, __ T16B, Address(d, 0));
       __ stpq(v4, v5, Address(dend, -32));
     } else {
       __ ldp(t0, t1, Address(s, 0));
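
The ld4/st4 substitution above moves 64 bytes per instruction pair; the de-interleave performed by ld4 is exactly undone by the matching st4, so the copy stays byte-exact. An intrinsics equivalent (assumes an AArch64 target with arm_neon.h):

#include <arm_neon.h>
#include <cstdint>

void copy64_bytes(const uint8_t* s, uint8_t* d) {
  uint8x16x4_t v = vld4q_u8(s);   // four 16-byte registers, de-interleaved
  vst4q_u8(d, v);                 // re-interleaved on store: byte-exact copy
}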
@@ -2150,8 +2346,9 @@
     __ subw(count, count, cnt_words, Assembler::LSL, 3 - shift);
     if (UseBlockZeroing) {
       Label non_block_zeroing, rest;
+      Register tmp = rscratch1;
       // count >= BlockZeroingLowLimit && value == 0
-      __ cmp(cnt_words, BlockZeroingLowLimit >> 3);
+      __ subs(tmp, cnt_words, BlockZeroingLowLimit >> 3);
       __ ccmp(value, 0 /* comparing value */, 0 /* NZCV */, Assembler::GE);
       __ br(Assembler::NE, non_block_zeroing);
       __ mov(bz_base, to);
--- a/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -437,6 +437,21 @@
   __ restore_locals();
   __ restore_constant_pool_cache();
   __ get_method(rmethod);
+  __ get_dispatch();
+
+  // Calculate stack limit
+  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
+  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
+  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
+  __ ldr(rscratch2,
+         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
+  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
+  __ andr(sp, rscratch1, -16);
+
+  // Restore expression stack pointer
+  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
+  // NULL last_sp until next java call
+  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
 
 #if INCLUDE_JVMCI
   // Check if we need to take lock at entry of synchronized method.
@@ -463,22 +478,6 @@
     __ bind(L);
   }
 
-  __ get_dispatch();
-
-  // Calculate stack limit
-  __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
-  __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
-  __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
-  __ ldr(rscratch2,
-         Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
-  __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
-  __ andr(sp, rscratch1, -16);
-
-  // Restore expression stack pointer
-  __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-  // NULL last_sp until next java call
-  __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
-
   __ dispatch_next(state, step);
   return entry;
 }
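
The block moved above recomputes the operand stack limit before the synchronization check runs. In scalar terms it reserves max_stack slots plus one monitor (and two words of slack) below the frame's initial expression stack pointer, then re-aligns to 16 bytes; a sketch with illustrative names:

#include <cstdint>

uintptr_t compute_stack_limit(uintptr_t initial_sp,
                              unsigned max_stack_slots,
                              unsigned monitor_size_words) {
  uint64_t slots = (uint64_t)max_stack_slots + monitor_size_words + 2;  // "+ 2" as in the add()
  uintptr_t limit = initial_sp - (slots << 3);   // 8 bytes per slot (ext::uxtx, 3)
  return limit & ~(uintptr_t)15;                 // andr(sp, rscratch1, -16)
}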
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -2434,7 +2434,7 @@
   __ ldrsb(r0, field);
   __ push(ztos);
   // Rewrite bytecode to be faster
-  if (!is_static) {
+  if (rc == may_rewrite) {
     // use btos rewriting, no truncating to t/f bit is needed for getfield.
     patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
   }
@@ -2670,7 +2670,7 @@
     if (!is_static) pop_and_check_object(obj);
     __ andw(r0, r0, 0x1);
     __ strb(r0, field);
-    if (!is_static) {
+    if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
     }
     __ b(Done);
--- a/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/aarch64/vm/vm_version_aarch64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,10 +29,10 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/java.hpp"
 #include "runtime/stubCodeGenerator.hpp"
+#include "utilities/macros.hpp"
 #include "vm_version_aarch64.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
+
+#include OS_HEADER_INLINE(os)
 
 #ifndef BUILTIN_SIM
 #include <sys/auxv.h>
@@ -175,7 +175,15 @@
   }
 
   // Enable vendor specific features
-  if (_cpu == CPU_CAVIUM && _variant == 0) _features |= CPU_DMB_ATOMICS;
+  if (_cpu == CPU_CAVIUM) {
+    if (_variant == 0) _features |= CPU_DMB_ATOMICS;
+    if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
+      FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
+    }
+    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
+      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, (_variant > 0));
+    }
+  }
   if (_cpu == CPU_ARM && (_model == 0xd03 || _model2 == 0xd03)) _features |= CPU_A53MAC;
   if (_cpu == CPU_ARM && (_model == 0xd07 || _model2 == 0xd07)) _features |= CPU_STXR_PREFETCH;
   // If an olde style /proc/cpuinfo (cpu_lines == 1) then if _model is an A57 (0xd07)
--- a/src/cpu/ppc/vm/bytes_ppc.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/bytes_ppc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -274,8 +274,6 @@
 #endif // VM_LITTLE_ENDIAN
 };
 
-#if defined(TARGET_OS_ARCH_linux_ppc)
-#include "bytes_linux_ppc.inline.hpp"
-#endif
+#include OS_CPU_HEADER_INLINE(bytes)
 
 #endif // CPU_PPC_VM_BYTES_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/c1_LIR_ppc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/register.hpp"
+#include "c1/c1_LIR.hpp"
+
+FloatRegister LIR_OprDesc::as_float_reg() const {
+  return as_FloatRegister(fpu_regnr());
+}
+
+FloatRegister LIR_OprDesc::as_double_reg() const {
+  return as_FloatRegister(fpu_regnrLo());
+}
+
+// Reg2 unused.
+LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
+  assert(!as_FloatRegister(reg2)->is_valid(), "Not used on this platform");
+  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+                             (reg1 << LIR_OprDesc::reg2_shift) |
+                             LIR_OprDesc::double_type          |
+                             LIR_OprDesc::fpu_register         |
+                             LIR_OprDesc::double_size);
+}
+
+#ifndef PRODUCT
+void LIR_Address::verify() const {
+  assert(scale() == times_1, "Scaled addressing mode not available on PPC and should not be used");
+  assert(disp() == 0 || index()->is_illegal(), "can't have both");
+#ifdef _LP64
+  assert(base()->is_cpu_register(), "wrong base operand");
+  assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
+  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
+         "wrong type for addresses");
+#else
+  assert(base()->is_single_cpu(), "wrong base operand");
+  assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
+  assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
+         "wrong type for addresses");
+#endif
+}
+#endif // PRODUCT
--- a/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -47,7 +47,7 @@
 // The expected size in bytes of a cache line, used to pad data structures.
 #define DEFAULT_CACHE_LINE_SIZE 128
 
-#if defined(COMPILER2) && (defined(AIX) || defined(linux))
+#if defined(COMPILER2) && (defined(AIX) || defined(LINUX))
 // Include Transactional Memory lock eliding optimization
 #define INCLUDE_RTM_OPT 1
 #endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interp_masm_ppc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_INTERP_MASM_PPC_HPP
+#define CPU_PPC_VM_INTERP_MASM_PPC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "interpreter/invocationCounter.hpp"
+
+// This file specializes the assembler with interpreter-specific macros.
+
+
+class InterpreterMacroAssembler: public MacroAssembler {
+
+ public:
+  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+
+  void null_check_throw(Register a, int offset, Register temp_reg);
+
+  void jump_to_entry(address entry, Register Rscratch);
+
+  // Handy address generation macros.
+#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
+#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
+
+  virtual void check_and_handle_popframe(Register java_thread);
+  virtual void check_and_handle_earlyret(Register java_thread);
+
+  // Base routine for all dispatches.
+  void dispatch_base(TosState state, address* table);
+
+  void load_earlyret_value(TosState state, Register Rscratch1);
+
+  static const Address l_tmp;
+  static const Address d_tmp;
+
+  // dispatch routines
+  void dispatch_next(TosState state, int step = 0);
+  void dispatch_via (TosState state, address* table);
+  void load_dispatch_table(Register dst, address* table);
+  void dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify = false);
+
+  // Called by shared interpreter generator.
+  void dispatch_prolog(TosState state, int step = 0);
+  void dispatch_epilog(TosState state, int step = 0);
+
+  // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls.
+  void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
+  void super_call_VM(Register thread_cache, Register oop_result, Register last_java_sp,
+                     address entry_point, Register arg_1, Register arg_2, bool check_exception = true);
+
+  // Generate a subtype check: branch to ok_is_subtype if sub_klass is
+  // a subtype of super_klass.  Blows registers tmp1, tmp2 and tmp3.
+  void gen_subtype_check(Register sub_klass, Register super_klass,
+                         Register tmp1, Register tmp2, Register tmp3, Label &ok_is_subtype);
+
+  // Load object from cpool->resolved_references(index).
+  void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);
+
+  void load_receiver(Register Rparam_count, Register Rrecv_dst);
+
+  // helpers for expression stack
+  void pop_i(     Register r = R17_tos);
+  void pop_ptr(   Register r = R17_tos);
+  void pop_l(     Register r = R17_tos);
+  void pop_f(FloatRegister f = F15_ftos);
+  void pop_d(FloatRegister f = F15_ftos );
+
+  void push_i(     Register r = R17_tos);
+  void push_ptr(   Register r = R17_tos);
+  void push_l(     Register r = R17_tos);
+  void push_f(FloatRegister f = F15_ftos );
+  void push_d(FloatRegister f = F15_ftos);
+
+  void push_2ptrs(Register first, Register second);
+
+  void push_l_pop_d(Register l = R17_tos, FloatRegister d = F15_ftos);
+  void push_d_pop_l(FloatRegister d = F15_ftos, Register l = R17_tos);
+
+  void pop (TosState state);           // transition vtos -> state
+  void push(TosState state);           // transition state -> vtos
+  void empty_expression_stack();       // Resets both Lesp and SP.
+
+ public:
+  // Load values from bytecode stream:
+
+  enum signedOrNot { Signed, Unsigned };
+  enum setCCOrNot  { set_CC, dont_set_CC };
+
+  void get_2_byte_integer_at_bcp(int         bcp_offset,
+                                 Register    Rdst,
+                                 signedOrNot is_signed);
+
+  void get_4_byte_integer_at_bcp(int         bcp_offset,
+                                 Register    Rdst,
+                                 signedOrNot is_signed = Unsigned);
+
+  void get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size);
+
+  void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
+
+  void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);
+
+  // common code
+
+  void field_offset_at(int n, Register tmp, Register dest, Register base);
+  int  field_offset_at(Register object, address bcp, int offset);
+  void fast_iaaccess(int n, address bcp);
+  void fast_iaputfield(address bcp, bool do_store_check);
+
+  void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
+  void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);
+
+  void get_const(Register Rdst);
+  void get_constant_pool(Register Rdst);
+  void get_constant_pool_cache(Register Rdst);
+  void get_cpool_and_tags(Register Rcpool, Register Rtags);
+  void is_a(Label& L);
+
+  void narrow(Register result);
+
+  // Java Call Helpers
+  void call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2);
+
+  // --------------------------------------------------
+
+  void unlock_if_synchronized_method(TosState state, bool throw_monitor_exception = true,
+                                     bool install_monitor_exception = true);
+
+  // Removes the current activation (incl. unlocking of monitors).
+  // Additionally this code is used for earlyReturn in which case we
+  // want to skip throwing an exception and installing an exception.
+  void remove_activation(TosState state,
+                         bool throw_monitor_exception = true,
+                         bool install_monitor_exception = true);
+  void merge_frames(Register Rtop_frame_sp, Register return_pc, Register Rscratch1, Register Rscratch2); // merge top frames
+
+  void add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2);
+
+  // Local variable access helpers
+  void load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex);
+  void load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex);
+  void load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex);
+  void load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
+  void load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
+  void store_local_int(Register Rvalue, Register Rindex);
+  void store_local_long(Register Rvalue, Register Rindex);
+  void store_local_ptr(Register Rvalue, Register Rindex);
+  void store_local_float(FloatRegister Rvalue, Register Rindex);
+  void store_local_double(FloatRegister Rvalue, Register Rindex);
+
+  // Call VM for std frames
+  // Special call VM versions that check for exceptions and forward exception
+  // via short cut (not via expensive forward exception stub).
+  void check_and_forward_exception(Register Rscratch1, Register Rscratch2);
+  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
+  // Should not be used:
+  void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true) {ShouldNotReachHere();}
+  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true) {ShouldNotReachHere();}
+  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true) {ShouldNotReachHere();}
+  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true) {ShouldNotReachHere();}
+
+  Address first_local_in_stack();
+
+  enum LoadOrStore { load, store };
+  void static_iload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
+  void static_aload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
+  void static_dload_or_store(int which_local, LoadOrStore direction);
+
+  void save_interpreter_state(Register scratch);
+  void restore_interpreter_state(Register scratch, bool bcp_and_mdx_only = false);
+
+  void increment_backedge_counter(const Register Rcounters, Register Rtmp, Register Rtmp2, Register Rscratch);
+  void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register target_bcp, Register disp, Register Rtmp);
+
+  void record_static_call_in_profile(Register Rentry, Register Rtmp);
+  void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
+
+  void get_method_counters(Register method, Register Rcounters, Label& skip);
+  void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);
+
+  // Object locking
+  void lock_object  (Register lock_reg, Register obj_reg);
+  void unlock_object(Register lock_reg, bool check_for_exceptions = true);
+
+  // Interpreter profiling operations
+  void set_method_data_pointer_for_bcp();
+  void test_method_data_pointer(Label& zero_continue);
+  void verify_method_data_pointer();
+  void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rscratch, Label &profile_continue);
+
+  void set_mdp_data_at(int constant, Register value);
+
+  void increment_mdp_data_at(int constant, Register counter_addr, Register Rbumped_count, bool decrement = false);
+
+  void increment_mdp_data_at(Register counter_addr, Register Rbumped_count, bool decrement = false);
+  void increment_mdp_data_at(Register reg, int constant, Register scratch, Register Rbumped_count, bool decrement = false);
+
+  void set_mdp_flag_at(int flag_constant, Register scratch);
+  void test_mdp_data_at(int offset, Register value, Label& not_equal_continue, Register test_out);
+
+  void update_mdp_by_offset(int offset_of_disp, Register scratch);
+  void update_mdp_by_offset(Register reg, int offset_of_disp,
+                            Register scratch);
+  void update_mdp_by_constant(int constant);
+  void update_mdp_for_ret(TosState state, Register return_bci);
+
+  void profile_taken_branch(Register scratch, Register bumped_count);
+  void profile_not_taken_branch(Register scratch1, Register scratch2);
+  void profile_call(Register scratch1, Register scratch2);
+  void profile_final_call(Register scratch1, Register scratch2);
+  void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2,  bool receiver_can_be_null);
+  void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2);
+  void profile_typecheck_failed(Register Rscratch1, Register Rscratch2);
+  void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2);
+  void profile_switch_default(Register scratch1, Register scratch2);
+  void profile_switch_case(Register index, Register scratch1,Register scratch2, Register scratch3);
+  void profile_null_seen(Register Rscratch1, Register Rscratch2);
+  void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
+  void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
+
+  // Argument and return type profiling.
+  void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
+  void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
+  void profile_return_type(Register ret, Register tmp1, Register tmp2);
+  void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+
+  // Debugging
+  void verify_oop(Register reg, TosState state = atos);    // only if +VerifyOops && state == atos
+  void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
+  void verify_FPU(int stack_depth, TosState state = ftos);
+
+  typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
+
+  // Support for jvmdi/jvmpi.
+  void notify_method_entry();
+  void notify_method_exit(bool is_native_method, TosState state,
+                          NotifyMethodExitMode mode, bool check_exceptions);
+};
+
+#endif // CPU_PPC_VM_INTERP_MASM_PPC_HPP
--- a/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -26,7 +26,7 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
-#include "interp_masm_ppc_64.hpp"
+#include "interp_masm_ppc.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
-#define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
-
-#include "asm/macroAssembler.hpp"
-#include "interpreter/invocationCounter.hpp"
-
-// This file specializes the assembler with interpreter-specific macros.
-
-
-class InterpreterMacroAssembler: public MacroAssembler {
-
- public:
-  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
-
-  void null_check_throw(Register a, int offset, Register temp_reg);
-
-  void jump_to_entry(address entry, Register Rscratch);
-
-  // Handy address generation macros.
-#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
-#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
-
-  virtual void check_and_handle_popframe(Register java_thread);
-  virtual void check_and_handle_earlyret(Register java_thread);
-
-  // Base routine for all dispatches.
-  void dispatch_base(TosState state, address* table);
-
-  void load_earlyret_value(TosState state, Register Rscratch1);
-
-  static const Address l_tmp;
-  static const Address d_tmp;
-
-  // dispatch routines
-  void dispatch_next(TosState state, int step = 0);
-  void dispatch_via (TosState state, address* table);
-  void load_dispatch_table(Register dst, address* table);
-  void dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify = false);
-
-  // Called by shared interpreter generator.
-  void dispatch_prolog(TosState state, int step = 0);
-  void dispatch_epilog(TosState state, int step = 0);
-
-  // Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls.
-  void super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
-  void super_call_VM(Register thread_cache, Register oop_result, Register last_java_sp,
-                     address entry_point, Register arg_1, Register arg_2, bool check_exception = true);
-
-  // Generate a subtype check: branch to ok_is_subtype if sub_klass is
-  // a subtype of super_klass.  Blows registers tmp1, tmp2 and tmp3.
-  void gen_subtype_check(Register sub_klass, Register super_klass,
-                         Register tmp1, Register tmp2, Register tmp3, Label &ok_is_subtype);
-
-  // Load object from cpool->resolved_references(index).
-  void load_resolved_reference_at_index(Register result, Register index, Label *is_null = NULL);
-
-  void load_receiver(Register Rparam_count, Register Rrecv_dst);
-
-  // helpers for expression stack
-  void pop_i(     Register r = R17_tos);
-  void pop_ptr(   Register r = R17_tos);
-  void pop_l(     Register r = R17_tos);
-  void pop_f(FloatRegister f = F15_ftos);
-  void pop_d(FloatRegister f = F15_ftos );
-
-  void push_i(     Register r = R17_tos);
-  void push_ptr(   Register r = R17_tos);
-  void push_l(     Register r = R17_tos);
-  void push_f(FloatRegister f = F15_ftos );
-  void push_d(FloatRegister f = F15_ftos);
-
-  void push_2ptrs(Register first, Register second);
-
-  void push_l_pop_d(Register l = R17_tos, FloatRegister d = F15_ftos);
-  void push_d_pop_l(FloatRegister d = F15_ftos, Register l = R17_tos);
-
-  void pop (TosState state);           // transition vtos -> state
-  void push(TosState state);           // transition state -> vtos
-  void empty_expression_stack();       // Resets both Lesp and SP.
-
- public:
-  // Load values from bytecode stream:
-
-  enum signedOrNot { Signed, Unsigned };
-  enum setCCOrNot  { set_CC, dont_set_CC };
-
-  void get_2_byte_integer_at_bcp(int         bcp_offset,
-                                 Register    Rdst,
-                                 signedOrNot is_signed);
-
-  void get_4_byte_integer_at_bcp(int         bcp_offset,
-                                 Register    Rdst,
-                                 signedOrNot is_signed = Unsigned);
-
-  void get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size);
-
-  void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2));
-
-  void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);
-
-  // common code
-
-  void field_offset_at(int n, Register tmp, Register dest, Register base);
-  int  field_offset_at(Register object, address bcp, int offset);
-  void fast_iaaccess(int n, address bcp);
-  void fast_iaputfield(address bcp, bool do_store_check);
-
-  void index_check(Register array, Register index, int index_shift, Register tmp, Register res);
-  void index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res);
-
-  void get_const(Register Rdst);
-  void get_constant_pool(Register Rdst);
-  void get_constant_pool_cache(Register Rdst);
-  void get_cpool_and_tags(Register Rcpool, Register Rtags);
-  void is_a(Label& L);
-
-  void narrow(Register result);
-
-  // Java Call Helpers
-  void call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2);
-
-  // --------------------------------------------------
-
-  void unlock_if_synchronized_method(TosState state, bool throw_monitor_exception = true,
-                                     bool install_monitor_exception = true);
-
-  // Removes the current activation (incl. unlocking of monitors).
-  // Additionally this code is used for earlyReturn in which case we
-  // want to skip throwing an exception and installing an exception.
-  void remove_activation(TosState state,
-                         bool throw_monitor_exception = true,
-                         bool install_monitor_exception = true);
-  void merge_frames(Register Rtop_frame_sp, Register return_pc, Register Rscratch1, Register Rscratch2); // merge top frames
-
-  void add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2);
-
-  // Local variable access helpers
-  void load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex);
-  void load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex);
-  void load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex);
-  void load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
-  void load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex);
-  void store_local_int(Register Rvalue, Register Rindex);
-  void store_local_long(Register Rvalue, Register Rindex);
-  void store_local_ptr(Register Rvalue, Register Rindex);
-  void store_local_float(FloatRegister Rvalue, Register Rindex);
-  void store_local_double(FloatRegister Rvalue, Register Rindex);
-
-  // Call VM for std frames
-  // Special call VM versions that check for exceptions and forward exception
-  // via short cut (not via expensive forward exception stub).
-  void check_and_forward_exception(Register Rscratch1, Register Rscratch2);
-  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
-  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
-  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
-  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
-  // Should not be used:
-  void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true) {ShouldNotReachHere();}
-  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true) {ShouldNotReachHere();}
-  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true) {ShouldNotReachHere();}
-  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true) {ShouldNotReachHere();}
-
-  Address first_local_in_stack();
-
-  enum LoadOrStore { load, store };
-  void static_iload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
-  void static_aload_or_store(int which_local, LoadOrStore direction, Register Rtmp);
-  void static_dload_or_store(int which_local, LoadOrStore direction);
-
-  void save_interpreter_state(Register scratch);
-  void restore_interpreter_state(Register scratch, bool bcp_and_mdx_only = false);
-
-  void increment_backedge_counter(const Register Rcounters, Register Rtmp, Register Rtmp2, Register Rscratch);
-  void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register target_bcp, Register disp, Register Rtmp);
-
-  void record_static_call_in_profile(Register Rentry, Register Rtmp);
-  void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
-
-  void get_method_counters(Register method, Register Rcounters, Label& skip);
-  void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);
-
-  // Object locking
-  void lock_object  (Register lock_reg, Register obj_reg);
-  void unlock_object(Register lock_reg, bool check_for_exceptions = true);
-
-  // Interpreter profiling operations
-  void set_method_data_pointer_for_bcp();
-  void test_method_data_pointer(Label& zero_continue);
-  void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rscratch, Label &profile_continue);
-
-  void set_mdp_data_at(int constant, Register value);
-
-  void increment_mdp_data_at(int constant, Register counter_addr, Register Rbumped_count, bool decrement = false);
-
-  void increment_mdp_data_at(Register counter_addr, Register Rbumped_count, bool decrement = false);
-  void increment_mdp_data_at(Register reg, int constant, Register scratch, Register Rbumped_count, bool decrement = false);
-
-  void set_mdp_flag_at(int flag_constant, Register scratch);
-  void test_mdp_data_at(int offset, Register value, Label& not_equal_continue, Register test_out);
-
-  void update_mdp_by_offset(int offset_of_disp, Register scratch);
-  void update_mdp_by_offset(Register reg, int offset_of_disp,
-                            Register scratch);
-  void update_mdp_by_constant(int constant);
-  void update_mdp_for_ret(TosState state, Register return_bci);
-
-  void profile_taken_branch(Register scratch, Register bumped_count);
-  void profile_not_taken_branch(Register scratch1, Register scratch2);
-  void profile_call(Register scratch1, Register scratch2);
-  void profile_final_call(Register scratch1, Register scratch2);
-  void profile_virtual_call(Register Rreceiver, Register Rscratch1, Register Rscratch2,  bool receiver_can_be_null);
-  void profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2);
-  void profile_typecheck_failed(Register Rscratch1, Register Rscratch2);
-  void profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2);
-  void profile_switch_default(Register scratch1, Register scratch2);
-  void profile_switch_case(Register index, Register scratch1,Register scratch2, Register scratch3);
-  void profile_null_seen(Register Rscratch1, Register Rscratch2);
-  void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
-  void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
-
-  // Argument and return type profiling.
-  void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
-  void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
-  void profile_return_type(Register ret, Register tmp1, Register tmp2);
-  void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
-
-  // Debugging
-  void verify_oop(Register reg, TosState state = atos);    // only if +VerifyOops && state == atos
-  void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
-  void verify_FPU(int stack_depth, TosState state = ftos);
-
-  typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
-
-  // Support for jvmdi/jvmpi.
-  void notify_method_entry();
-  void notify_method_exit(bool is_native_method, TosState state,
-                          NotifyMethodExitMode mode, bool check_exceptions);
-};
-
-#endif // CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
--- a/src/cpu/ppc/vm/ppc_64.ad	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-//
-// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2012, 2013 SAP SE. All rights reserved.
-// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-//
-// This code is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License version 2 only, as
-// published by the Free Software Foundation.
-//
-// This code is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-// version 2 for more details (a copy is included in the LICENSE file that
-// accompanied this code).
-//
-// You should have received a copy of the GNU General Public License version
-// 2 along with this work; if not, write to the Free Software Foundation,
-// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-//
-// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-// or visit www.oracle.com if you need additional information or have any
-// questions.
-//
-//
--- a/src/cpu/ppc/vm/register_ppc.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/register_ppc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,7 @@
 typedef RegisterImpl* Register;
 
 inline Register as_Register(int encoding) {
-  assert(encoding >= 0 && encoding < 32, "bad register encoding");
+  assert(encoding >= -1 && encoding < 32, "bad register encoding");
   return (Register)(intptr_t)encoding;
 }
 
@@ -91,7 +91,7 @@
   inline friend Register as_Register(int encoding);
 
   // accessors
-  int encoding()  const { assert(is_valid(), "invalid register"); return value(); }
+  int encoding() const { assert(is_valid(), "invalid register"); return value(); }
   inline VMReg as_VMReg();
   Register successor() const { return as_Register(encoding() + 1); }
 
@@ -247,7 +247,7 @@
 typedef FloatRegisterImpl* FloatRegister;
 
 inline FloatRegister as_FloatRegister(int encoding) {
-  assert(encoding >= 0 && encoding < 32, "bad float register encoding");
+  assert(encoding >= -1 && encoding < 32, "bad float register encoding");
   return (FloatRegister)(intptr_t)encoding;
 }
 
@@ -267,7 +267,7 @@
   FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
 
   // testers
-  bool is_valid()       const { return (0  <=  value()       &&  value()       < number_of_registers); }
+  bool is_valid() const { return (0 <= value() && value() < number_of_registers); }
 
   const char* name() const;
 };
--- a/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -40,7 +40,7 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 #ifdef COMPILER2
-#include "adfiles/ad_ppc_64.hpp"
+#include "opto/ad.hpp"
 #include "opto/runtime.hpp"
 #endif
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/stubRoutines_ppc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_STUBROUTINES_PPC_HPP
+#define CPU_PPC_VM_STUBROUTINES_PPC_HPP
+
+// This file holds the platform specific parts of the StubRoutines
+// definition. See stubRoutines.hpp for a description on how to
+// extend it.
+
+static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
+
+enum platform_dependent_constants {
+  code_size1 = 20000,          // simply increase if too small (assembler will crash if too small)
+  code_size2 = 20000           // simply increase if too small (assembler will crash if too small)
+};
+
+// CRC32 Intrinsics.
+#define CRC32_COLUMN_SIZE 256
+#define CRC32_BYFOUR
+#ifdef  CRC32_BYFOUR
+  #define CRC32_TABLES 8
+#else
+  #define CRC32_TABLES 1
+#endif
+
+class ppc64 {
+ friend class StubGenerator;
+
+ private:
+
+  // CRC32 Intrinsics.
+  static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
+
+ public:
+
+  // CRC32 Intrinsics.
+  static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
+
+};
+
+#endif // CPU_PPC_VM_STUBROUTINES_PPC_HPP
--- a/src/cpu/ppc/vm/stubRoutines_ppc_64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
-#define CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
-
-// This file holds the platform specific parts of the StubRoutines
-// definition. See stubRoutines.hpp for a description on how to
-// extend it.
-
-static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
-
-enum platform_dependent_constants {
-  code_size1 = 20000,          // simply increase if too small (assembler will crash if too small)
-  code_size2 = 20000           // simply increase if too small (assembler will crash if too small)
-};
-
-// CRC32 Intrinsics.
-#define CRC32_COLUMN_SIZE 256
-#define CRC32_BYFOUR
-#ifdef  CRC32_BYFOUR
-  #define CRC32_TABLES 8
-#else
-  #define CRC32_TABLES 1
-#endif
-
-class ppc64 {
- friend class StubGenerator;
-
- private:
-
-  // CRC32 Intrinsics.
-  static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
-
- public:
-
-  // CRC32 Intrinsics.
-  static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
-
-};
-
-#endif // CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
--- a/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -881,10 +881,6 @@
   BLOCK_COMMENT("} stack_overflow_check_with_compare");
 }
 
-void TemplateInterpreterGenerator::unlock_method(bool check_exceptions) {
-  __ unlock_object(R26_monitor, check_exceptions);
-}
-
 // Lock the current method, interpreter register window must be set up!
 void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded) {
   const Register Robj_to_lock = Rscratch2;
@@ -1566,7 +1562,7 @@
   if (synchronized) {
     // Don't check for exceptions since we're still in the i2n frame. Do that
     // manually afterwards.
-    unlock_method(false);
+    __ unlock_object(R26_monitor, false); // Can also unlock methods.
   }
 
   // Reset active handles after returning from native.
@@ -1609,7 +1605,7 @@
   if (synchronized) {
     // Don't check for exceptions since we're still in the i2n frame. Do that
     // manually afterwards.
-    unlock_method(false);
+    __ unlock_object(R26_monitor, false); // Can also unlock methods.
   }
   BIND(exception_return_sync_check_already_unlocked);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/templateTable_ppc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_TEMPLATETABLE_PPC_HPP
+#define CPU_PPC_VM_TEMPLATETABLE_PPC_HPP
+
+  static void prepare_invoke(int byte_no, Register Rmethod, Register Rret_addr, Register Rindex, Register Rrecv, Register Rflags, Register Rscratch);
+  static void invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2);
+  static void generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp);
+  static void invokeinterface_object_method(Register Rrecv_klass, Register Rret, Register Rflags, Register Rindex, Register Rtemp, Register Rtemp2);
+
+  // Branch_conditional which takes TemplateTable::Condition.
+  static void branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert = false);
+  static void if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0);
+
+#endif // CPU_PPC_VM_TEMPLATETABLE_PPC_HPP
--- a/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1668,9 +1668,13 @@
         __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
         __ addi(Rscratch2, Rscratch2, increment);
         __ stw(Rscratch2, mdo_bc_offs, Rmdo);
-        __ and_(Rscratch3, Rscratch2, Rscratch3);
-        __ bne(CCR0, Lforward);
-        __ b(Loverflow);
+        if (UseOnStackReplacement) {
+          __ and_(Rscratch3, Rscratch2, Rscratch3);
+          __ bne(CCR0, Lforward);
+          __ b(Loverflow);
+        } else {
+          __ b(Lforward);
+        }
       }
 
       // If there's no MDO, increment counter in method.
@@ -1680,9 +1684,12 @@
       __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
       __ addi(Rscratch2, Rscratch2, increment);
       __ stw(Rscratch2, mo_bc_offs, R4_counters);
-      __ and_(Rscratch3, Rscratch2, Rscratch3);
-      __ bne(CCR0, Lforward);
-
+      if (UseOnStackReplacement) {
+        __ and_(Rscratch3, Rscratch2, Rscratch3);
+        __ bne(CCR0, Lforward);
+      } else {
+        __ b(Lforward);
+      }
       __ bind(Loverflow);
 
       // Notify point for loop, pass branch bytecode.
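
Editorial note: both hunks above gate the back-edge overflow test on UseOnStackReplacement; with OSR disabled the generated code now branches straight to Lforward instead of and-ing the counter with the mask. A rough C++ rendering of the decision the emitted PPC code makes (helper name and types are mine, not a HotSpot API):

    #include <cstdint>

    // After the back-edge counter is bumped, a masked value of zero means
    // "overflow", i.e. take the OSR path; with OSR disabled the interpreter
    // always continues at Lforward. Illustrative sketch only.
    static bool take_osr_overflow_path(uint32_t backedge_counter,
                                       uint32_t backedge_mask,
                                       bool use_on_stack_replacement) {
      if (!use_on_stack_replacement) {
        return false;                                   // unconditional b Lforward
      }
      return (backedge_counter & backedge_mask) == 0;   // bne CCR0, Lforward otherwise
    }

The SPARC templateTable change further down expresses the same thing by passing Assembler::always instead of Assembler::notZero to increment_mask_and_jump.
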
--- a/src/cpu/ppc/vm/templateTable_ppc_64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
-#define CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
-
-  static void prepare_invoke(int byte_no, Register Rmethod, Register Rret_addr, Register Rindex, Register Rrecv, Register Rflags, Register Rscratch);
-  static void invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2);
-  static void generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp);
-  static void invokeinterface_object_method(Register Rrecv_klass, Register Rret, Register Rflags, Register Rindex, Register Rtemp, Register Rtemp2);
-
-  // Branch_conditional which takes TemplateTable::Condition.
-  static void branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert = false);
-  static void if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0);
-
-#endif // CPU_PPC_VM_TEMPLATETABLE_PPC_64_HPP
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -278,7 +278,7 @@
       os_too_old = false;
     }
 #endif
-#ifdef linux
+#ifdef LINUX
     // At least Linux kernel 4.2, as the problematic behavior of syscalls
     // being called in the middle of a transaction has been addressed.
     // Please, refer to commit b4b56f9ecab40f3b4ef53e130c9f6663be491894
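
Editorial note: the old guard tested the identifier `linux`, which GCC predefines only in its GNU dialect modes, so the kernel-version check could silently compile out under strict -std= settings; LINUX is the macro the HotSpot build itself defines for Linux targets. Purely as an illustration of the difference (not build code):

    // Illustration only: with g++ -std=c++11 the legacy 'linux' macro is not
    // predefined, whereas a build can always pass -DLINUX explicitly.
    #if defined(LINUX)
      // reliably compiled into Linux builds
    #elif defined(linux)
      // only seen under GNU dialects such as -std=gnu++11
    #endif
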
--- a/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
 #include "code/vtableStubs.hpp"
-#include "interp_masm_ppc_64.hpp"
+#include "interp_masm_ppc.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klassVtable.hpp"
@@ -243,7 +243,7 @@
 }
 
 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) {
+  if (DebugVtables || CountCompiledCalls || VerifyOops) {
     return 1000;
   } else {
     int decode_klass_size = MacroAssembler::instr_size_for_decode_klass_not_null();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/sparc/vm/c1_LIR_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/register.hpp"
+#include "c1/c1_FrameMap.hpp"
+#include "c1/c1_LIR.hpp"
+
+FloatRegister LIR_OprDesc::as_float_reg() const {
+  return FrameMap::nr2floatreg(fpu_regnr());
+}
+
+FloatRegister LIR_OprDesc::as_double_reg() const {
+  return FrameMap::nr2floatreg(fpu_regnrHi());
+}
+
+LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
+  assert(as_FloatRegister(reg2) != fnoreg, "Sparc holds double in two regs.");
+  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+                             (reg2 << LIR_OprDesc::reg2_shift) |
+                             LIR_OprDesc::double_type          |
+                             LIR_OprDesc::fpu_register         |
+                             LIR_OprDesc::double_size);
+}
+
+#ifndef PRODUCT
+void LIR_Address::verify() const {
+  assert(scale() == times_1, "Scaled addressing mode not available on SPARC and should not be used");
+  assert(disp() == 0 || index()->is_illegal(), "can't have both");
+#ifdef _LP64
+  assert(base()->is_cpu_register(), "wrong base operand");
+  assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
+  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
+         "wrong type for addresses");
+#else
+  assert(base()->is_single_cpu(), "wrong base operand");
+  assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
+  assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
+         "wrong type for addresses");
+#endif
+}
+#endif // PRODUCT
--- a/src/cpu/sparc/vm/compiledIC_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/compiledIC_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -77,8 +77,7 @@
   // This doesn't need to be accurate but it must be larger or equal to
   // the real size of the stub.
   return (NativeMovConstReg::instruction_size +  // sethi/setlo;
-          NativeJump::instruction_size + // sethi; jmp; nop
-          (TraceJumps ? 20 * BytesPerInstWord : 0) );
+          NativeJump::instruction_size); // sethi; jmp; nop
 }
 
 // Relocation entries for call stub, compiled java to interpreter.
--- a/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -32,7 +32,7 @@
 
 // Indicates whether the C calling conventions require that
 // 32-bit integer argument values are extended to 64 bits.
-const bool CCallingConventionRequiresIntsAsLongs = false;
+const bool CCallingConventionRequiresIntsAsLongs = true;
 
 #define SUPPORTS_NATIVE_CX8
 
--- a/src/cpu/sparc/vm/icBuffer_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/icBuffer_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -33,12 +33,10 @@
 
 int InlineCacheBuffer::ic_stub_code_size() {
 #ifdef _LP64
-  if (TraceJumps) return 600 * wordSize;
   return (NativeMovConstReg::instruction_size +  // sethi;add
           NativeJump::instruction_size +          // sethi; jmp; delay slot
           (1*BytesPerInstWord) + 1);            // flush + 1 extra byte
 #else
-  if (TraceJumps) return 300 * wordSize;
   return (2+2+ 1) * wordSize + 1; // set/jump_to/nop + 1 byte so that code_end can be set in CodeBuffer
 #endif
 }
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -184,72 +184,10 @@
 
 void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line ) {
   assert_not_delayed();
-  // This can only be traceable if r1 & r2 are visible after a window save
-  if (TraceJumps) {
-#ifndef PRODUCT
-    save_frame(0);
-    verify_thread();
-    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
-    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
-    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
-    add(O2, O1, O1);
-
-    add(r1->after_save(), r2->after_save(), O2);
-    set((intptr_t)file, O3);
-    set(line, O4);
-    Label L;
-    // get nearby pc, store jmp target
-    call(L, relocInfo::none);  // No relocation for call to pc+0x8
-    delayed()->st(O2, O1, 0);
-    bind(L);
-
-    // store nearby pc
-    st(O7, O1, sizeof(intptr_t));
-    // store file
-    st(O3, O1, 2*sizeof(intptr_t));
-    // store line
-    st(O4, O1, 3*sizeof(intptr_t));
-    add(O0, 1, O0);
-    and3(O0, JavaThread::jump_ring_buffer_size  - 1, O0);
-    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
-    restore();
-#endif /* PRODUCT */
-  }
   jmpl(r1, r2, G0);
 }
 void MacroAssembler::jmp(Register r1, int offset, const char* file, int line ) {
   assert_not_delayed();
-  // This can only be traceable if r1 is visible after a window save
-  if (TraceJumps) {
-#ifndef PRODUCT
-    save_frame(0);
-    verify_thread();
-    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
-    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
-    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
-    add(O2, O1, O1);
-
-    add(r1->after_save(), offset, O2);
-    set((intptr_t)file, O3);
-    set(line, O4);
-    Label L;
-    // get nearby pc, store jmp target
-    call(L, relocInfo::none);  // No relocation for call to pc+0x8
-    delayed()->st(O2, O1, 0);
-    bind(L);
-
-    // store nearby pc
-    st(O7, O1, sizeof(intptr_t));
-    // store file
-    st(O3, O1, 2*sizeof(intptr_t));
-    // store line
-    st(O4, O1, 3*sizeof(intptr_t));
-    add(O0, 1, O0);
-    and3(O0, JavaThread::jump_ring_buffer_size  - 1, O0);
-    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
-    restore();
-#endif /* PRODUCT */
-  }
   jmp(r1, offset);
 }
 
@@ -260,44 +198,7 @@
   // variable length instruction streams.
   patchable_sethi(addrlit, temp);
   Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
-  if (TraceJumps) {
-#ifndef PRODUCT
-    // Must do the add here so relocation can find the remainder of the
-    // value to be relocated.
-    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
-    save_frame(0);
-    verify_thread();
-    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
-    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
-    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
-    add(O2, O1, O1);
-
-    set((intptr_t)file, O3);
-    set(line, O4);
-    Label L;
-
-    // get nearby pc, store jmp target
-    call(L, relocInfo::none);  // No relocation for call to pc+0x8
-    delayed()->st(a.base()->after_save(), O1, 0);
-    bind(L);
-
-    // store nearby pc
-    st(O7, O1, sizeof(intptr_t));
-    // store file
-    st(O3, O1, 2*sizeof(intptr_t));
-    // store line
-    st(O4, O1, 3*sizeof(intptr_t));
-    add(O0, 1, O0);
-    and3(O0, JavaThread::jump_ring_buffer_size  - 1, O0);
-    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
-    restore();
-    jmpl(a.base(), G0, d);
-#else
-    jmpl(a.base(), a.disp(), d);
-#endif /* PRODUCT */
-  } else {
-    jmpl(a.base(), a.disp(), d);
-  }
+  jmpl(a.base(), a.disp(), d);
 }
 
 void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
--- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -703,8 +703,8 @@
 
   inline void tst( Register s );
 
-  inline void ret(  bool trace = TraceJumps );
-  inline void retl( bool trace = TraceJumps );
+  inline void ret(  bool trace = false );
+  inline void retl( bool trace = false );
 
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -760,8 +760,7 @@
   Register rd = inv_rd(i0);
 #ifndef _LP64
   if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
-        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
-        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
+        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op)) &&
         inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
         rd == inv_rs1(i1))) {
     fatal("not a jump_to instruction");
--- a/src/cpu/sparc/vm/register_definitions_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/register_definitions_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
 // make sure the defines don't screw up the declarations later on in this file
 #define DONT_USE_REGISTER_DEFINES
 
-#include "precompiled.hpp"
+// Note: precompiled headers can not be used in this file because of the above
+//       definition
+
 #include "asm/assembler.hpp"
 #include "asm/register.hpp"
 #include "interp_masm_sparc.hpp"
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -3368,9 +3368,7 @@
   // setup code generation tools
   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
-  // even larger with TraceJumps
-  int pad = TraceJumps ? 512 : 0;
-  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
+  CodeBuffer buffer("handler_blob", 1600, 512);
   MacroAssembler* masm                = new MacroAssembler(&buffer);
   int             frame_size_words;
   OopMapSet *oop_maps = new OopMapSet();
@@ -3462,9 +3460,7 @@
   // setup code generation tools
   // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
   // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
-  // even larger with TraceJumps
-  int pad = TraceJumps ? 512 : 0;
-  CodeBuffer buffer(name, 1600 + pad, 512);
+  CodeBuffer buffer(name, 1600, 512);
   MacroAssembler* masm                = new MacroAssembler(&buffer);
   int             frame_size_words;
   OopMapSet *oop_maps = new OopMapSet();
--- a/src/cpu/sparc/vm/sparc.ad	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Fri Aug 05 09:50:25 2016 -0700
@@ -501,16 +501,10 @@
   static int emit_deopt_handler(CodeBuffer& cbuf);
 
   static uint size_exception_handler() {
-    if (TraceJumps) {
-      return (400); // just a guess
-    }
     return ( NativeJump::instruction_size ); // sethi;jmp;nop
   }
 
   static uint size_deopt_handler() {
-    if (TraceJumps) {
-      return (400); // just a guess
-    }
     return ( 4+  NativeJump::instruction_size ); // save;sethi;jmp;restore
   }
 };
@@ -720,7 +714,7 @@
   return offset;
 }
 
-static inline jdouble replicate_immI(int con, int count, int width) {
+static inline jlong replicate_immI(int con, int count, int width) {
   // Load a constant replicated "count" times with width "width"
   assert(count*width == 8 && width <= 4, "sanity");
   int bit_width = width * 8;
@@ -729,17 +723,15 @@
   for (int i = 0; i < count - 1; i++) {
     val |= (val << bit_width);
   }
-  jdouble dval = *((jdouble*) &val);  // coerce to double type
-  return dval;
-}
-
-static inline jdouble replicate_immF(float con) {
+  return val;
+}
+
+static inline jlong replicate_immF(float con) {
   // Replicate float con 2 times and pack into vector.
   int val = *((int*)&con);
   jlong lval = val;
   lval = (lval << 32) | (lval & 0xFFFFFFFFl);
-  jdouble dval = *((jdouble*) &lval);  // coerce to double type
-  return dval;
+  return lval;
 }
 
 // Standard Sparc opcode form2 field breakdown
@@ -2661,8 +2653,7 @@
 
       // Emit stub for static call.
       address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
-      // Stub does not fit into scratch buffer if TraceJumps is enabled
-      if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
+      if (stub == NULL) {
         ciEnv::current()->record_failure("CodeCache is full");
         return;
       }
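
Editorial note: the replicate helpers above now return the replicated immediate as a jlong instead of type-punning the bits through a jdouble, so an arbitrary bit pattern never round-trips through a floating-point value (x86.ad gets the same treatment further down). A standalone C++ sketch of the replication logic, with my own names, mirroring the patched replicate_immI:

    #include <cstdint>
    #include <cassert>

    // Repeat a 'width'-byte immediate 'count' times into a 64-bit pattern and
    // return the raw bits as an integer. Illustrative restatement only, not
    // HotSpot source.
    static int64_t replicate_imm(int con, int count, int width) {
      assert(count * width == 8 && width <= 4);
      const int bit_width = width * 8;
      int64_t val = con & ((int64_t(1) << bit_width) - 1);  // mask to 'width' bytes
      for (int i = 0; i < count - 1; i++) {
        val |= (val << bit_width);
      }
      return val;
    }
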
--- a/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1560,13 +1560,7 @@
     __ bind(ok);
   }
 #endif
-  if (TraceJumps) {
-    // Move target to register that is recordable
-    __ mov(Lscratch, G3_scratch);
-    __ JMP(G3_scratch, 0);
-  } else {
-    __ jmp(Lscratch, 0);
-  }
+  __ jmp(Lscratch, 0);
   __ delayed()->nop();
 
 
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1636,7 +1636,7 @@
                                                  in_bytes(InvocationCounter::counter_offset()));
         Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
-                                   Assembler::notZero, &Lforward);
+                                   (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
         __ ba_short(Loverflow);
       }
 
@@ -1647,7 +1647,7 @@
               in_bytes(InvocationCounter::counter_offset()));
       Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
-                                 Assembler::notZero, &Lforward);
+                                 (UseOnStackReplacement ? Assembler::notZero : Assembler::always), &Lforward);
       __ bind(Loverflow);
 
       // notify point for loop, pass branch bytecode
--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -221,7 +221,7 @@
 
 
 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
+  if (DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
   else {
     const int slop = 2*BytesPerInstWord; // sethi;add  (needed for long offsets)
     if (is_vtable_stub) {
--- a/src/cpu/x86/vm/bytes_x86.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/bytes_x86.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define CPU_X86_VM_BYTES_X86_HPP
 
 #include "memory/allocation.hpp"
+#include "utilities/macros.hpp"
 
 class Bytes: AllStatic {
  private:
@@ -70,20 +71,7 @@
   static inline u8   swap_u8(u8 x);
 };
 
-
 // The following header contains the implementations of swap_u2, swap_u4, and swap_u8[_base]
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "bytes_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "bytes_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "bytes_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "bytes_bsd_x86.inline.hpp"
-#endif
-
+#include OS_CPU_HEADER_INLINE(bytes)
 
 #endif // CPU_X86_VM_BYTES_X86_HPP
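
Editorial note: the per-OS #ifdef chain is replaced by OS_CPU_HEADER_INLINE, which assembles the "bytes_<os>_<cpu>.inline.hpp" name from suffixes supplied by the build. A self-contained sketch of how such a token-pasting include macro can be built (the suffix and helper macro names below are mine, not the definitions in HotSpot's macros.hpp):

    // Sketch only: paste basename + OS suffix + CPU suffix, then stringize the
    // result so it can be used directly in an #include directive.
    #define MY_SUFFIX_OS  _linux
    #define MY_SUFFIX_CPU _x86
    #define MY_PASTE3_(a, b, c) a ## b ## c
    #define MY_PASTE3(a, b, c)  MY_PASTE3_(a, b, c)
    #define MY_STR_(x) #x
    #define MY_STR(x)  MY_STR_(x)
    #define MY_OS_CPU_HEADER_INLINE(basename) \
      MY_STR(MY_PASTE3(basename, MY_SUFFIX_OS, MY_SUFFIX_CPU).inline.hpp)

    // MY_OS_CPU_HEADER_INLINE(bytes) expands to "bytes_linux_x86.inline.hpp",
    // so '#include MY_OS_CPU_HEADER_INLINE(bytes)' selects the right header.

copy_x86.hpp and bytes_zero.hpp in this changeset switch to the same pattern.
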
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -761,7 +761,6 @@
   } else {
     a = new LIR_Address(obj.result(),
                         offset.result(),
-                        LIR_Address::times_1,
                         0,
                         as_BasicType(type));
   }
@@ -1081,7 +1080,6 @@
 
       LIR_Address* a = new LIR_Address(base_op,
                                        index,
-                                       LIR_Address::times_1,
                                        offset,
                                        T_BYTE);
       BasicTypeList signature(3);
@@ -1157,13 +1155,11 @@
 
   LIR_Address* addr_a = new LIR_Address(result_a,
                                         result_aOffset,
-                                        LIR_Address::times_1,
                                         constant_aOffset,
                                         T_BYTE);
 
   LIR_Address* addr_b = new LIR_Address(result_b,
                                         result_bOffset,
-                                        LIR_Address::times_1,
                                         constant_bOffset,
                                         T_BYTE);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/c1_LIR_x86.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/register.hpp"
+#include "c1/c1_FrameMap.hpp"
+#include "c1/c1_LIR.hpp"
+
+
+FloatRegister LIR_OprDesc::as_float_reg() const {
+  ShouldNotReachHere();
+  return fnoreg;
+}
+
+FloatRegister LIR_OprDesc::as_double_reg() const {
+  ShouldNotReachHere();
+  return fnoreg;
+}
+
+XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
+  return FrameMap::nr2xmmreg(xmm_regnr());
+}
+
+XMMRegister LIR_OprDesc::as_xmm_double_reg() const {
+  assert(xmm_regnrLo() == xmm_regnrHi(), "assumed in calculation");
+  return FrameMap::nr2xmmreg(xmm_regnrLo());
+}
+
+// Reg2 unused.
+LIR_Opr LIR_OprFact::double_fpu(int reg1, int reg2) {
+  assert(as_FloatRegister(reg2) == fnoreg, "Not used on this platform");
+  return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+                             (reg1 << LIR_OprDesc::reg2_shift) |
+                             LIR_OprDesc::double_type          |
+                             LIR_OprDesc::fpu_register         |
+                             LIR_OprDesc::double_size);
+}
+
+#ifndef PRODUCT
+void LIR_Address::verify() const {
+#ifdef _LP64
+  assert(base()->is_cpu_register(), "wrong base operand");
+  assert(index()->is_illegal() || index()->is_double_cpu(), "wrong index operand");
+  assert(base()->type() == T_OBJECT || base()->type() == T_LONG || base()->type() == T_METADATA,
+         "wrong type for addresses");
+#else
+  assert(base()->is_single_cpu(), "wrong base operand");
+  assert(index()->is_illegal() || index()->is_single_cpu(), "wrong index operand");
+  assert(base()->type() == T_OBJECT || base()->type() == T_INT || base()->type() == T_METADATA,
+         "wrong type for addresses");
+#endif
+}
+#endif // PRODUCT
--- a/src/cpu/x86/vm/copy_x86.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/copy_x86.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,19 +28,7 @@
 // Inline functions for memory copy and fill.
 
 // Contains inline asm implementations
-#ifdef TARGET_OS_ARCH_linux_x86
-# include "copy_linux_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_solaris_x86
-# include "copy_solaris_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_windows_x86
-# include "copy_windows_x86.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_x86
-# include "copy_bsd_x86.inline.hpp"
-#endif
-
+#include OS_CPU_HEADER_INLINE(copy)
 
 static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
 #ifdef AMD64
--- a/src/cpu/x86/vm/globalDefinitions_x86.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/globalDefinitions_x86.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@
   #endif
 #endif
 
-#if defined(COMPILER2) && !defined(JAVASE_EMBEDDED)
+#if defined(COMPILER2)
 // Include Restricted Transactional Memory lock eliding optimization
 #define INCLUDE_RTM_OPT 1
 #endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/x86/vm/macroAssembler_x86.inline.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_VM_MACROASSEMBLER_X86_INLINE_HPP
+#define CPU_X86_VM_MACROASSEMBLER_X86_INLINE_HPP
+
+// Still empty.
+
+#endif // CPU_X86_VM_MACROASSEMBLER_X86_INLINE_HPP
--- a/src/cpu/x86/vm/macroAssembler_x86_tan.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/macroAssembler_x86_tan.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1060,7 +1060,7 @@
 
   bind(B1_4);
   addq(rsp, 16);
-
+  pop(rbx);
 }
 #else
 // The 32 bit code is at most SSE2 compliant
--- a/src/cpu/x86/vm/register_definitions_x86.cpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/register_definitions_x86.cpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,7 @@
 #include "asm/assembler.hpp"
 #include "asm/register.hpp"
 #include "register_x86.hpp"
-#ifdef TARGET_ARCH_x86
-# include "interp_masm_x86.hpp"
-#endif
+#include "interp_masm_x86.hpp"
 
 REGISTER_DEFINITION(Register, noreg);
 REGISTER_DEFINITION(Register, rax);
@@ -50,6 +48,8 @@
 REGISTER_DEFINITION(Register, r15);
 #endif // AMD64
 
+REGISTER_DEFINITION(FloatRegister, fnoreg);
+
 REGISTER_DEFINITION(XMMRegister, xnoreg);
 REGISTER_DEFINITION(XMMRegister, xmm0 );
 REGISTER_DEFINITION(XMMRegister, xmm1 );
--- a/src/cpu/x86/vm/register_x86.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/register_x86.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,6 +124,8 @@
 
 };
 
+CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
+
 // Use XMMRegister as shortcut
 class XMMRegisterImpl;
 typedef XMMRegisterImpl* XMMRegister;
--- a/src/cpu/x86/vm/stubRoutines_x86.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/stubRoutines_x86.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,83 @@
 // definition. See stubRoutines.hpp for a description on how to
 // extend it.
 
+static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
+
+enum platform_dependent_constants {
+  code_size1 = 20000 LP64_ONLY(+10000),         // simply increase if too small (assembler will crash if too small)
+  code_size2 = 33800 LP64_ONLY(+1200)           // simply increase if too small (assembler will crash if too small)
+};
+
+class x86 {
+ friend class StubGenerator;
+ friend class VMStructs;
+
+#ifdef _LP64
+ private:
+  static address _get_previous_fp_entry;
+  static address _get_previous_sp_entry;
+
+  static address _f2i_fixup;
+  static address _f2l_fixup;
+  static address _d2i_fixup;
+  static address _d2l_fixup;
+
+  static address _float_sign_mask;
+  static address _float_sign_flip;
+  static address _double_sign_mask;
+  static address _double_sign_flip;
+
+ public:
+
+  static address get_previous_fp_entry() {
+    return _get_previous_fp_entry;
+  }
+
+  static address get_previous_sp_entry() {
+    return _get_previous_sp_entry;
+  }
+
+  static address f2i_fixup() {
+    return _f2i_fixup;
+  }
+
+  static address f2l_fixup() {
+    return _f2l_fixup;
+  }
+
+  static address d2i_fixup() {
+    return _d2i_fixup;
+  }
+
+  static address d2l_fixup() {
+    return _d2l_fixup;
+  }
+
+  static address float_sign_mask() {
+    return _float_sign_mask;
+  }
+
+  static address float_sign_flip() {
+    return _float_sign_flip;
+  }
+
+  static address double_sign_mask() {
+    return _double_sign_mask;
+  }
+
+  static address double_sign_flip() {
+    return _double_sign_flip;
+  }
+#else // !LP64
+
+ private:
+  static address _verify_fpu_cntrl_wrd_entry;
+
+ public:
+  static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; }
+
+#endif // !LP64
+
  private:
   static address _verify_mxcsr_entry;
   // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers
@@ -138,4 +215,6 @@
   static address _Pi4x4_addr()      { return _Pi4x4_adr; }
   static address _ones_addr()      { return _ones_adr; }
 
-#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
+};
+
+#endif // CPU_X86_VM_STUBROUTINES_X86_HPP
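
Editorial note: folding the 32- and 64-bit headers into stubRoutines_x86.hpp lets the code buffer sizes be written once and adjusted with LP64_ONLY, so 64-bit builds keep the previous 30000/35000 (20000+10000 and 33800+1200) while 32-bit builds keep 20000/33800. A simplified stand-in for the conditional-expansion idea (the real LP64_ONLY lives with HotSpot's other conditional macros; the names below are mine):

    // Expand the argument only on 64-bit builds so a shared constant can carry
    // a 64-bit-only adjustment. Sketch only.
    #ifdef _LP64
    #define MY_LP64_ONLY(code) code
    #else
    #define MY_LP64_ONLY(code)
    #endif

    enum sketch_platform_dependent_constants {
      sketch_code_size1 = 20000 MY_LP64_ONLY(+10000),  // 30000 on 64-bit, 20000 on 32-bit
      sketch_code_size2 = 33800 MY_LP64_ONLY(+1200)    // 35000 on 64-bit, 33800 on 32-bit
    };
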
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_STUBROUTINES_X86_32_HPP
-#define CPU_X86_VM_STUBROUTINES_X86_32_HPP
-
-// This file holds the platform specific parts of the StubRoutines
-// definition. See stubRoutines.hpp for a description on how to
-// extend it.
-
-enum platform_dependent_constants {
-  code_size1 =  20000,           // simply increase if too small (assembler will crash if too small)
-  code_size2 = 33800            // simply increase if too small (assembler will crash if too small)
-};
-
-class x86 {
- friend class StubGenerator;
- friend class VMStructs;
-
- private:
-  static address _verify_fpu_cntrl_wrd_entry;
-
- public:
-  static address verify_fpu_cntrl_wrd_entry()                { return _verify_fpu_cntrl_wrd_entry; }
-
-# include "stubRoutines_x86.hpp"
-
-};
-
-  static bool    returns_to_call_stub(address return_pc)     { return return_pc == _call_stub_return_address; }
-
-#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef CPU_X86_VM_STUBROUTINES_X86_64_HPP
-#define CPU_X86_VM_STUBROUTINES_X86_64_HPP
-
-// This file holds the platform specific parts of the StubRoutines
-// definition. See stubRoutines.hpp for a description on how to
-// extend it.
-
-static bool    returns_to_call_stub(address return_pc)   { return return_pc == _call_stub_return_address; }
-
-enum platform_dependent_constants {
-  code_size1 = 30000,          // simply increase if too small (assembler will crash if too small)
-  code_size2 = 35000           // simply increase if too small (assembler will crash if too small)
-};
-
-class x86 {
- friend class StubGenerator;
-
- private:
-  static address _get_previous_fp_entry;
-  static address _get_previous_sp_entry;
-
-  static address _f2i_fixup;
-  static address _f2l_fixup;
-  static address _d2i_fixup;
-  static address _d2l_fixup;
-
-  static address _float_sign_mask;
-  static address _float_sign_flip;
-  static address _double_sign_mask;
-  static address _double_sign_flip;
-
- public:
-
-  static address get_previous_fp_entry()
-  {
-    return _get_previous_fp_entry;
-  }
-
-  static address get_previous_sp_entry()
-  {
-    return _get_previous_sp_entry;
-  }
-
-  static address f2i_fixup()
-  {
-    return _f2i_fixup;
-  }
-
-  static address f2l_fixup()
-  {
-    return _f2l_fixup;
-  }
-
-  static address d2i_fixup()
-  {
-    return _d2i_fixup;
-  }
-
-  static address d2l_fixup()
-  {
-    return _d2l_fixup;
-  }
-
-  static address float_sign_mask()
-  {
-    return _float_sign_mask;
-  }
-
-  static address float_sign_flip()
-  {
-    return _float_sign_flip;
-  }
-
-  static address double_sign_mask()
-  {
-    return _double_sign_mask;
-  }
-
-  static address double_sign_flip()
-  {
-    return _double_sign_flip;
-  }
-
-# include "stubRoutines_x86.hpp"
-
-};
-
-#endif // CPU_X86_VM_STUBROUTINES_X86_64_HPP
--- a/src/cpu/x86/vm/x86.ad	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/x86/vm/x86.ad	Fri Aug 05 09:50:25 2016 -0700
@@ -2131,7 +2131,7 @@
   return size+offset_size;
 }
 
-static inline jfloat replicate4_imm(int con, int width) {
+static inline jint replicate4_imm(int con, int width) {
   // Load a constant of "width" (in bytes) and replicate it to fill 32bit.
   assert(width == 1 || width == 2, "only byte or short types here");
   int bit_width = width * 8;
@@ -2141,11 +2141,10 @@
     val |= (val << bit_width);
     bit_width <<= 1;
   }
-  jfloat fval = *((jfloat*) &val);  // coerce to float type
-  return fval;
+  return val;
 }
 
-static inline jdouble replicate8_imm(int con, int width) {
+static inline jlong replicate8_imm(int con, int width) {
   // Load a constant of "width" (in bytes) and replicate it to fill 64bit.
   assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here");
   int bit_width = width * 8;
@@ -2155,8 +2154,7 @@
     val |= (val << bit_width);
     bit_width <<= 1;
   }
-  jdouble dval = *((jdouble*) &val);  // coerce to double type
-  return dval;
+  return val;
 }
 
 #ifndef PRODUCT
--- a/src/cpu/zero/vm/bytes_zero.hpp	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/cpu/zero/vm/bytes_zero.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -165,12 +165,8 @@
 #ifdef VM_LITTLE_ENDIAN
 // The following header contains the implementations of swap_u2,
 // swap_u4, and swap_u8
-#ifdef TARGET_OS_ARCH_linux_zero
-# include "bytes_linux_zero.inline.hpp"
-#endif
-#ifdef TARGET_OS_ARCH_bsd_zero
-# include "bytes_bsd_zero.inline.hpp"
-#endif
+
+#include OS_CPU_HEADER_INLINE(bytes)
 
 #endif // VM_LITTLE_ENDIAN
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/macroAssembler_zero.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ZERO_VM_MACROASSEMBLER_ZERO_HPP
+#define CPU_ZERO_VM_MACROASSEMBLER_ZERO_HPP
+
+// Needed for includes in shared files.
+
+#endif // CPU_ZERO_VM_MACROASSEMBLER_ZERO_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/macroAssembler_zero.inline.hpp	Fri Aug 05 09:50:25 2016 -0700
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ZERO_VM_MACROASSEMBLER_ZERO_INLINE_HPP
+#define CPU_ZERO_VM_MACROASSEMBLER_ZERO_INLINE_HPP
+
+// Needed for includes in shared files.
+
+#endif // CPU_ZERO_VM_MACROASSEMBLER_ZERO_INLINE_HPP
--- a/src/jdk.hotspot.agent/doc/ReadMe-JavaScript.text	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-The HotSpot Serviceability Agent (SA) is a debugger for hotspot core
-dumps and hung processes. There is a read-only JDI (Java Debugger
-Interface) implementation on top of SA. This is part of JDK product and
-the classes are in $JDK/tools/sa-jdi.jar.
-
-In addition, there are few serviceability tools in $JDK/bin, namely,
-jstack (java stack trace tool), jmap (heap tool), jinfo (Java config
-tool) and jsadebugd. The classes for these are also in sa-jdi.jar
-file. sa-jdi.jar file is built along with hotspot (libjvm.so) on Solaris
-and Linux platforms. On Windows platform, SA-JDI is not included and
-serviceability tools do not use SA.
-
-Apart from these, HotSpot SA consists of a number of tools that are
-*not* included in JDK product bits.
-
-The sources and makefile for all-of-SA (including non-productized stuff)
-are under $HOTSPOT_WS/agent directory. The makefile $HOTSPOT/agent/make
-directory and shell scripts (and batch files) are used to build and run
-SA non-product tools. There is also documentation of SA under
-$HOTSPOT/agent/doc directory.
-
-To build complete SA, you need to have Rhino Mozilla jar (js.jar)
-version 1.5R5 under $HOTSPOT/agent/src/share/lib directory. Rhino is
-JavaScript interpreter written in Java. Rhino is used to implement SA
-features such as
-
-* SA command line debugger's JavaScript interface
- - refer to $HOTSPOT/agent/doc/clhsdb.html
- - refer to $HOTSPOT/agent/doc/jsdb.html
-* SA simple object query language (SOQL) 
- - language to query Java heap. 
-
-Rhino's "js.jar" is not included in hotspot source bundles. You need to
-download it from http://www.mozilla.org/rhino/download.html.
- 
-Without js.jar, $HOTSPOT/agent/make/Makefile will fail to build. But,
-note that sa-jdi.jar containing the productized portions of SA will
-still be built when you build hotspot JVM. 
--- a/src/jdk.hotspot.agent/share/classes/module-info.java	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/jdk.hotspot.agent/share/classes/module-info.java	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,12 +28,8 @@
     requires java.desktop;
     requires java.rmi;
     requires java.scripting;
-    requires jdk.jdi;
 
     // RMI needs to serialize types in this package
     exports sun.jvm.hotspot.debugger.remote to java.rmi;
-    provides com.sun.jdi.connect.Connector with sun.jvm.hotspot.jdi.SACoreAttachingConnector;
-    provides com.sun.jdi.connect.Connector with sun.jvm.hotspot.jdi.SADebugServerAttachingConnector;
-    provides com.sun.jdi.connect.Connector with sun.jvm.hotspot.jdi.SAPIDAttachingConnector;
 
 }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SALauncher.java	Thu Aug 04 15:52:14 2016 -0700
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/SALauncher.java	Fri Aug 05 09:50:25 2016 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
 
     private static boolean launcherHelp() {
         System.out.println("    clhsdb       \tcommand line debugger");
+        System.out.println("    debugd       \tdebug server");
         System.out.println("    hsdb         \tui debugger");
         System.out.println("    jstack --help\tto get more information");
         System.out.println("    jmap   --help\tto get more information");
@@ -54,6 +55,21 @@
         return false;
     }
 
+    private static boolean debugdHelp() {
+        // [options] <pid> [server-id]
+        // [options] <executable> <core> [server-id]
+        java.io.PrintStream out = System.out;
+        out.print(" [option] <pid> [server-id]");
+        out.println("\t\t(to connect to a live java process)");
+        out.print("   or  [option] <executable> <core> [server-id]");
+        out.println("\t\t(to connect to a core file produced by <executable>)");
+        out.print("\t\tserver-id is an optional unique id for this debug server, needed ");
+        out.println("\t\tif multiple debug servers are run on the same machine");
+        out.println("where option includes:");
+        out.println("   -h | -help\tto print this help message");
+        return false;
+    }
+
     private static boolean jinfoHelp() {
         // --flags -> -flags
         // --sysprops -> -sysprops
@@ -106,6 +122,9 @@
         if (toolName.equals("jsnap")) {
             return jsnapHelp();
         }
+        if (toolName.equals("debugd")) {
+            return debugdHelp();
+        }
         if (toolName.equals("hsdb") || toolName.equals("clhsdb")) {
             return commonHelp();
         }
@@ -377,13 +396,28 @@
         JSnap.main(newArgs.toArray(new String[newArgs.size()]));
     }
 
+    private static void runDEBUGD(String[] oldArgs) {
+        if ((oldArgs.length < 1) || (oldArgs.length > 3)) {
+            debugdHelp();
+        }
+
+        // By default SA agent classes prefer Windows process debugger
+        // to windbg debugger. SA expects special properties to be set
+        // to choose other debuggers. We will set those here before
+        // attaching to SA agent.
+        System.setProperty("sun.jvm.hotspot.debugger.useWindbgDebugger", "true");
+
+        // delegate to the actual SA debug server.
+        sun.jvm.hotspot.DebugServer.main(oldArgs);
+    }
+
     public static void main(String[] args) {
         // Provide a help
         if (args.length == 0) {
             launcherHelp();
             return;
         }
-        // No arguments imply help for jstack, jmap, jinfo but launch clhsdb and hsdb
+        // No arguments imply help for debugd, jstack, jmap, jinfo but launch clhsdb and hsdb
         if (args.length == 1 && !args[0].equals("clhsdb") && !args[0].equals("hsdb")) {
             toolHelp(args[0]);
             return;
@@ -431,6 +465,11 @@
                 return;
             }
 
+            if (args[0].equals("debugd")) {
+                runDEBUGD(oldArgs);
+                return;
+            }
+
             throw new SAGetoptException("Unknown tool: " + args[0]);
         } catch (SAGetoptException e) {
             System.err.println(e.getMessage());
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ArrayReferenceImpl.java	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.jdi;
-
-import com.sun.jdi.*;
-
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Iterator;
-
-import sun.jvm.hotspot.oops.Instance;
-import sun.jvm.hotspot.oops.Array;
-import sun.jvm.hotspot.runtime.BasicType;
-import sun.jvm.hotspot.utilities.Assert;
-
-public class ArrayReferenceImpl extends ObjectReferenceImpl
-    implements ArrayReference
-{
-    private int length;
-    ArrayReferenceImpl(VirtualMachine aVm, sun.jvm.hotspot.oops.Array aRef) {
-        super(aVm, aRef);
-        length = (int) aRef.getLength();
-    }
-
-    ArrayTypeImpl arrayType() {
-        return (ArrayTypeImpl)type();
-    }
-
-    /**
-     * Return array length.
-     */
-    public int length() {
-        return length;
-    }
-
-    public Value getValue(int index) {
-        List list = getValues(index, 1);
-        return (Value)list.get(0);
-    }
-
-    public List getValues() {
-        return getValues(0, -1);
-    }
-
-    /**
-     * Validate that the range to set/get is valid.
-     * length of -1 (meaning rest of array) has been converted
-     * before entry.
-     */
-    private void validateArrayAccess(int index, int len) {
-        // because length can be computed from index,
-        // index must be tested first for correct error message
-        if ((index < 0) || (index > length())) {
-            throw new IndexOutOfBoundsException(
-                        "Invalid array index: " + index);
-        }
-        if (len < 0) {
-            throw new IndexOutOfBoundsException(
-                        "Invalid array range length: " + len);
-        }
-        if (index + len > length()) {
-            throw new IndexOutOfBoundsException(
-                        "Invalid array range: " +
-                        index + " to " + (index + len - 1));
-        }
-    }
-
-    public List getValues(int index, int len) {
-        if (len == -1) { // -1 means the rest of the array
-           len = length() - index;
-        }
-        validateArrayAccess(index, len);
-        List vals = new ArrayList();
-        if (len == 0) {
-            return vals;
-        }
-
-        sun.jvm.hotspot.oops.TypeArray typeArray = null;
-        sun.jvm.hotspot.oops.ObjArray objArray = null;
-        if (ref() instanceof sun.jvm.hotspot.oops.TypeArray) {
-            typeArray = (sun.jvm.hotspot.oops.TypeArray)ref();
-        } else if (ref() instanceof sun.jvm.hotspot.oops.ObjArray) {
-            objArray = (sun.jvm.hotspot.oops.ObjArray)ref();
-        } else {
-            throw new RuntimeException("should not reach here");
-        }
-
-        char c = arrayType().componentSignature().charAt(0);
-        BasicType variableType = BasicType.charToBasicType(c);
-
-        final int limit = index + len;
-        for (int ii = index; ii < limit; ii++) {
-            ValueImpl valueImpl;
-            if (variableType == BasicType.T_BOOLEAN) {
-                valueImpl = (BooleanValueImpl) vm.mirrorOf(typeArray.getBooleanAt(ii));
-            } else if (variableType == BasicType.T_CHAR) {
-                valueImpl = (CharValueImpl) vm.mirrorOf(typeArray.getCharAt(ii));
-            } else if (variableType == BasicType.T_FLOAT) {
-                valueImpl = (FloatValueImpl) vm.mirrorOf(typeArray.getFloatAt(ii));
-            } else if (variableType == BasicType.T_DOUBLE) {
-                valueImpl =  (DoubleValueImpl) vm.mirrorOf(typeArray.getDoubleAt(ii));
-            } else if (variableType == BasicType.T_BYTE) {
-                valueImpl =  (ByteValueImpl) vm.mirrorOf(typeArray.getByteAt(ii));
-            } else if (variableType == BasicType.T_SHORT) {
-                valueImpl =  (ShortValueImpl) vm.mirrorOf(typeArray.getShortAt(ii));
-            } else if (variableType == BasicType.T_INT) {
-                valueImpl =  (IntegerValueImpl) vm.mirrorOf(typeArray.getIntAt(ii));
-            } else if (variableType == BasicType.T_LONG) {
-                valueImpl =  (LongValueImpl) vm.mirrorOf(typeArray.getLongAt(ii));
-            } else if (variableType == BasicType.T_OBJECT) {
-                // we may have an [Ljava/lang/Object; - i.e., Object[] with the
-                // elements themselves may be arrays because every array is an Object.
-                valueImpl = (ObjectReferenceImpl) vm.objectMirror(objArray.getObjAt(ii));
-            } else if (variableType == BasicType.T_ARRAY) {
-                valueImpl = (ArrayReferenceImpl) vm.arrayMirror((Array) objArray.getObjAt(ii));
-            } else {
-                throw new RuntimeException("should not reach here");
-            }
-            vals.add (valueImpl);
-        }
-        return vals;
-    }
-
-    public void setValue(int index, Value value)
-            throws InvalidTypeException,
-                   ClassNotLoadedException {
-        vm.throwNotReadOnlyException("ArrayReference.setValue(...)");
-    }
-
-    public void setValues(List values)
-            throws InvalidTypeException,
-                   ClassNotLoadedException {
-        setValues(0, values, 0, -1);
-    }
-
-    public void setValues(int index, List values,
-                          int srcIndex, int length)
-            throws InvalidTypeException,
-                   ClassNotLoadedException {
-
-        vm.throwNotReadOnlyException("ArrayReference.setValue(...)");
-
-    }
-
-    public String toString() {
-        return "instance of " + arrayType().componentTypeName() +
-               "[" + length() + "] (id=" + uniqueID() + ")";
-    }
-}
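[Editor's note: for context on what the deleted ArrayReferenceImpl served, here is a small client-side sketch against the public com.sun.jdi.ArrayReference interface it implemented (read-only under SA: setValue/setValues threw). The dump helper and the assumption of an already-obtained array mirror are illustrative, not part of this changeset.]

    import com.sun.jdi.ArrayReference;
    import com.sun.jdi.Value;
    import java.util.List;

    class ArrayDumpSketch {
        // Prints every element of a mirrored array, exercising the
        // length()/getValues() paths implemented by the class removed above.
        static void dump(ArrayReference array) {
            int length = array.length();
            List<Value> values = array.getValues();   // equivalent to getValues(0, -1)
            for (int i = 0; i < length; i++) {
                System.out.println(i + " -> " + values.get(i));
            }
        }
    }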
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/jdi/ArrayTypeImpl.java	Thu Aug 04 15:52:14 2016 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,222 +0,0 @@
-/*
- * Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.jdi;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import sun.jvm.hotspot.oops.ArrayKlass;
-import sun.jvm.hotspot.oops.Instance;
-import sun.jvm.hotspot.oops.InstanceKlass;
-import sun.jvm.hotspot.oops.Klass;
-import sun.jvm.hotspot.oops.ObjArrayKlass;
-import sun.jvm.hotspot.oops.Symbol;
-import sun.jvm.hotspot.oops.TypeArrayKlass;
-
-import com.sun.jdi.ArrayReference;
-import com.sun.jdi.ArrayType;
-import com.sun.jdi.ClassLoaderReference;
-import com.sun.jdi.ClassNotLoadedException;
-import com.sun.jdi.InterfaceType;
-import com.sun.jdi.Method;
-import com.sun.jdi.PrimitiveType;
-import com.sun.jdi.ReferenceType;
-import com.sun.jdi.Type;
-import com.sun.jdi.VirtualMachine;
-
-public class ArrayTypeImpl extends ReferenceTypeImpl implements ArrayType {
-  protected ArrayTypeImpl(VirtualMachine aVm, ArrayKlass aRef) {
-        super(aVm, aRef);
-    }
-
-    public ArrayReference newInstance(int length) {
-        vm.throwNotReadOnlyException("ArrayType.newInstance(int)");
-        return null;
-    }
-
-    public String componentSignature() {
-        return signature().substring(1); // Just skip the leading '['
-    }
-
-    public String componentTypeName() {
-        JNITypeParser parser = new JNITypeParser(componentSignature());
-        return parser.typeName();
-    }
-
-    public ClassLoaderReference classLoader() {
-        if (ref() instanceof TypeArrayKlass) {
-            // primitive array klasses are loaded by bootstrap loader
-            return null;
-        } else {
-            Klass bottomKlass = ((ObjArrayKlass)ref()).getBottomKlass();
-            if (bottomKlass instanceof TypeArrayKlass) {
-                // multidimensional primitive array klasses are loaded by bootstrap loader
-                return null;
-            } else {
-                // class loader of any other obj array klass is same as the loader
-                // that loaded the bottom InstanceKlass
-                Instance xx = (Instance)(((InstanceKlass) bottomKlass).getClassLoader());
-                return vm.classLoaderMirror(xx);
-            }
-        }
-    }
-
-    @Override
-    void addVisibleMethods(Map<String, Method> methodMap, Set<InterfaceType> handledInterfaces) {
-        // arrays don't have methods
-    }
-
-    List getAllMethods() {
-        // arrays don't have methods
-        // JLS says arrays have methods of java.lang.Object. But
-        // JVMDI-JDI returns zero size list. We do the same here
-        // for consistency.
-        return new ArrayList(0);
-    }
-
-    /*
-     * Find the type object, if any, of a component type of this array.
-     * The component type does not have to be immediate; e.g. this method
-     * can be used to find the component Foo of Foo[][].
-     */
-    public Type componentType() throws ClassNotLoadedException {
-        ArrayKlass k = (ArrayKlass) ref();
-        if (k instanceof ObjArrayKlass) {
-            Klass elementKlass = ((ObjArrayKlass)k).getElementKlass();
-            if (elementKlass == null) {
-                throw new ClassNotLoadedException(componentSignature());
-            } else {
-                return vm.referenceType(elementKlass);
-            }
-        } else {
-            // It's a primitive type
-            return vm.primitiveTypeMirror(signature().charAt(1));
-        }
-    }
-
-    static boolean isComponentAssignable(Type destination, Type source) {
-        if (source instanceof PrimitiveType) {
-            // Assignment of primitive arrays requires identical
-            // component types.
-            return source.equals(destination);
-        } else {
-           if (destination instanceof PrimitiveType) {
-                return false;
-            }
-
-            ReferenceTypeImpl refSource = (ReferenceTypeImpl)source;
-            ReferenceTypeImpl refDestination = (ReferenceTypeImpl)destination;
-            // Assignment of object arrays requires availability
-            // of widening conversion of component types
-            return refSource.isAssignableTo(refDestination);
-        }
-    }
-
-
-    /*
-    * Return true if an instance of the  given reference type
-    * can be assigned to a variable of this type
-    */
-    boolean isAssignableTo(ReferenceType destType) {
-        if (destType instanceof ArrayType) {
-            try {
-                Type destComponentType = ((ArrayType)destType).componentType();
-                return isComponentAssignable(destComponentType, componentType());
-            } catch (ClassNotLoadedException e) {
-                // One or both component types has not yet been
-                // loaded => can't assign
-                return false;
-            }
-        } else {
-            Symbol typeName = ((ReferenceTypeImpl)destType).typeNameAsSymbol();
-            if (destType instanceof InterfaceType) {
-                // Every array type implements java.io.Serializable and
-                // java.lang.Cloneable. fixme in JVMDI-JDI, includes only
-                // Cloneable but not Serializable.
-                return typeName.equals(vm.javaLangCloneable()) ||
-                       typeName.equals(vm.javaIoSerializable());
-            } else {
-                // Only valid ClassType assignee is Object
-                return typeName.equals(vm.javaLangObject());
-            }
-        }
-    }
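[Editor's note: the assignability rules encoded by isComponentAssignable() and isAssignableTo() above mirror plain Java language rules; the sketch below is illustrative only and not part of this changeset.]

    class ArrayAssignabilitySketch {
        void examples() {
            Object[] widened = new String[1];          // object arrays: a widening reference
                                                       // conversion of the component type suffices
            Cloneable clone = new int[1];              // every array type implements Cloneable...
            java.io.Serializable ser = new double[2];  // ...and java.io.Serializable
            Object onlyClass = new long[3];            // Object is the only valid class-type assignee
            // int[] bad = new short[1];               // would not compile: primitive arrays
                                                       // require identical component types
        }
    }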
-
-    List inheritedTypes() {
-        // arrays are derived from java.lang.Object and
-        // B[] is derived from A[] if B is derived from A.
-        // But JVMDI-JDI returns zero sized list and we do the
-        // same for consistency.
-        return new ArrayList(0);
-    }
-
-    int getModifiers() {
-        /*
-         * For object arrays, the return values for Interface
-         * Accessible.isPrivate(), Accessible.isProtected(),
-         * etc... are the same as would be returned for the
-         * component type.  Fetch the modifier bits from the