changeset 42664:29142a56c193

8168503: JEP 297: Unified arm32/arm64 Port
Reviewed-by: kvn, enevill, ihse, dholmes, erikj, coleenp, cjplummer
author bobv
date Mon, 19 Dec 2016 12:39:01 -0500
parents 2335df372367
children 206362a31f79
files hotspot/make/gensrc/GensrcAdlc.gmk hotspot/make/lib/CompileJvm.gmk hotspot/make/lib/JvmFeatures.gmk hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp hotspot/src/cpu/arm/vm/arm.ad hotspot/src/cpu/arm/vm/arm_32.ad hotspot/src/cpu/arm/vm/arm_64.ad hotspot/src/cpu/arm/vm/assembler_arm.cpp hotspot/src/cpu/arm/vm/assembler_arm.hpp hotspot/src/cpu/arm/vm/assembler_arm.inline.hpp hotspot/src/cpu/arm/vm/assembler_arm_32.cpp hotspot/src/cpu/arm/vm/assembler_arm_32.hpp hotspot/src/cpu/arm/vm/assembler_arm_64.cpp hotspot/src/cpu/arm/vm/assembler_arm_64.hpp hotspot/src/cpu/arm/vm/bytes_arm.hpp hotspot/src/cpu/arm/vm/c1_CodeStubs_arm.cpp hotspot/src/cpu/arm/vm/c1_Defs_arm.hpp hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.cpp hotspot/src/cpu/arm/vm/c1_FpuStackSim_arm.hpp hotspot/src/cpu/arm/vm/c1_FrameMap_arm.cpp hotspot/src/cpu/arm/vm/c1_FrameMap_arm.hpp hotspot/src/cpu/arm/vm/c1_LIRAssembler_arm.cpp hotspot/src/cpu/arm/vm/c1_LIRAssembler_arm.hpp hotspot/src/cpu/arm/vm/c1_LIRGenerator_arm.cpp hotspot/src/cpu/arm/vm/c1_LIRGenerator_arm.hpp hotspot/src/cpu/arm/vm/c1_LIR_arm.cpp hotspot/src/cpu/arm/vm/c1_LinearScan_arm.cpp hotspot/src/cpu/arm/vm/c1_LinearScan_arm.hpp hotspot/src/cpu/arm/vm/c1_MacroAssembler_arm.cpp hotspot/src/cpu/arm/vm/c1_MacroAssembler_arm.hpp hotspot/src/cpu/arm/vm/c1_Runtime1_arm.cpp hotspot/src/cpu/arm/vm/c1_globals_arm.hpp hotspot/src/cpu/arm/vm/c2_globals_arm.hpp hotspot/src/cpu/arm/vm/codeBuffer_arm.hpp hotspot/src/cpu/arm/vm/compiledIC_arm.cpp hotspot/src/cpu/arm/vm/copy_arm.hpp hotspot/src/cpu/arm/vm/debug_arm.cpp hotspot/src/cpu/arm/vm/depChecker_arm.cpp hotspot/src/cpu/arm/vm/depChecker_arm.hpp hotspot/src/cpu/arm/vm/disassembler_arm.hpp hotspot/src/cpu/arm/vm/frame_arm.cpp hotspot/src/cpu/arm/vm/frame_arm.hpp hotspot/src/cpu/arm/vm/frame_arm.inline.hpp hotspot/src/cpu/arm/vm/globalDefinitions_arm.hpp hotspot/src/cpu/arm/vm/globals_arm.hpp hotspot/src/cpu/arm/vm/icBuffer_arm.cpp hotspot/src/cpu/arm/vm/icache_arm.cpp hotspot/src/cpu/arm/vm/icache_arm.hpp hotspot/src/cpu/arm/vm/interp_masm_arm.cpp hotspot/src/cpu/arm/vm/interp_masm_arm.hpp hotspot/src/cpu/arm/vm/interpreterRT_arm.cpp hotspot/src/cpu/arm/vm/interpreterRT_arm.hpp hotspot/src/cpu/arm/vm/javaFrameAnchor_arm.hpp hotspot/src/cpu/arm/vm/jniFastGetField_arm.cpp hotspot/src/cpu/arm/vm/jniTypes_arm.hpp hotspot/src/cpu/arm/vm/jni_arm.h hotspot/src/cpu/arm/vm/jvmciCodeInstaller_arm.cpp hotspot/src/cpu/arm/vm/macroAssembler_arm.cpp hotspot/src/cpu/arm/vm/macroAssembler_arm.hpp hotspot/src/cpu/arm/vm/macroAssembler_arm.inline.hpp hotspot/src/cpu/arm/vm/metaspaceShared_arm.cpp hotspot/src/cpu/arm/vm/methodHandles_arm.cpp hotspot/src/cpu/arm/vm/methodHandles_arm.hpp hotspot/src/cpu/arm/vm/nativeInst_arm.hpp hotspot/src/cpu/arm/vm/nativeInst_arm_32.cpp hotspot/src/cpu/arm/vm/nativeInst_arm_32.hpp hotspot/src/cpu/arm/vm/nativeInst_arm_64.cpp hotspot/src/cpu/arm/vm/nativeInst_arm_64.hpp hotspot/src/cpu/arm/vm/registerMap_arm.hpp hotspot/src/cpu/arm/vm/register_arm.cpp hotspot/src/cpu/arm/vm/register_arm.hpp hotspot/src/cpu/arm/vm/register_definitions_arm.cpp hotspot/src/cpu/arm/vm/relocInfo_arm.cpp hotspot/src/cpu/arm/vm/relocInfo_arm.hpp hotspot/src/cpu/arm/vm/runtime_arm.cpp hotspot/src/cpu/arm/vm/sharedRuntime_arm.cpp hotspot/src/cpu/arm/vm/stubGenerator_arm.cpp hotspot/src/cpu/arm/vm/stubRoutinesCrypto_arm.cpp hotspot/src/cpu/arm/vm/stubRoutines_arm.cpp hotspot/src/cpu/arm/vm/stubRoutines_arm.hpp hotspot/src/cpu/arm/vm/templateInterpreterGenerator_arm.cpp hotspot/src/cpu/arm/vm/templateTable_arm.cpp 
hotspot/src/cpu/arm/vm/templateTable_arm.hpp hotspot/src/cpu/arm/vm/vmStructs_arm.hpp hotspot/src/cpu/arm/vm/vm_version_arm.hpp hotspot/src/cpu/arm/vm/vm_version_arm_32.cpp hotspot/src/cpu/arm/vm/vm_version_arm_64.cpp hotspot/src/cpu/arm/vm/vmreg_arm.cpp hotspot/src/cpu/arm/vm/vmreg_arm.hpp hotspot/src/cpu/arm/vm/vmreg_arm.inline.hpp hotspot/src/cpu/arm/vm/vtableStubs_arm.cpp hotspot/src/jdk.hotspot.agent/linux/native/libsaproc/libproc.h hotspot/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp hotspot/src/os_cpu/linux_arm/vm/bytes_linux_arm.inline.hpp hotspot/src/os_cpu/linux_arm/vm/copy_linux_arm.inline.hpp hotspot/src/os_cpu/linux_arm/vm/globals_linux_arm.hpp hotspot/src/os_cpu/linux_arm/vm/linux_arm_32.s hotspot/src/os_cpu/linux_arm/vm/linux_arm_64.s hotspot/src/os_cpu/linux_arm/vm/macroAssembler_linux_arm_32.cpp hotspot/src/os_cpu/linux_arm/vm/orderAccess_linux_arm.inline.hpp hotspot/src/os_cpu/linux_arm/vm/os_linux_arm.cpp hotspot/src/os_cpu/linux_arm/vm/os_linux_arm.hpp hotspot/src/os_cpu/linux_arm/vm/prefetch_linux_arm.inline.hpp hotspot/src/os_cpu/linux_arm/vm/thread_linux_arm.cpp hotspot/src/os_cpu/linux_arm/vm/thread_linux_arm.hpp hotspot/src/os_cpu/linux_arm/vm/vmStructs_linux_arm.hpp hotspot/src/os_cpu/linux_arm/vm/vm_version_linux_arm_32.cpp hotspot/src/share/vm/c1/c1_Runtime1.cpp hotspot/src/share/vm/code/codeBlob.cpp hotspot/src/share/vm/code/codeBlob.hpp hotspot/src/share/vm/code/codeCache.hpp hotspot/src/share/vm/code/codeCacheExtensions.hpp hotspot/src/share/vm/code/codeCacheExtensions_ext.hpp hotspot/src/share/vm/code/relocInfo_ext.cpp hotspot/src/share/vm/code/relocInfo_ext.hpp hotspot/src/share/vm/code/stubs.cpp hotspot/src/share/vm/code/stubs.hpp hotspot/src/share/vm/interpreter/interpreterRuntime.cpp hotspot/src/share/vm/interpreter/templateInterpreter.cpp hotspot/src/share/vm/interpreter/templateInterpreterGenerator.cpp hotspot/src/share/vm/memory/heap.hpp hotspot/src/share/vm/memory/virtualspace.cpp hotspot/src/share/vm/precompiled/precompiled.hpp hotspot/src/share/vm/prims/methodHandles.cpp hotspot/src/share/vm/runtime/arguments.cpp hotspot/src/share/vm/runtime/init.cpp hotspot/src/share/vm/runtime/sharedRuntime.cpp hotspot/src/share/vm/runtime/stubCodeGenerator.cpp hotspot/src/share/vm/runtime/stubRoutines.cpp hotspot/src/share/vm/runtime/thread.cpp hotspot/src/share/vm/runtime/vm_operations.cpp hotspot/src/share/vm/runtime/vm_version.cpp
diffstat 132 files changed, 62940 insertions(+), 545 deletions(-)
--- a/hotspot/make/gensrc/GensrcAdlc.gmk	Mon Dec 19 00:49:34 2016 +0100
+++ b/hotspot/make/gensrc/GensrcAdlc.gmk	Mon Dec 19 12:39:01 2016 -0500
@@ -114,6 +114,10 @@
     ADLCFLAGS += -U_LP64
   endif
 
+  ifeq ($(HOTSPOT_TARGET_CPU_ARCH), arm)
+    ADLCFLAGS += -DARM=1
+  endif
+
   ##############################################################################
   # Concatenate all ad source files into a single file, which will be fed to
   # adlc. Also include a #line directive at the start of every included file
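The new -DARM=1 define lets adlc conditionally include sections of the concatenated ad sources; a minimal sketch of the intended use, with guard placement illustrative rather than taken from this patch:

    #ifdef ARM
    // forms shared by the unified arm32/arm64 port (arm.ad plus the
    // word-size specific arm_32.ad / arm_64.ad below)
    #endif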
--- a/hotspot/make/lib/CompileJvm.gmk	Mon Dec 19 00:49:34 2016 +0100
+++ b/hotspot/make/lib/CompileJvm.gmk	Mon Dec 19 12:39:01 2016 -0500
@@ -63,8 +63,8 @@
 # INCLUDE_SUFFIX_* is only meant for including the proper
 # platform files. Don't use it to guard code. Use the value of
 # HOTSPOT_TARGET_CPU_DEFINE etc. instead.
-# Remaining TARGET_ARCH_* is needed to distinguish closed and open
-# 64-bit ARM ports (also called AARCH64).
+# Remaining TARGET_ARCH_* is needed to select the CPU-specific
+# sources for 64-bit ARM ports (arm versus aarch64).
 JVM_CFLAGS_TARGET_DEFINES += \
     -DTARGET_ARCH_$(HOTSPOT_TARGET_CPU_ARCH) \
     -DINCLUDE_SUFFIX_OS=_$(HOTSPOT_TARGET_OS) \
@@ -139,6 +139,20 @@
 ################################################################################
 # Platform specific setup
 
+# ARM source selection
+
+ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), linux-arm)
+  JVM_EXCLUDE_PATTERNS += arm_64
+
+else ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU), linux-aarch64)
+  # For 64-bit arm builds, we use the 64-bit hotspot/src/cpu/arm
+  # hotspot sources if HOTSPOT_TARGET_CPU_ARCH is set to arm.
+  # Exclude the aarch64 and 32-bit arm files for this build.
+  ifeq ($(HOTSPOT_TARGET_CPU_ARCH), arm)
+    JVM_EXCLUDE_PATTERNS += arm_32 aarch64
+  endif
+endif
+
 ifneq ($(filter $(OPENJDK_TARGET_OS), linux macosx windows), )
   JVM_PRECOMPILED_HEADER := $(HOTSPOT_TOPDIR)/src/share/vm/precompiled/precompiled.hpp
 endif
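With this selection in place, -DTARGET_ARCH_arm (from the hunk above) only picks the platform files, while word size is distinguished inside the unified arm sources with the AARCH64 define, matching the "don't use it to guard code" note above; a hedged sketch of the guard pattern used throughout the new files:

    #ifdef AARCH64
      // 64-bit paths (compiled when the arm sources build for linux-aarch64)
    #else
      // 32-bit paths (arm_64 files are excluded for linux-arm)
    #endif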
--- a/hotspot/make/lib/JvmFeatures.gmk	Mon Dec 19 00:49:34 2016 +0100
+++ b/hotspot/make/lib/JvmFeatures.gmk	Mon Dec 19 12:39:01 2016 -0500
@@ -154,3 +154,108 @@
       compiledIC_aot_x86_64.cpp compilerRuntime.cpp \
       aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp
 endif
+################################################################################
+
+ifeq ($(call check-jvm-feature, link-time-opt), true)
+  # NOTE: Disable automatic optimization level and let the explicit cflag control
+  # optimization level instead. This activates O3 on slowdebug builds, just
+  # like the old build, but it's probably not right.
+  JVM_OPTIMIZATION :=
+  JVM_CFLAGS_FEATURES += -O3 -flto
+  JVM_LDFLAGS_FEATURES += -O3 -flto -fwhole-program -fno-strict-aliasing
+endif
+
+ifeq ($(call check-jvm-feature, minimal), true)
+  ifeq ($(call check-jvm-feature, link-time-opt), false)
+    JVM_OPTIMIZATION := SIZE
+    OPT_SPEED_SRC := \
+        allocation.cpp \
+        assembler.cpp \
+        assembler_linux_arm.cpp \
+        barrierSet.cpp \
+        basicLock.cpp \
+        biasedLocking.cpp \
+        bytecode.cpp \
+        bytecodeInterpreter.cpp \
+        bytecodeInterpreter_x86.cpp \
+        c1_Compilation.cpp \
+        c1_Compiler.cpp \
+        c1_GraphBuilder.cpp \
+        c1_LinearScan.cpp \
+        c1_LIR.cpp \
+        ciEnv.cpp \
+        ciObjectFactory.cpp \
+        codeBlob.cpp \
+        constantPool.cpp \
+        constMethod.cpp \
+        classLoader.cpp \
+        classLoaderData.cpp \
+        classFileParser.cpp \
+        classFileStream.cpp \
+        cpCache.cpp \
+        defNewGeneration.cpp \
+        frame_arm.cpp \
+        genCollectedHeap.cpp \
+        generation.cpp \
+        genMarkSweep.cpp \
+        growableArray.cpp \
+        handles.cpp \
+        hashtable.cpp \
+        heap.cpp \
+        icache.cpp \
+        icache_arm.cpp \
+        instanceKlass.cpp \
+        invocationCounter.cpp \
+        iterator.cpp \
+        javaCalls.cpp \
+        javaClasses.cpp \
+        jniFastGetField_arm.cpp \
+        jvm.cpp \
+        jvm_linux.cpp \
+        linkResolver.cpp \
+        klass.cpp \
+        klassVtable.cpp \
+        markSweep.cpp \
+        memRegion.cpp \
+        memoryPool.cpp \
+        method.cpp \
+        methodHandles.cpp \
+        methodHandles_arm.cpp \
+        methodLiveness.cpp \
+        metablock.cpp \
+        metaspace.cpp \
+        mutex.cpp \
+        mutex_linux.cpp \
+        mutexLocker.cpp \
+        nativeLookup.cpp \
+        objArrayKlass.cpp \
+        os_linux.cpp \
+        os_linux_arm.cpp \
+        placeHolders.cpp \
+        quickSort.cpp \
+        resourceArea.cpp \
+        rewriter.cpp \
+        sharedRuntime.cpp \
+        signature.cpp \
+        space.cpp \
+        stackMapTable.cpp \
+        symbolTable.cpp \
+        systemDictionary.cpp \
+        symbol.cpp \
+        synchronizer.cpp \
+        threadLS_bsd_x86.cpp \
+        threadLS_linux_arm.cpp \
+        threadLS_linux_x86.cpp \
+        timer.cpp \
+        typeArrayKlass.cpp \
+        unsafe.cpp \
+        utf8.cpp \
+        vmSymbols.cpp \
+        #
+
+    $(foreach s, $(OPT_SPEED_SRC), \
+        $(eval BUILD_LIBJVM_$s_OPTIMIZATION := HIGHEST_JVM))
+
+    BUILD_LIBJVM_systemDictionary.cpp_CXXFLAGS := -fno-optimize-sibling-calls
+  endif
+endif
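For reference, the foreach above expands to one per-file override per listed source, e.g. BUILD_LIBJVM_sharedRuntime.cpp_OPTIMIZATION := HIGHEST_JVM, so the minimal build keeps SIZE as its default optimization while still compiling these hot files for speed.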
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/arm/vm/abstractInterpreter_arm.cpp	Mon Dec 19 12:39:01 2016 -0500
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "interpreter/bytecode.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/constMethod.hpp"
+#include "oops/method.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/synchronizer.hpp"
+#include "utilities/macros.hpp"
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+#ifdef AARCH64
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : // fall through
+    case T_LONG   : // fall through
+    case T_VOID   : // fall through
+    case T_FLOAT  : // fall through
+    case T_DOUBLE : i = 4; break;
+    case T_OBJECT : // fall through
+    case T_ARRAY  : i = 5; break;
+#else
+    case T_VOID   : i = 0; break;
+    case T_BOOLEAN: i = 1; break;
+    case T_CHAR   : i = 2; break;
+    case T_BYTE   : i = 3; break;
+    case T_SHORT  : i = 4; break;
+    case T_INT    : i = 5; break;
+    case T_OBJECT : // fall through
+    case T_ARRAY  : i = 6; break;
+    case T_LONG   : i = 7; break;
+    case T_FLOAT  : i = 8; break;
+    case T_DOUBLE : i = 9; break;
+#endif // AARCH64
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
+}
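A sketch of how the index is consumed; the lookup itself lives in shared code (abstractInterpreter.hpp), not in this patch:

    // address AbstractInterpreter::result_handler(BasicType type) {
    //   return _native_abi_to_tosca[BasicType_as_index(type)];
    // }
    // e.g. T_INT selects slot 4 on AARCH64 but slot 5 on 32-bit ARM.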
+
+// These should never be compiled since the interpreter will prefer
+// the compiled version to the intrinsic version.
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  switch (method_kind(m)) {
+    case Interpreter::java_lang_math_sin     : // fall thru
+    case Interpreter::java_lang_math_cos     : // fall thru
+    case Interpreter::java_lang_math_tan     : // fall thru
+    case Interpreter::java_lang_math_abs     : // fall thru
+    case Interpreter::java_lang_math_log     : // fall thru
+    case Interpreter::java_lang_math_log10   : // fall thru
+    case Interpreter::java_lang_math_sqrt    :
+      return false;
+    default:
+      return true;
+  }
+}
+
+// How much stack a method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  const int stub_code = AARCH64_ONLY(24) NOT_AARCH64(12);  // see generate_call_stub
+  // Save space for one monitor to get into the interpreted method in case
+  // the method is synchronized
+  int monitor_size    = method->is_synchronized() ?
+                                1*frame::interpreter_frame_monitor_size() : 0;
+
+  // total overhead size: monitor_size + (sender SP, thru expr stack bottom).
+  // be sure to change this if you add/subtract anything to/from the overhead area
+  const int overhead_size = monitor_size +
+                            (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset);
+  const int method_stack = (method->max_locals() + method->max_stack()) *
+                           Interpreter::stackElementWords;
+  return overhead_size + method_stack + stub_code;
+}
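A worked instance of the formula, with the method shape assumed for illustration:

    // synchronized method, max_locals == 2, max_stack == 3, 32-bit ARM:
    //   stub_code     = 12                                  // NOT_AARCH64
    //   monitor_size  = 1 * frame::interpreter_frame_monitor_size()
    //   method_stack  = (2 + 3) * Interpreter::stackElementWords
    //   result        = overhead_size + method_stack + stub_code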
+
+// asm based interpreter deoptimization helpers
+int AbstractInterpreter::size_activation(int max_stack,
+                                         int tempcount,
+                                         int extra_args,
+                                         int moncount,
+                                         int callee_param_count,
+                                         int callee_locals,
+                                         bool is_top_frame) {
+  // Note: This calculation must exactly parallel the frame setup
+  // in TemplateInterpreterGenerator::generate_fixed_frame.
+  // fixed size of an interpreter frame:
+  int overhead = frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset;
+
+  // Our locals were accounted for by the caller (or last_frame_adjust on the transition)
+  // Since the callee parameters already account for the callee's params we only need to account for
+  // the extra locals.
+
+  int size = overhead +
+         ((callee_locals - callee_param_count)*Interpreter::stackElementWords) +
+         (moncount*frame::interpreter_frame_monitor_size()) +
+         tempcount*Interpreter::stackElementWords + extra_args;
+
+#ifdef AARCH64
+  size = round_to(size, StackAlignmentInBytes/BytesPerWord);
+#endif // AARCH64
+
+  return size;
+}
+
+void AbstractInterpreter::layout_activation(Method* method,
+                                            int tempcount,
+                                            int popframe_extra_args,
+                                            int moncount,
+                                            int caller_actual_parameters,
+                                            int callee_param_count,
+                                            int callee_locals,
+                                            frame* caller,
+                                            frame* interpreter_frame,
+                                            bool is_top_frame,
+                                            bool is_bottom_frame) {
+
+  // Set up the method, locals, and monitors.
+  // The frame interpreter_frame is guaranteed to be the right size,
+  // as determined by a previous call to the size_activation() method.
+  // It is also guaranteed to be walkable even though it is in a skeletal state
+  // NOTE: return size is in words not bytes
+
+  // fixed size of an interpreter frame:
+  int max_locals = method->max_locals() * Interpreter::stackElementWords;
+  int extra_locals = (method->max_locals() - method->size_of_parameters()) * Interpreter::stackElementWords;
+
+#ifdef ASSERT
+  assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable");
+#endif
+
+  interpreter_frame->interpreter_frame_set_method(method);
+  // NOTE the difference in using sender_sp and interpreter_frame_sender_sp
+  // interpreter_frame_sender_sp is the original sp of the caller (the unextended_sp)
+  // and sender_sp is (fp + sender_sp_offset*wordSize)
+
+#ifdef AARCH64
+  intptr_t* locals;
+  if (caller->is_interpreted_frame()) {
+    // attach locals to the expression stack of caller interpreter frame
+    locals = caller->interpreter_frame_tos_address() + caller_actual_parameters*Interpreter::stackElementWords - 1;
+  } else {
+    assert (is_bottom_frame, "should be");
+    locals = interpreter_frame->fp() + frame::sender_sp_offset + method->max_locals() - 1;
+  }
+
+  if (TraceDeoptimization) {
+    tty->print_cr("layout_activation:");
+
+    if (caller->is_entry_frame()) {
+      tty->print("entry ");
+    }
+    if (caller->is_compiled_frame()) {
+      tty->print("compiled ");
+    }
+    if (caller->is_interpreted_frame()) {
+      tty->print("interpreted ");
+    }
+    tty->print_cr("caller: sp=%p, unextended_sp=%p, fp=%p, pc=%p", caller->sp(), caller->unextended_sp(), caller->fp(), caller->pc());
+    tty->print_cr("interpreter_frame: sp=%p, unextended_sp=%p, fp=%p, pc=%p", interpreter_frame->sp(), interpreter_frame->unextended_sp(), interpreter_frame->fp(), interpreter_frame->pc());
+    tty->print_cr("method: max_locals = %d, size_of_parameters = %d", method->max_locals(), method->size_of_parameters());
+    tty->print_cr("caller_actual_parameters = %d", caller_actual_parameters);
+    tty->print_cr("locals = %p", locals);
+  }
+
+#ifdef ASSERT
+  if (caller_actual_parameters != method->size_of_parameters()) {
+    assert(caller->is_interpreted_frame(), "adjusted caller_actual_parameters, but caller is not interpreter frame");
+    Bytecode_invoke inv(caller->interpreter_frame_method(), caller->interpreter_frame_bci());
+
+    if (is_bottom_frame) {
+      assert(caller_actual_parameters == 0, "invalid adjusted caller_actual_parameters value for bottom frame");
+      assert(inv.is_invokedynamic() || inv.is_invokehandle(), "adjusted caller_actual_parameters for bottom frame, but not invokedynamic/invokehandle");
+    } else {
+      assert(caller_actual_parameters == method->size_of_parameters()+1, "invalid adjusted caller_actual_parameters value");
+      assert(!inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name()), "adjusted caller_actual_parameters, but no member arg");
+    }
+  }
+  if (caller->is_interpreted_frame()) {
+    intptr_t* locals_base = (locals - method->max_locals()*Interpreter::stackElementWords + 1);
+    locals_base = (intptr_t*)round_down((intptr_t)locals_base, StackAlignmentInBytes);
+    assert(interpreter_frame->sender_sp() <= locals_base, "interpreter-to-interpreter frame chaining");
+
+  } else if (caller->is_compiled_frame()) {
+    assert(locals + 1 <= caller->unextended_sp(), "compiled-to-interpreter frame chaining");
+
+  } else {
+    assert(caller->is_entry_frame(), "should be");
+    assert(locals + 1 <= caller->fp(), "entry-to-interpreter frame chaining");
+  }
+#endif // ASSERT
+
+#else
+  intptr_t* locals = interpreter_frame->sender_sp() + max_locals - 1;
+#endif // AARCH64
+
+  interpreter_frame->interpreter_frame_set_locals(locals);
+  BasicObjectLock* montop = interpreter_frame->interpreter_frame_monitor_begin();
+  BasicObjectLock* monbot = montop - moncount;
+  interpreter_frame->interpreter_frame_set_monitor_end(monbot);
+
+  // Set last_sp
+  intptr_t* stack_top = (intptr_t*) monbot  -
+    tempcount*Interpreter::stackElementWords -
+    popframe_extra_args;
+#ifdef AARCH64
+  interpreter_frame->interpreter_frame_set_stack_top(stack_top);
+
+  intptr_t* extended_sp = (intptr_t*) monbot  -
+    (method->max_stack() + 1) * Interpreter::stackElementWords - // +1 is reserved slot for exception handler
+    popframe_extra_args;
+  extended_sp = (intptr_t*)round_down((intptr_t)extended_sp, StackAlignmentInBytes);
+  interpreter_frame->interpreter_frame_set_extended_sp(extended_sp);
+#else
+  interpreter_frame->interpreter_frame_set_last_sp(stack_top);
+#endif // AARCH64
+
+  // All frames but the initial (oldest) interpreter frame we fill in have a
+  // value for sender_sp that allows walking the stack but isn't
+  // truly correct. Correct the value here.
+
+#ifdef AARCH64
+  if (caller->is_interpreted_frame()) {
+    intptr_t* sender_sp = (intptr_t*)round_down((intptr_t)caller->interpreter_frame_tos_address(), StackAlignmentInBytes);
+    interpreter_frame->set_interpreter_frame_sender_sp(sender_sp);
+
+  } else {
+    // in case of a non-interpreter caller, sender_sp of the oldest frame is
+    // already set to a valid value
+  }
+#else
+  if (extra_locals != 0 &&
+      interpreter_frame->sender_sp() == interpreter_frame->interpreter_frame_sender_sp() ) {
+    interpreter_frame->set_interpreter_frame_sender_sp(caller->sp() + extra_locals);
+  }
+#endif // AARCH64
+
+  *interpreter_frame->interpreter_frame_cache_addr() =
+    method->constants()->cache();
+  *interpreter_frame->interpreter_frame_mirror_addr() =
+    method->method_holder()->java_mirror();
+}
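A sketch of the contract between size_activation() and layout_activation() as shared deoptimization code applies it (simplified pseudo-flow; the real caller is in shared HotSpot sources, not this patch):

    int words = AbstractInterpreter::size_activation(max_stack, tempcount,
        extra_args, moncount, callee_param_count, callee_locals, is_top_frame);
    // reserve exactly 'words' words for the skeletal frame, then fill it in:
    AbstractInterpreter::layout_activation(method, tempcount, popframe_extra_args,
        moncount, caller_actual_parameters, callee_param_count, callee_locals,
        caller, interpreter_frame, is_top_frame, is_bottom_frame);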
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/arm/vm/arm.ad	Mon Dec 19 12:39:01 2016 -0500
@@ -0,0 +1,14428 @@
+//
+// Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+
+// ARM Architecture Description File
+
+//----------DEFINITION BLOCK---------------------------------------------------
+// Define name --> value mappings to inform the ADLC of an integer valued name
+// Current support includes integer values in the range [0, 0x7FFFFFFF]
+// Format:
+//        int_def  <name>         ( <int_value>, <expression>);
+// Generated Code in ad_<arch>.hpp
+//        #define  <name>   (<expression>)
+//        // value == <int_value>
+// Generated code in ad_<arch>.cpp adlc_verification()
+//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
+//
+definitions %{
+// The default cost (of an ALU instruction).
+  int_def DEFAULT_COST      (    100,     100);
+  int_def HUGE_COST         (1000000, 1000000);
+
+// Memory refs are twice as expensive as run-of-the-mill.
+  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);
+
+// Branches are even more expensive.
+  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
+  int_def CALL_COST         (    300, DEFAULT_COST * 3);
+%}
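Per the format notes at the top of this block, adlc turns each int_def into a checked #define; for MEMORY_REF_COST the generated code comes out as:

    // ad_arm.hpp:
    #define MEMORY_REF_COST (DEFAULT_COST * 2)   // value == 200
    // ad_arm.cpp, adlc_verification():
    assert(MEMORY_REF_COST == 200, "Expect (DEFAULT_COST * 2) to equal 200");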
+
+
+//----------SOURCE BLOCK-------------------------------------------------------
+// This is a block of C++ code which provides values, functions, and
+// definitions necessary in the rest of the architecture description
+source_hpp %{
+// Header information of the source block.
+// Method declarations/definitions which are used outside
+// the ad-scope can conveniently be defined here.
+//
+// To keep related declarations/definitions/uses close together,
+// we switch between source %{ }% and source_hpp %{ }% freely as needed.
+
+// Does destination need to be loaded in a register then passed to a
+// branch instruction?
+extern bool maybe_far_call(const CallNode *n);
+extern bool maybe_far_call(const MachCallNode *n);
+static inline bool cache_reachable() {
+  return MacroAssembler::_cache_fully_reachable();
+}
+
+#ifdef AARCH64
+#define ldr_32 ldr_w
+#define str_32 str_w
+#else
+#define ldr_32 ldr
+#define str_32 str
+#define tst_32 tst
+#define teq_32 teq
+#endif
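These width-neutral aliases let a single encoding body serve both word sizes; an illustrative (hypothetical) use inside an instruct encoding:

    __ ldr_32(dst, addr);   // assembles as ldr_w on AARCH64, ldr on arm32
    __ str_32(src, addr);   // likewise str_w versus str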
+#if 1
+extern bool PrintOptoAssembly;
+#endif
+
+class c2 {
+public:
+  static OptoRegPair return_value(int ideal_reg);
+};
+
+class CallStubImpl {
+
+  //--------------------------------------------------------------
+  //---<  Used for optimization in Compile::Shorten_branches  >---
+  //--------------------------------------------------------------
+
+ public:
+  // Size of call trampoline stub.
+  static uint size_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+};
+
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+#ifdef AARCH64
+    // ldr_literal; br; (pad); <literal>
+    return 3 * Assembler::InstructionSize + wordSize;
+#else
+    return ( 3 * 4 );
+#endif
+  }
+
+
+  static uint size_deopt_handler() {
+    return ( 9 * 4 );
+  }
+
+};
+
+%}
+
+source %{
+#define __ _masm.
+
+static FloatRegister reg_to_FloatRegister_object(int register_encoding);
+static Register reg_to_register_object(int register_encoding);
+
+
+// ****************************************************************************
+
+// REQUIRED FUNCTIONALITY
+
+// Indicate if the safepoint node needs the polling page as an input.
+// Since ARM does not have absolute addressing, it does.
+bool SafePointNode::needs_polling_address_input() {
+  return true;
+}
+
+// emit an interrupt that is caught by the debugger (for debugging compiler)
+void emit_break(CodeBuffer &cbuf) {
+  MacroAssembler _masm(&cbuf);
+  __ breakpoint();
+}
+
+#ifndef PRODUCT
+void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
+  st->print("TA");
+}
+#endif
+
+void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  emit_break(cbuf);
+}
+
+uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
+  return MachNode::size(ra_);
+}
+
+
+void emit_nop(CodeBuffer &cbuf) {
+  MacroAssembler _masm(&cbuf);
+  __ nop();
+}
+
+
+void emit_call_reloc(CodeBuffer &cbuf, const MachCallNode *n, MachOper *m, RelocationHolder const& rspec) {
+  int ret_addr_offset0 = n->as_MachCall()->ret_addr_offset();
+  int call_site_offset = cbuf.insts()->mark_off();
+  MacroAssembler _masm(&cbuf);
+  __ set_inst_mark(); // needed in emit_to_interp_stub() to locate the call
+  address target = (address)m->method();
+  assert(n->as_MachCall()->entry_point() == target, "sanity");
+  assert(maybe_far_call(n) == !__ reachable_from_cache(target), "sanity");
+  assert(cache_reachable() == __ cache_fully_reachable(), "sanity");
+
+  assert(target != NULL, "need real address");
+
+  int ret_addr_offset = -1;
+  if (rspec.type() == relocInfo::runtime_call_type) {
+    __ call(target, rspec);
+    ret_addr_offset = __ offset();
+  } else {
+    // scratches Rtemp
+    ret_addr_offset = __ patchable_call(target, rspec, true);
+  }
+  assert(ret_addr_offset - call_site_offset == ret_addr_offset0, "fix ret_addr_offset()");
+}
+
+//=============================================================================
+// REQUIRED FUNCTIONALITY for encoding
+void emit_lo(CodeBuffer &cbuf, int val) {  }
+void emit_hi(CodeBuffer &cbuf, int val) {  }
+
+
+//=============================================================================
+const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
+
+int Compile::ConstantTable::calculate_table_base_offset() const {
+#ifdef AARCH64
+  return 0;
+#else
+  int offset = -(size() / 2);
+  // flds, fldd: 8-bit  offset multiplied by 4: +/- 1024
+  // ldr, ldrb : 12-bit offset:                 +/- 4096
+  if (!Assembler::is_simm10(offset)) {
+    offset = Assembler::min_simm10();
+  }
+  return offset;
+#endif
+}
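A worked instance of the 32-bit path, with the table size assumed for illustration:

    // size() == 600 bytes  ->  offset = -(600 / 2) = -300
    // every entry then lies within +/-300 bytes of the base, inside both
    // the flds/fldd range (+/-1024) and the ldr/ldrb range (+/-4096);
    // only when -(size() / 2) underflows a signed 10-bit immediate is the
    // offset clamped to Assembler::min_simm10().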
+
+bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+  ShouldNotReachHere();
+}
+
+void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+  Compile* C = ra_->C;
+  Compile::ConstantTable& constant_table = C->constant_table();
+  MacroAssembler _masm(&cbuf);
+
+  Register r = as_Register(ra_->get_encode(this));
+  CodeSection* consts_section = __ code()->consts();
+  int consts_size = consts_section->align_at_start(consts_section->size());
+  assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);
+
+  // Materialize the constant table base.
+  address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
+  RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
+  __ mov_address(r, baseaddr, rspec);
+}
+
+uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
+#ifdef AARCH64
+  return 5 * Assembler::InstructionSize;
+#else
+  return 8;
+#endif
+}
+
+#ifndef PRODUCT
+void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
+  char reg[128];
+  ra_->dump_register(this, reg);
+  st->print("MOV_SLOW    &constanttable,%s\t! constant table base", reg);
+}
+#endif
+
+#ifndef PRODUCT
+void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
+  Compile* C = ra_->C;
+
+  for (int i = 0; i < OptoPrologueNops; i++) {
+    st->print_cr("NOP"); st->print("\t");
+  }
+#ifdef AARCH64
+  if (OptoPrologueNops <= 0) {
+    st->print_cr("NOP\t! required for safe patching");
+    st->print("\t");
+  }
+#endif
+
+  size_t framesize = C->frame_size_in_bytes();
+  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
+  int bangsize = C->bang_size_in_bytes();
+  // Remove two words for return addr and rbp,
+  framesize -= 2*wordSize;
+  bangsize -= 2*wordSize;
+
+  // Calls to C2R adapters often do not accept exceptional returns.
+  // We require that their callers must bang for them.  But be careful, because
+  // some VM calls (such as call site linkage) can use several kilobytes of
+  // stack.  But the stack safety zone should account for that.
+  // See bugs 4446381, 4468289, 4497237.
+  if (C->need_stack_bang(bangsize)) {
+    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
+  }
+  st->print_cr("PUSH   R_FP|R_LR_LR"); st->print("\t");
+  if (framesize != 0) {
+    st->print   ("SUB    R_SP, R_SP, " SIZE_FORMAT,framesize);
+  }
+}
+#endif
+
+void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  Compile* C = ra_->C;
+  MacroAssembler _masm(&cbuf);
+
+  for (int i = 0; i < OptoPrologueNops; i++) {
+    __ nop();
+  }
+#ifdef AARCH64
+  if (OptoPrologueNops <= 0) {
+    __ nop(); // required for safe patching by patch_verified_entry()
+  }
+#endif
+
+  size_t framesize = C->frame_size_in_bytes();
+  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
+  int bangsize = C->bang_size_in_bytes();
+  // Remove two words for return addr and fp,
+  framesize -= 2*wordSize;
+  bangsize -= 2*wordSize;
+
+  // Calls to C2R adapters often do not accept exceptional returns.
+  // We require that their callers must bang for them.  But be careful, because
+  // some VM calls (such as call site linkage) can use several kilobytes of
+  // stack.  But the stack safety zone should account for that.
+  // See bugs 4446381, 4468289, 4497237.
+  if (C->need_stack_bang(bangsize)) {
+    __ arm_stack_overflow_check(bangsize, Rtemp);
+  }
+
+  __ raw_push(FP, LR);
+  if (framesize != 0) {
+    __ sub_slow(SP, SP, framesize);
+  }
+
+  // offset from scratch buffer is not valid
+  if (strcmp(cbuf.name(), "Compile::Fill_buffer") == 0) {
+    C->set_frame_complete( __ offset() );
+  }
+
+  if (C->has_mach_constant_base_node()) {
+    // NOTE: We set the table base offset here because users might be
+    // emitted before MachConstantBaseNode.
+    Compile::ConstantTable& constant_table = C->constant_table();
+    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
+  }
+}
+
+uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
+  return MachNode::size(ra_);
+}
+
+int MachPrologNode::reloc() const {
+  return 10; // a large enough number
+}
+
+//=============================================================================
+#ifndef PRODUCT
+void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
+  Compile* C = ra_->C;
+
+  size_t framesize = C->frame_size_in_bytes();
+  framesize -= 2*wordSize;
+
+  if (framesize != 0) {
+    st->print("ADD    R_SP, R_SP, " SIZE_FORMAT "\n\t",framesize);
+  }
+  st->print("POP    R_FP|R_LR_LR");
+
+  if (do_polling() && ra_->C->is_method_compilation()) {
+    st->print("\n\t");
+#ifdef AARCH64
+    if (MacroAssembler::page_reachable_from_cache(os::get_polling_page())) {
+      st->print("ADRP     Rtemp, #PollAddr\t! Load Polling address\n\t");
+      st->print("LDR      ZR,[Rtemp + #PollAddr & 0xfff]\t!Poll for Safepointing");
+    } else {
+      st->print("mov_slow Rtemp, #PollAddr\t! Load Polling address\n\t");
+      st->print("LDR      ZR,[Rtemp]\t!Poll for Safepointing");
+    }
+#else
+    st->print("MOV    Rtemp, #PollAddr\t! Load Polling address\n\t");
+    st->print("LDR    Rtemp,[Rtemp]\t!Poll for Safepointing");
+#endif
+  }
+}
+#endif
+
+void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  MacroAssembler _masm(&cbuf);
+  Compile* C = ra_->C;
+
+  size_t framesize = C->frame_size_in_bytes();
+  framesize -= 2*wordSize;
+  if (framesize != 0) {
+    __ add_slow(SP, SP, framesize);
+  }
+  __ raw_pop(FP, LR);
+
+  // If this does safepoint polling, then do it here
+  if (do_polling() && ra_->C->is_method_compilation()) {
+#ifdef AARCH64
+    if (false && MacroAssembler::page_reachable_from_cache(os::get_polling_page())) {
+/* FIXME: TODO
+      __ relocate(relocInfo::xxx);
+      __ adrp(Rtemp, (intptr_t)os::get_polling_page());
+      __ relocate(relocInfo::poll_return_type);
+      int offset = os::get_polling_page() & 0xfff;
+      __ ldr(ZR, Address(Rtemp + offset));
+*/
+    } else {
+      __ mov_address(Rtemp, (address)os::get_polling_page(), symbolic_Relocation::polling_page_reference);
+      __ relocate(relocInfo::poll_return_type);
+      __ ldr(ZR, Address(Rtemp));
+    }
+#else
+    // mov_slow here is usually one or two instructions
+    __ mov_address(Rtemp, (address)os::get_polling_page(), symbolic_Relocation::polling_page_reference);
+    __ relocate(relocInfo::poll_return_type);
+    __ ldr(Rtemp, Address(Rtemp));
+#endif
+  }
+}
+
+uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
+#ifdef AARCH64
+  // allow for added alignment nop from mov_address bind_literal
+  return MachNode::size(ra_) + 1 * Assembler::InstructionSize;
+#else
+  return MachNode::size(ra_);
+#endif
+}
+
+int MachEpilogNode::reloc() const {
+  return 16; // a large enough number
+}
+
+const Pipeline * MachEpilogNode::pipeline() const {
+  return MachNode::pipeline_class();
+}
+
+int MachEpilogNode::safepoint_offset() const {
+  assert( do_polling(), "no return for this epilog node");
+  //  return MacroAssembler::size_of_sethi(os::get_polling_page());
+  Unimplemented();
+  return 0;
+}
+
+//=============================================================================
+
+// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
+enum RC { rc_bad, rc_int, rc_float, rc_stack };
+static enum RC rc_class( OptoReg::Name reg ) {
+  if (!OptoReg::is_valid(reg)) return rc_bad;
+  if (OptoReg::is_stack(reg)) return rc_stack;
+  VMReg r = OptoReg::as_VMReg(reg);
+  if (r->is_Register()) return rc_int;
+  assert(r->is_FloatRegister(), "must be");
+  return rc_float;
+}
+
+static inline bool is_iRegLd_memhd(OptoReg::Name src_first, OptoReg::Name src_second, int offset) {
+#ifdef AARCH64
+  return is_memoryHD(offset);
+#else
+  int rlo = Matcher::_regEncode[src_first];
+  int rhi = Matcher::_regEncode[src_second];
+  if (!((rlo&1)==0 && (rlo+1 == rhi))) {
+    tty->print_cr("CAUGHT BAD LDRD/STRD");
+  }
+  return (rlo&1)==0 && (rlo+1 == rhi) && is_memoryHD(offset);
+#endif
+}
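An example of the pairing rule this helper enforces on the 32-bit path (register encodings illustrative):

    // rlo == 0, rhi == 1  ->  even/odd adjacent pair, LDRD/STRD usable
    // rlo == 1, rhi == 2  ->  misaligned pair, flagged ("CAUGHT BAD LDRD/STRD")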
+
+uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
+                                        PhaseRegAlloc *ra_,
+                                        bool do_size,
+                                        outputStream* st ) const {
+  // Get registers to move
+  OptoReg::Name src_second = ra_->get_reg_second(in(1));
+  OptoReg::Name src_first = ra_->get_reg_first(in(1));
+  OptoReg::Name dst_second = ra_->get_reg_second(this );
+  OptoReg::Name dst_first = ra_->get_reg_first(this );
+
+  enum RC src_second_rc = rc_class(src_second);
+  enum RC src_first_rc = rc_class(src_first);
+  enum RC dst_second_rc = rc_class(dst_second);
+  enum RC dst_first_rc = rc_class(dst_first);
+
+  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
+
+  // Generate spill code!
+  int size = 0;
+
+  if (src_first == dst_first && src_second == dst_second)
+    return size;            // Self copy, no move
+
+#ifdef TODO
+  if (bottom_type()->isa_vect() != NULL) {
+  }
+#endif
+
+  // Shared code does not expect instruction set capability based bailouts here.
+  // Handle offset unreachable bailout with minimal change in shared code.
+  // Bailout only for real instruction emit.
+  // This requires a single comment change in shared code. ( see output.cpp "Normal" instruction case )
+
+  MacroAssembler _masm(cbuf);
+
+  // --------------------------------------
+  // Check for mem-mem move.  Load into unused float registers and fall into
+  // the float-store case.
+  if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
+    int offset = ra_->reg2offset(src_first);
+    if (cbuf && !is_memoryfp(offset)) {
+      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
+      return 0;
+    } else {
+      if (src_second_rc != rc_bad) {
+        assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
+        src_first     = OptoReg::Name(R_mem_copy_lo_num);
+        src_second    = OptoReg::Name(R_mem_copy_hi_num);
+        src_first_rc  = rc_float;
+        src_second_rc = rc_float;
+        if (cbuf) {
+          __ ldr_double(Rmemcopy, Address(SP, offset));
+        } else if (!do_size) {
+          st->print(LDR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
+        }
+      } else {
+        src_first     = OptoReg::Name(R_mem_copy_lo_num);
+        src_first_rc  = rc_float;
+        if (cbuf) {
+          __ ldr_float(Rmemcopy, Address(SP, offset));
+        } else if (!do_size) {
+          st->print(LDR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
+        }
+      }
+      size += 4;
+    }
+  }
+
+  if (src_second_rc == rc_stack && dst_second_rc == rc_stack) {
+    Unimplemented();
+  }
+
+  // --------------------------------------
+  // Check for integer reg-reg copy
+  if (src_first_rc == rc_int && dst_first_rc == rc_int) {
+    // Else normal reg-reg copy
+    assert( src_second != dst_first, "smashed second before evacuating it" );
+    if (cbuf) {
+      __ mov(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
+#ifndef PRODUCT
+    } else if (!do_size) {
+      st->print("MOV    R_%s, R_%s\t# spill",
+                Matcher::regName[dst_first],
+                Matcher::regName[src_first]);
+#endif
+    }
+#ifdef AARCH64
+    if (src_first+1 == src_second && dst_first+1 == dst_second) {
+      return size + 4;
+    }
+#endif
+    size += 4;
+  }
+
+  // Check for integer store
+  if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
+    int offset = ra_->reg2offset(dst_first);
+    if (cbuf && !is_memoryI(offset)) {
+      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
+      return 0;
+    } else {
+      if (src_second_rc != rc_bad && is_iRegLd_memhd(src_first, src_second, offset)) {
+        assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
+        if (cbuf) {
+          __ str_64(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(STR_64 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first), offset);
+#endif
+        }
+        return size + 4;
+      } else {
+        if (cbuf) {
+          __ str_32(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(STR_32 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first), offset);
+#endif
+        }
+      }
+    }
+    size += 4;
+  }
+
+  // Check for integer load
+  if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
+    int offset = ra_->reg2offset(src_first);
+    if (cbuf && !is_memoryI(offset)) {
+      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
+      return 0;
+    } else {
+      if (src_second_rc != rc_bad && is_iRegLd_memhd(dst_first, dst_second, offset)) {
+        assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
+        if (cbuf) {
+          __ ldr_64(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(LDR_64 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first), offset);
+#endif
+        }
+        return size + 4;
+      } else {
+        if (cbuf) {
+          __ ldr_32(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(LDR_32 "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first), offset);
+#endif
+        }
+      }
+    }
+    size += 4;
+  }
+
+  // Check for float reg-reg copy
+  if (src_first_rc == rc_float && dst_first_rc == rc_float) {
+    if (src_second_rc != rc_bad) {
+      assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
+      if (cbuf) {
+      __ mov_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
+#ifndef PRODUCT
+      } else if (!do_size) {
+        st->print(MOV_DOUBLE "    R_%s, R_%s\t# spill",
+                  Matcher::regName[dst_first],
+                  Matcher::regName[src_first]);
+#endif
+      }
+      return 4;
+    }
+    if (cbuf) {
+      __ mov_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
+#ifndef PRODUCT
+    } else if (!do_size) {
+      st->print(MOV_FLOAT "    R_%s, R_%s\t# spill",
+                Matcher::regName[dst_first],
+                Matcher::regName[src_first]);
+#endif
+    }
+    size = 4;
+  }
+
+  // Check for float store
+  if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
+    int offset = ra_->reg2offset(dst_first);
+    if (cbuf && !is_memoryfp(offset)) {
+      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
+      return 0;
+    } else {
+      // Further check for aligned-adjacent pair, so we can use a double store
+      if (src_second_rc != rc_bad) {
+        assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
+        if (cbuf) {
+          __ str_double(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(STR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
+#endif
+        }
+        return size + 4;
+      } else {
+        if (cbuf) {
+          __ str_float(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(STR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
+#endif
+        }
+      }
+    }
+    size += 4;
+  }
+
+  // Check for float load
+  if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
+    int offset = ra_->reg2offset(src_first);
+    if (cbuf && !is_memoryfp(offset)) {
+      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
+      return 0;
+    } else {
+      // Further check for aligned-adjacent pair, so we can use a double load
+      if (src_second_rc != rc_bad) {
+        assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
+        if (cbuf) {
+          __ ldr_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(LDR_DOUBLE "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first),offset);
+#endif
+        }
+        return size + 4;
+      } else {
+        if (cbuf) {
+          __ ldr_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
+#ifndef PRODUCT
+        } else if (!do_size) {
+          if (size != 0) st->print("\n\t");
+          st->print(LDR_FLOAT "   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_first),offset);
+#endif
+        }
+      }
+    }
+    size += 4;
+  }
+
+  // check for int reg -> float reg move
+  if (src_first_rc == rc_int && dst_first_rc == rc_float) {
+    // Further check for aligned-adjacent pair, so we can use a single instruction
+    if (src_second_rc != rc_bad) {
+      assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
+      assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
+      assert(src_second_rc == rc_int && dst_second_rc == rc_float, "unsupported");
+      if (cbuf) {
+#ifdef AARCH64
+        __ fmov_dx(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
+#else
+        __ fmdrr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]), reg_to_register_object(Matcher::_regEncode[src_second]));
+#endif
+#ifndef PRODUCT
+      } else if (!do_size) {
+        if (size != 0) st->print("\n\t");
+#ifdef AARCH64
+        st->print("FMOV_DX   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
+#else
+        st->print("FMDRR   R_%s, R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first), OptoReg::regname(src_second));
+#endif
+#endif
+      }
+      return size + 4;
+    } else {
+      if (cbuf) {
+        __ fmsr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
+#ifndef PRODUCT
+      } else if (!do_size) {
+        if (size != 0) st->print("\n\t");
+        st->print(FMSR "   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
+#endif
+      }
+      size += 4;
+    }
+  }
+
+  // check for float reg -> int reg move
+  if (src_first_rc == rc_float && dst_first_rc == rc_int) {
+    // Further check for aligned-adjacent pair, so we can use a single instruction
+    if (src_second_rc != rc_bad) {
+      assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
+      assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
+      assert(src_second_rc == rc_float && dst_second_rc == rc_int, "unsupported");
+      if (cbuf) {
+#ifdef AARCH64
+        __ fmov_xd(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
+#else
+        __ fmrrd(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
+#endif
+#ifndef PRODUCT
+      } else if (!do_size) {
+        if (size != 0) st->print("\n\t");
+#ifdef AARCH64
+        st->print("FMOV_XD R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
+#else
+        st->print("FMRRD   R_%s, R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(dst_second), OptoReg::regname(src_first));
+#endif
+#endif
+      }
+      return size + 4;
+    } else {
+      if (cbuf) {
+        __ fmrs(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
+#ifndef PRODUCT
+      } else if (!do_size) {
+        if (size != 0) st->print("\n\t");
+        st->print(FMRS "   R_%s, R_%s\t! spill",OptoReg::regname(dst_first), OptoReg::regname(src_first));
+#endif
+      }
+      size += 4;
+    }
+  }
+
+  // --------------------------------------------------------------------
+  // Check for hi bits still needing moving.  Only happens for misaligned
+  // arguments to native calls.
+  if (src_second == dst_second)
+    return size;               // Self copy; no move
+  assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
+
+#ifndef AARCH64
+  // Check for integer reg-reg copy.  Hi bits are stuck up in the top
+  // 32-bits of a 64-bit register, but are needed in low bits of another
+  // register (else it's a hi-bits-to-hi-bits copy which should have
+  // happened already as part of a 64-bit move)
+  if (src_second_rc == rc_int && dst_second_rc == rc_int) {
+    if (cbuf) {
+      __ mov(reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_register_object(Matcher::_regEncode[src_second]));
+#ifndef PRODUCT
+    } else if (!do_size) {
+      if (size != 0) st->print("\n\t");
+      st->print("MOV    R_%s, R_%s\t# spill high",
+                Matcher::regName[dst_second],
+                Matcher::regName[src_second]);
+#endif
+    }
+    return size+4;
+  }
+
+  // Check for high word integer store
+  if (src_second_rc == rc_int && dst_second_rc == rc_stack) {
+    int offset = ra_->reg2offset(dst_second);
+
+    if (cbuf && !is_memoryP(offset)) {
+      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
+      return 0;
+    } else {
+      if (cbuf) {
+        __ str(reg_to_register_object(Matcher::_regEncode[src_second]), Address(SP, offset));
+#ifndef PRODUCT
+      } else if (!do_size) {
+        if (size != 0) st->print("\n\t");
+        st->print("STR   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_second), offset);
+#endif
+      }
+    }
+    return size + 4;
+  }
+
+  // Check for high word integer load
+  if (dst_second_rc == rc_int && src_second_rc == rc_stack) {
+    int offset = ra_->reg2offset(src_second);
+    if (cbuf && !is_memoryP(offset)) {
+      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
+      return 0;
+    } else {
+      if (cbuf) {
+        __ ldr(reg_to_register_object(Matcher::_regEncode[dst_second]), Address(SP, offset));
+#ifndef PRODUCT
+      } else if (!do_size) {
+        if (size != 0) st->print("\n\t");
+        st->print("LDR   R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(dst_second), offset);
+#endif
+      }
+    }
+    return size + 4;
+  }
+#endif
+
+  Unimplemented();
+  return 0; // Mute compiler
+}
+
+#ifndef PRODUCT
+void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
+  implementation( NULL, ra_, false, st );
+}
+#endif
+
+void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  implementation( &cbuf, ra_, false, NULL );
+}
+
+uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
+  return implementation( NULL, ra_, true, NULL );
+}
+
+//=============================================================================
+#ifndef PRODUCT
+void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
+  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
+}
+#endif
+
+void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
+  MacroAssembler _masm(&cbuf);
+  for(int i = 0; i < _count; i += 1) {
+    __ nop();
+  }
+}
+
+uint MachNopNode::size(PhaseRegAlloc *ra_) const {
+  return 4 * _count;
+}
+
+
+//=============================================================================
+#ifndef PRODUCT
+void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
+  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
+  int reg = ra_->get_reg_first(this);
+  st->print("ADD    %s,R_SP+#%d",Matcher::regName[reg], offset);
+}
+#endif
+
+void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  MacroAssembler _masm(&cbuf);
+  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
+  int reg = ra_->get_encode(this);
+  Register dst = reg_to_register_object(reg);
+
+  if (is_aimm(offset)) {
+    __ add(dst, SP, offset);
+  } else {
+    __ mov_slow(dst, offset);
+#ifdef AARCH64
+    __ add(dst, SP, dst, ex_lsl);
+#else
+    __ add(dst, SP, dst);
+#endif
+  }
+}
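An illustration of the immediate split above, using 32-bit ARM modified immediates (offsets illustrative):

    // offset 1020 (0xFF << 2) encodes directly:   add  dst, SP, #1020
    // offset 1234 does not, so it is materialized first:
    //   mov_slow dst, #1234
    //   add      dst, SP, dst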
+
+uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
+  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
+  assert(ra_ == ra_->C->regalloc(), "sanity");
+  return ra_->C->scratch_emit_size(this);
+}
+
+//=============================================================================
+#ifndef PRODUCT
+#ifdef AARCH64
+#define R_RTEMP "R_R16"
+#else
+#define R_RTEMP "R_R12"
+#endif
+void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
+  st->print_cr("\nUEP:");
+  if (UseCompressedClassPointers) {
+    st->print_cr("\tLDR_w " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
+    st->print_cr("\tdecode_klass " R_RTEMP);
+  } else {
+    st->print_cr("\tLDR   " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
+  }
+  st->print_cr("\tCMP   " R_RTEMP ",R_R8" );
+  st->print   ("\tB.NE  SharedRuntime::handle_ic_miss_stub");
+}
+#endif
+
+void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  MacroAssembler _masm(&cbuf);
+  Register iCache  = reg_to_register_object(Matcher::inline_cache_reg_encode());
+  assert(iCache == Ricklass, "should be");
+  Register receiver = R0;
+
+  __ load_klass(Rtemp, receiver);
+  __ cmp(Rtemp, iCache);
+#ifdef AARCH64
+  Label match;
+  __ b(match, eq);
+  __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
+  __ bind(match);
+#else
+  __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
+#endif
+}
+
+uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
+  return MachNode::size(ra_);
+}
+
+
+//=============================================================================
+
+// Emit exception handler code.
+int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
+  MacroAssembler _masm(&cbuf);
+
+  address base = __ start_a_stub(size_exception_handler());
+  if (base == NULL) {
+    ciEnv::current()->record_failure("CodeCache is full");
+    return 0;  // CodeBuffer::expand failed
+  }
+
+  int offset = __ offset();
+
+  // OK to trash LR, because exception blob will kill it
+  __ jump(OptoRuntime::exception_blob()->entry_point(), relocInfo::runtime_call_type, LR_tmp);
+
+  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
+
+  __ end_a_stub();
+
+  return offset;
+}
+
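+// Emit deopt handler code.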
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
+  // Can't use any of the current frame's registers as we may have deopted
+  // at a poll and everything can be live.
+  MacroAssembler _masm(&cbuf);
+
+  address base = __ start_a_stub(size_deopt_handler());
+  if (base == NULL) {
+    ciEnv::current()->record_failure("CodeCache is full");
+    return 0;  // CodeBuffer::expand failed
+  }
+
+  int offset = __ offset();
+  address deopt_pc = __ pc();
+
+#ifdef AARCH64
+  // See LR saved by caller in sharedRuntime_arm.cpp
+  // see also LIR_Assembler::emit_deopt_handler
+
+  __ raw_push(LR, LR); // preserve LR in both slots
+  __ mov_relative_address(LR, deopt_pc);
+  __ str(LR, Address(SP, 1 * wordSize)); // save deopt PC
+  // OK to kill LR, because deopt blob will restore it from SP[0]
+  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, LR_tmp);
+#else
+  __ sub(SP, SP, wordSize); // make room for saved PC
+  __ push(LR); // save LR that may be live when we get here
+  __ mov_relative_address(LR, deopt_pc);
+  __ str(LR, Address(SP, wordSize)); // save deopt PC
+  __ pop(LR); // restore LR
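+  // At this point the original LR is restored and SP[0] holds the deopt PC.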
+  __ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
+#endif
+
+  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+
+  __ end_a_stub();
+  return offset;
+}
+
+const bool Matcher::match_rule_supported(int opcode) {
+  if (!has_match_rule(opcode))
+    return false;
+
+  switch (opcode) {
+  case Op_PopCountI:
+  case Op_PopCountL:
+    if (!UsePopCountInstruction)
+      return false;
+    break;
+  case Op_LShiftCntV:
+  case Op_RShiftCntV:
+  case Op_AddVB:
+  case Op_AddVS:
+  case Op_AddVI:
+  case Op_AddVL:
+  case Op_SubVB:
+  case Op_SubVS:
+  case Op_SubVI:
+  case Op_SubVL:
+  case Op_MulVS:
+  case Op_MulVI:
+  case Op_LShiftVB:
+  case Op_LShiftVS:
+  case Op_LShiftVI:
+  case Op_LShiftVL:
+  case Op_RShiftVB:
+  case Op_RShiftVS:
+  case Op_RShiftVI:
+  case Op_RShiftVL:
+  case Op_URShiftVB:
+  case Op_URShiftVS:
+  case Op_URShiftVI:
+  case Op_URShiftVL:
+  case Op_AndV:
+  case Op_OrV:
+  case Op_XorV:
+    return VM_Version::has_simd();
+  case Op_LoadVector:
+  case Op_StoreVector:
+  case Op_AddVF:
+  case Op_SubVF:
+  case Op_MulVF:
+#ifdef AARCH64
+    return VM_Version::has_simd();
+#else
+    return VM_Version::has_vfp() || VM_Version::has_simd();
+#endif
+  case Op_AddVD:
+  case Op_SubVD:
+  case Op_MulVD:
+  case Op_DivVF:
+  case Op_DivVD:
+#ifdef AARCH64
+    return VM_Version::has_simd();
+#else
+    return VM_Version::has_vfp();
+#endif
+  }
+
+  return true;  // By default, match rules are supported.
+}
+
+const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+
+  // TODO
+  // identify extra cases that we might want to provide match rules for
+  // e.g. Op_ vector nodes and other intrinsics, guarded by vlen
+  bool ret_value = match_rule_supported(opcode);
+  // Add rules here.
+
+  return ret_value;  // By default, match rules are supported.
+}
+
+const bool Matcher::has_predicated_vectors(void) {
+  return false;
+}
+
+const int Matcher::float_pressure(int default_pressure_threshold) {
+  return default_pressure_threshold;
+}
+
+int Matcher::regnum_to_fpu_offset(int regnum) {
+  return regnum - 32; // The FP registers are in the second chunk
+}
+
+// Vector width in bytes
+const int Matcher::vector_width_in_bytes(BasicType bt) {
+  return MaxVectorSize;
+}
+
+// Vector ideal reg corresponding to specified size in bytes
+const int Matcher::vector_ideal_reg(int size) {
+  assert(MaxVectorSize >= size, "");
+  switch(size) {
+    case  8: return Op_VecD;
+    case 16: return Op_VecX;
+  }
+  ShouldNotReachHere();
+  return 0;
+}
+
+const int Matcher::vector_shift_count_ideal_reg(int size) {
+  return vector_ideal_reg(size);
+}
+
+// Limits on vector size (number of elements) loaded into vector.
+const int Matcher::max_vector_size(const BasicType bt) {
+  assert(is_java_primitive(bt), "only primitive type vectors");
+  return vector_width_in_bytes(bt)/type2aelembytes(bt);
+}
+
+const int Matcher::min_vector_size(const BasicType bt) {
+  assert(is_java_primitive(bt), "only primitive type vectors");
+  return 8/type2aelembytes(bt);
+}
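+// For illustration, assuming MaxVectorSize == 16: T_SHORT vectors have
+// max_vector_size == 16/2 == 8 elements and min_vector_size == 8/2 == 4.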
+
+// ARM doesn't support misaligned vector loads/stores.
+const bool Matcher::misaligned_vectors_ok() {
+  return false;
+}
+
+// ARM doesn't support AES intrinsics
+const bool Matcher::pass_original_key_for_aes() {
+  return false;
+}
+
+const bool Matcher::convL2FSupported(void) {
+#ifdef AARCH64
+  return true;
+#else
+  return false;
+#endif
+}
+
+// Is this branch offset short enough that a short branch can be used?
+//
+// NOTE: If the platform does not provide any short branch variants, then
+//       this method should return false for offset 0.
+bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
+  // The passed offset is relative to address of the branch.
+  // On ARM a branch displacement is calculated relative to address
+  // of the branch + 8.
+  //
+  // offset -= 8;
+  // return (Assembler::is_simm24(offset));
+  return false;
+}
+
+const bool Matcher::isSimpleConstant64(jlong value) {
+  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
+#ifdef AARCH64
+  return (value == 0);
+#else
+  return false;
+#endif
+}
+
+// No scaling for the parameter of the ClearArray node.
+const bool Matcher::init_array_count_is_in_bytes = true;
+
+#ifdef AARCH64
+const int Matcher::long_cmove_cost() { return 1; }
+#else
+// Needs 2 CMOV's for longs.
+const int Matcher::long_cmove_cost() { return 2; }
+#endif
+
+#ifdef AARCH64
+const int Matcher::float_cmove_cost() { return 1; }
+#else
+// CMOVF/CMOVD are expensive on ARM.
+const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
+#endif
+
+// Does the CPU require late expand (see block.cpp for description of late expand)?
+const bool Matcher::require_postalloc_expand = false;
+
+// Do we need to mask the count passed to shift instructions or does
+// the cpu only look at the lower 5/6 bits anyway?
+// FIXME: does this handle vector shifts as well?
+#ifdef AARCH64
+const bool Matcher::need_masked_shift_count = false;
+#else
+const bool Matcher::need_masked_shift_count = true;
+#endif
+
+const bool Matcher::convi2l_type_required = true;
+
+// Should the Matcher clone shifts on addressing modes, expecting them
+// to be subsumed into complex addressing expressions or compute them
+// into registers?
+bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
+  return clone_base_plus_offset_address(m, mstack, address_visited);
+}
+
+void Compile::reshape_address(AddPNode* addp) {
+}
+
+bool Matcher::narrow_oop_use_complex_address() {
+  NOT_LP64(ShouldNotCallThis());
+  assert(UseCompressedOops, "only for compressed oops code");
+  return false;
+}
+
+bool Matcher::narrow_klass_use_complex_address() {
+  NOT_LP64(ShouldNotCallThis());
+  assert(UseCompressedClassPointers, "only for compressed klass code");
+  return false;
+}
+
+bool Matcher::const_oop_prefer_decode() {
+  NOT_LP64(ShouldNotCallThis());
+  return true;
+}
+
+bool Matcher::const_klass_prefer_decode() {
+  NOT_LP64(ShouldNotCallThis());
+  return true;
+}
+
+// Is it better to copy float constants, or load them directly from memory?
+// Intel can load a float constant from a direct address, requiring no
+// extra registers.  Most RISCs will have to materialize an address into a
+// register first, so they would do better to copy the constant from stack.
+const bool Matcher::rematerialize_float_constants = false;
+
+// If CPU can load and store mis-aligned doubles directly then no fixup is
+// needed.  Else we split the double into 2 integer pieces and move it
+// piece-by-piece.  Only happens when passing doubles into C code as the
+// Java calling convention forces doubles to be aligned.
+#ifdef AARCH64
+// On-stack replacement support:
+// We don't need Load[DL]_unaligned support, because the interpreter stack
+// has correct alignment.
+const bool Matcher::misaligned_doubles_ok = true;
+#else
+const bool Matcher::misaligned_doubles_ok = false;
+#endif
+
+// No-op on ARM.
+void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
+}
+
+// Advertise here if the CPU requires explicit rounding operations
+// to implement the UseStrictFP mode.
+const bool Matcher::strict_fp_requires_explicit_rounding = false;
+
+// Are floats converted to double when stored to stack during deoptimization?
+// ARM does not handle callee-save floats.
+bool Matcher::float_in_double() {
+  return false;
+}
+
+// Do ints take an entire long register or just half?
+// Note that we #ifdef off of _LP64.
+// The relevant question is how the int is callee-saved.  In _LP64
+// the whole long is written but de-opt'ing will have to extract
+// the relevant 32 bits; in !_LP64 only the low 32 bits are written.
+#ifdef _LP64
+const bool Matcher::int_in_long = true;
+#else
+const bool Matcher::int_in_long = false;
+#endif
+
+// Return whether or not this register is ever used as an argument.  This
+// function is used on startup to build the trampoline stubs in generateOptoStub.
+// Registers not mentioned will be killed by the VM call in the trampoline, and
+// arguments in those registers will not be available to the callee.
+bool Matcher::can_be_java_arg( int reg ) {
+#ifdef AARCH64
+  if (reg >= R_R0_num && reg < R_R8_num) return true;
+  if (reg >= R_V0_num && reg <= R_V7b_num && ((reg & 3) < 2)) return true;
+#else
+  if (reg == R_R0_num ||
+      reg == R_R1_num ||
+      reg == R_R2_num ||
+      reg == R_R3_num) return true;
+
+  if (reg >= R_S0_num &&
+      reg <= R_S13_num) return true;
+#endif
+  return false;
+}
+
+bool Matcher::is_spillable_arg( int reg ) {
+  return can_be_java_arg(reg);
+}
+
+bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
+  return false;
+}
+
+// Register for DIVI projection of divmodI
+RegMask Matcher::divI_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for MODI projection of divmodI
+RegMask Matcher::modI_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for DIVL projection of divmodL
+RegMask Matcher::divL_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for MODL projection of divmodL
+RegMask Matcher::modL_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return FP_REGP_mask();
+}
+
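+// "Maybe far" means the entry point may lie outside the range reachable by a
+// direct branch from the code cache, so a far-call sequence (target
+// materialized in a register) has to be assumed.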
+bool maybe_far_call(const CallNode *n) {
+  return !MacroAssembler::_reachable_from_cache(n->as_Call()->entry_point());
+}
+
+bool maybe_far_call(const MachCallNode *n) {
+  return !MacroAssembler::_reachable_from_cache(n->as_MachCall()->entry_point());
+}
+
+%}
+
+//----------ENCODING BLOCK-----------------------------------------------------
+// This block specifies the encoding classes used by the compiler to output
+// byte streams.  Encoding classes are parameterized macros used by
+// Machine Instruction Nodes in order to generate the bit encoding of the
+// instruction.  Operands specify their base encoding interface with the
+// interface keyword.  Four interfaces are currently supported:
+// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
+// operand to generate a function which returns its register number when
+// queried.   CONST_INTER causes an operand to generate a function which
+// returns the value of the constant when queried.  MEMORY_INTER causes an
+// operand to generate four functions which return the Base Register, the
+// Index Register, the Scale Value, and the Offset Value of the operand when
+// queried.  COND_INTER causes an operand to generate six functions which
+// return the encoding code (ie - encoding bits for the instruction)
+// associated with each basic boolean condition for a conditional instruction.
+//
+// Instructions specify two basic values for encoding.  They use the
+// ins_encode keyword to specify their encoding classes (which must be
+// a sequence of enc_class names, and their parameters, specified in
+// the encoding block), and they use the opcode keyword to specify, in
+// order, their primary, secondary, and tertiary opcode.  Only the
+// opcode sections which a particular instruction needs for encoding
+// need to be specified.
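+//
+// As an illustrative sketch (using the Java_To_Runtime class defined in the
+// encode block below), an instruct definition refers to an encoding class as:
+//   ins_encode( Java_To_Runtime( meth ) );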
+encode %{
+  enc_class call_epilog %{
+    // nothing
+  %}
+
+  enc_class Java_To_Runtime (method meth) %{
+    // CALL directly to the runtime
+    emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
+  %}
+
+  enc_class Java_Static_Call (method meth) %{
+    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
+    // who we intended to call.
+
+    if (!_method) {
+      emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
+    } else {
+      int method_index = resolved_method_index(cbuf);
+      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
+                                                  : static_call_Relocation::spec(method_index);
+      emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);
+
+      // Emit stubs for static call.
+      address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+      if (stub == NULL) {
+        ciEnv::current()->record_failure("CodeCache is full");
+        return;
+      }
+    }
+  %}
+
+  enc_class save_last_PC %{
+    // preserve mark
+    address mark = cbuf.insts()->mark();
+    debug_only(int off0 = cbuf.insts_size());
+    MacroAssembler _masm(&cbuf);
+    int ret_addr_offset = as_MachCall()->ret_addr_offset();
+    __ adr(LR, mark + ret_addr_offset);
+    __ str(LR, Address(Rthread, JavaThread::last_Java_pc_offset()));
+    debug_only(int off1 = cbuf.insts_size());
+    assert(off1 - off0 == 2 * Assembler::InstructionSize, "correct size prediction");
+    // restore mark
+    cbuf.insts()->set_mark(mark);
+  %}
+
+  enc_class preserve_SP %{
+    // preserve mark
+    address mark = cbuf.insts()->mark();
+    debug_only(int off0 = cbuf.insts_size());
+    MacroAssembler _masm(&cbuf);
+    // FP is preserved across all calls, even compiled calls.
+    // Use it to preserve SP in places where the callee might change the SP.
+    __ mov(Rmh_SP_save, SP);
+    debug_only(int off1 = cbuf.insts_size());
+    assert(off1 - off0 == 4, "correct size prediction");
+    // restore mark
+    cbuf.insts()->set_mark(mark);
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ mov(SP, Rmh_SP_save);
+  %}
+
+  enc_class Java_Dynamic_Call (method meth) %{
+    MacroAssembler _masm(&cbuf);
+    Register R8_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
+    assert(R8_ic_reg == Ricklass, "should be");
+    __ set_inst_mark();
+#ifdef AARCH64
+// TODO: see C1 LIR_Assembler::ic_call()
+    InlinedAddress oop_literal((address)Universe::non_oop_word());
+    int offset = __ offset();
+    int fixed_size = mov_oop_size * 4;
+    if (VM_Version::prefer_moves_over_load_literal()) {
+      uintptr_t val = (uintptr_t)Universe::non_oop_word();
+      __ movz(R8_ic_reg, (val >>  0) & 0xffff,  0);
+      __ movk(R8_ic_reg, (val >> 16) & 0xffff, 16);
+      __ movk(R8_ic_reg, (val >> 32) & 0xffff, 32);
+      __ movk(R8_ic_reg, (val >> 48) & 0xffff, 48);
+    } else {
+      __ ldr_literal(R8_ic_reg, oop_literal);
+    }
+    assert(__ offset() - offset == fixed_size, "bad mov_oop size");
+#else
+    __ movw(R8_ic_reg, ((unsigned int)Universe::non_oop_word()) & 0xffff);
+    __ movt(R8_ic_reg, ((unsigned int)Universe::non_oop_word()) >> 16);
+#endif
+    address  virtual_call_oop_addr = __ inst_mark();
+    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
+    // who we intended to call.
+    int method_index = resolved_method_index(cbuf);
+    __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
+    emit_call_reloc(cbuf, as_MachCall(), $meth, RelocationHolder::none);
+#ifdef AARCH64
+    if (!VM_Version::prefer_moves_over_load_literal()) {
+      Label skip_literal;
+      __ b(skip_literal);
+      int off2 = __ offset();
+      __ bind_literal(oop_literal);
+      if (__ offset() - off2 == wordSize) {
+        // no padding, so insert nop for worst-case sizing
+        __ nop();
+      }
+      __ bind(skip_literal);
+    }
+#endif
+  %}
+
+  enc_class LdReplImmI(immI src, regD dst, iRegI tmp, int cnt, int wth) %{
+    // FIXME: load from constant table?
+    // Load a constant replicated "count" times with width "width"
+    int count = $cnt$$constant;
+    int width = $wth$$constant;
+    assert(count*width == 4, "sanity");
+    int val = $src$$constant;
+    if (width < 4) {
+      int bit_width = width * 8;
+      val &= (((int)1) << bit_width) - 1; // mask off sign bits
+      for (int i = 0; i < count - 1; i++) {
+        val |= (val << bit_width);
+      }
+    }
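+    // e.g. count == 4, width == 1, val == 0x2a replicates to 0x2a2a2a2a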
+    MacroAssembler _masm(&cbuf);
+
+    if (val == -1) {
+      __ mvn($tmp$$Register, 0);
+    } else if (val == 0) {
+      __ mov($tmp$$Register, 0);
+    } else {
+      __ movw($tmp$$Register, val & 0xffff);
+      __ movt($tmp$$Register, (unsigned int)val >> 16);
+    }
+    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
+  %}
+
+  enc_class LdReplImmF(immF src, regD dst, iRegI tmp) %{
+    // Replicate float con 2 times and pack into vector (8 bytes) in regD.
+    float fval = $src$$constant;
+    int val = *((int*)&fval);
+    MacroAssembler _masm(&cbuf);
+
+    if (val == -1) {
+      __ mvn($tmp$$Register, 0);
+    } else if (val == 0) {
+      __ mov($tmp$$Register, 0);
+    } else {
+      __ movw($tmp$$Register, val & 0xffff);
+      __ movt($tmp$$Register, (unsigned int)val >> 16);
+    }
+    __ fmdrr($dst$$FloatRegister, $tmp$$Register, $tmp$$Register);
+  %}
+
+  enc_class enc_String_Compare(R0RegP str1, R1RegP str2, R2RegI cnt1, R3RegI cnt2, iRegI result, iRegI tmp1, iRegI tmp2) %{
+    Label Ldone, Lloop;
+    MacroAssembler _masm(&cbuf);
+
+    Register   str1_reg = $str1$$Register;
+    Register   str2_reg = $str2$$Register;
+    Register   cnt1_reg = $cnt1$$Register; // int
+    Register   cnt2_reg = $cnt2$$Register; // int
+    Register   tmp1_reg = $tmp1$$Register;
+    Register   tmp2_reg = $tmp2$$Register;
+    Register result_reg = $result$$Register;
+
+    assert_different_registers(str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp1_reg, tmp2_reg);
+
+    // Compute the minimum of the string lengths(str1_reg) and the
+    // difference of the string lengths (stack)
+
+    // See if the lengths are different, and calculate min in str1_reg.
+    // Stash diff in tmp2 in case we need it for a tie-breaker.
+    __ subs_32(tmp2_reg, cnt1_reg, cnt2_reg);
+#ifdef AARCH64
+    Label Lskip;
+    __ _lsl_w(cnt1_reg, cnt1_reg, exact_log2(sizeof(jchar))); // scale the limit
+    __ b(Lskip, mi);
+    __ _lsl_w(cnt1_reg, cnt2_reg, exact_log2(sizeof(jchar))); // scale the limit
+    __ bind(Lskip);
+#else
+    __ mov(cnt1_reg, AsmOperand(cnt1_reg, lsl, exact_log2(sizeof(jchar)))); // scale the limit
+    __ mov(cnt1_reg, AsmOperand(cnt2_reg, lsl, exact_log2(sizeof(jchar))), pl); // scale the limit
+#endif
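+    // Here cnt1_reg holds min(cnt1, cnt2) scaled by sizeof(jchar), and
+    // tmp2_reg still holds the unscaled length difference (cnt1 - cnt2).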
+
+    // reallocate cnt1_reg, cnt2_reg, result_reg
+    // Note:  limit_reg holds the string length pre-scaled by 2
+    Register limit_reg = cnt1_reg;
+    Register  chr2_reg = cnt2_reg;
+    Register  chr1_reg = tmp1_reg;
+    // str{12} are the base pointers
+
+    // Is the minimum length zero?
+    __ cmp_32(limit_reg, 0);
+    if (result_reg != tmp2_reg) {
+      __ mov(result_reg, tmp2_reg, eq);
+    }
+    __ b(Ldone, eq);
+
+    // Load first characters
+    __ ldrh(chr1_reg, Address(str1_reg, 0));
+    __ ldrh(chr2_reg, Address(str2_reg, 0));
+
+    // Compare first characters
+    __ subs(chr1_reg, chr1_reg, chr2_reg);
+    if (result_reg != chr1_reg) {
+      __ mov(result_reg, chr1_reg, ne);
+    }
+    __ b(Ldone, ne);
+
+    {
+      // Check after comparing first character to see if strings are equivalent
+      // Check if the strings start at the same location
+      __ cmp(str1_reg, str2_reg);
+      // Check if the length difference is zero
+      __ cond_cmp(tmp2_reg, 0, eq);
+      __ mov(result_reg, 0, eq); // result is zero
+      __ b(Ldone, eq);
+      // Strings might not be equal
+    }
+
+    __ subs(chr1_reg, limit_reg, 1 * sizeof(jchar));
+    if (result_reg != tmp2_reg) {
+      __ mov(result_reg, tmp2_reg, eq);
+    }
+    __ b(Ldone, eq);
+
+    // Shift str1_reg and str2_reg to the end of the arrays, negate limit
+    __ add(str1_reg, str1_reg, limit_reg);
+    __ add(str2_reg, str2_reg, limit_reg);
+    __ neg(limit_reg, chr1_reg);  // limit = -(limit-2)
+
+    // Compare the rest of the characters
+    __ bind(Lloop);
+    __ ldrh(chr1_reg, Address(str1_reg, limit_reg));
+    __ ldrh(chr2_reg, Address(str2_reg, limit_reg));
+    __ subs(chr1_reg, chr1_reg, chr2_reg);
+    if (result_reg != chr1_reg) {
+      __ mov(result_reg, chr1_reg, ne);
+    }
+    __ b(Ldone, ne);
+
+    __ adds(limit_reg, limit_reg, sizeof(jchar));
+    __ b(Lloop, ne);
+
+    // If strings are equal up to min length, return the length difference.
+    if (result_reg != tmp2_reg) {
+      __ mov(result_reg, tmp2_reg);
+    }
+
+    // Otherwise, return the difference between the first mismatched chars.
+    __ bind(Ldone);
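+    // As in java.lang.String::compareTo, only the sign of the result is
+    // significant: the raw character or length difference is returned.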
+  %}
+
+  enc_class enc_String_Equals(R0RegP str1, R1RegP str2, R2RegI cnt, iRegI result, iRegI tmp1, iRegI tmp2) %{
+    Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone, Lequal;
+    MacroAssembler _masm(&cbuf);
+
+    Register   str1_reg = $str1$$Register;
+    Register   str2_reg = $str2$$Register;
+    Register    cnt_reg = $cnt$$Register; // int
+    Register   tmp1_reg = $tmp1$$Register;
+    Register   tmp2_reg = $tmp2$$Register;
+    Register result_reg = $result$$Register;
+
+    assert_different_registers(str1_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, result_reg);
+
+    __ cmp(str1_reg, str2_reg); //same char[] ?
+    __ b(Lequal, eq);
+
+    __ cbz_32(cnt_reg, Lequal); // count == 0
+
+    //rename registers
+    Register limit_reg = cnt_reg;
+    Register  chr1_reg = tmp1_reg;
+    Register  chr2_reg = tmp2_reg;
+
+    __ logical_shift_left(limit_reg, limit_reg, exact_log2(sizeof(jchar)));
+
+    //check for alignment and position the pointers to the ends
+    __ orr(chr1_reg, str1_reg, str2_reg);
+    __ tst(chr1_reg, 0x3);
+
+    // A non-zero result means at least one pointer is not 4-byte aligned.
+    // We could optimize the case when both arrays are misaligned,
+    // but it is not a frequent case and it requires additional checks.
+    __ b(Lchar, ne);
+
+    // Compare char[] arrays aligned to 4 bytes.
+    __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
+                          chr1_reg, chr2_reg, Ldone);
+
+    __ b(Lequal); // equal
+
+    // char by char compare
+    __ bind(Lchar);
+    __ mov(result_reg, 0);
+    __ add(str1_reg, limit_reg, str1_reg);
+    __ add(str2_reg, limit_reg, str2_reg);
+    __ neg(limit_reg, limit_reg); //negate count
+
+    // Lchar_loop
+    __ bind(Lchar_loop);
+    __ ldrh(chr1_reg, Address(str1_reg, limit_reg));
+    __ ldrh(chr2_reg, Address(str2_reg, limit_reg));
+    __ cmp(chr1_reg, chr2_reg);
+    __ b(Ldone, ne);
+    __ adds(limit_reg, limit_reg, sizeof(jchar));
+    __ b(Lchar_loop, ne);
+
+    __ bind(Lequal);
+    __ mov(result_reg, 1);  //equal
+
+    __ bind(Ldone);
+  %}
+
+  enc_class enc_Array_Equals(R0RegP ary1, R1RegP ary2, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI result) %{
+    Label Lvector, Ldone, Lloop, Lequal;
+    MacroAssembler _masm(&cbuf);
+
+    Register   ary1_reg = $ary1$$Register;
+    Register   ary2_reg = $ary2$$Register;
+    Register   tmp1_reg = $tmp1$$Register;
+    Register   tmp2_reg = $tmp2$$Register;
+    Register   tmp3_reg = $tmp3$$Register;
+    Register result_reg = $result$$Register;
+
+    assert_different_registers(ary1_reg, ary2_reg, tmp1_reg, tmp2_reg, tmp3_reg, result_reg);
+
+    int length_offset  = arrayOopDesc::length_offset_in_bytes();
+    int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);
+
+    // return true if the same array
+#ifdef AARCH64
+    __ cmp(ary1_reg, ary2_reg);
+    __ b(Lequal, eq);
+
+    __ mov(result_reg, 0);
+
+    __ cbz(ary1_reg, Ldone); // not equal
+
+    __ cbz(ary2_reg, Ldone); // not equal
+#else
+    __ teq(ary1_reg, ary2_reg);
+    __ mov(result_reg, 1, eq);
+    __ b(Ldone, eq); // equal
+
+    __ tst(ary1_reg, ary1_reg);
+    __ mov(result_reg, 0, eq);
+    __ b(Ldone, eq);    // not equal
+
+    __ tst(ary2_reg, ary2_reg);
+    __ mov(result_reg, 0, eq);
+    __ b(Ldone, eq);    // not equal
+#endif
+
+    //load the lengths of arrays
+    __ ldr_s32(tmp1_reg, Address(ary1_reg, length_offset)); // int
+    __ ldr_s32(tmp2_reg, Address(ary2_reg, length_offset)); // int
+
+    // return false if the two arrays are not equal length
+#ifdef AARCH64
+    __ cmp_w(tmp1_reg, tmp2_reg);
+    __ b(Ldone, ne);    // not equal
+
+    __ cbz_w(tmp1_reg, Lequal); // zero-length arrays are equal
+#else
+    __ teq_32(tmp1_reg, tmp2_reg);
+    __ mov(result_reg, 0, ne);
+    __ b(Ldone, ne);    // not equal
+
+    __ tst(tmp1_reg, tmp1_reg);
+    __ mov(result_reg, 1, eq);
+    __ b(Ldone, eq);    // zero-length arrays are equal
+#endif
+
+    // load array addresses
+    __ add(ary1_reg, ary1_reg, base_offset);
+    __ add(ary2_reg, ary2_reg, base_offset);
+
+    // renaming registers
+    Register chr1_reg  =  tmp3_reg;   // for characters in ary1
+    Register chr2_reg  =  tmp2_reg;   // for characters in ary2
+    Register limit_reg =  tmp1_reg;   // length
+
+    // set byte count
+    __ logical_shift_left_32(limit_reg, limit_reg, exact_log2(sizeof(jchar)));
+
+    // Compare char[] arrays aligned to 4 bytes.
+    __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
+                          chr1_reg, chr2_reg, Ldone);
+    __ bind(Lequal);
+    __ mov(result_reg, 1);  //equal
+
+    __ bind(Ldone);
+  %}
+%}
+
+//----------FRAME--------------------------------------------------------------
+// Definition of frame structure and management information.
+//
+//  S T A C K   L A Y O U T    Allocators stack-slot number
+//                             |   (to get allocators register number
+//  G  Owned by    |        |  v    add VMRegImpl::stack0)
+//  r   CALLER     |        |
+//  o     |        +--------+      pad to even-align allocators stack-slot
+//  w     V        |  pad0  |        numbers; owned by CALLER
+//  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
+//  h     ^        |   in   |  5
+//        |        |  args  |  4   Holes in incoming args owned by SELF
+//  |     |        |        |  3
+//  |     |        +--------+
+//  V     |        | old out|      Empty on Intel, window on Sparc
+//        |    old |preserve|      Must be even aligned.
+//        |     SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
+//        |        |   in   |  3   area for Intel ret address
+//     Owned by    |preserve|      Empty on Sparc.
+//       SELF      +--------+
+//        |        |  pad2  |  2   pad to align old SP
+//        |        +--------+  1
+//        |        | locks  |  0
+//        |        +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
+//        |        |  pad1  | 11   pad to align new SP
+//        |        +--------+
+//        |        |        | 10
+//        |        | spills |  9   spills
+//        V        |        |  8   (pad0 slot for callee)
+//      -----------+--------+----> Matcher::_out_arg_limit, unaligned
+//        ^        |  out   |  7
+//        |        |  args  |  6   Holes in outgoing args owned by CALLEE
+//     Owned by    +--------+
+//      CALLEE     | new out|  6   Empty on Intel, window on Sparc
+//        |    new |preserve|      Must be even-aligned.
+//        |     SP-+--------+----> Matcher::_new_SP, even aligned
+//        |        |        |
+//
+// Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
+//         known from SELF's arguments and the Java calling convention.
+//         Region 6-7 is determined per call site.
+// Note 2: If the calling convention leaves holes in the incoming argument
+//         area, those holes are owned by SELF.  Holes in the outgoing area
+//         are owned by the CALLEE.  Holes should not be necessary in the
+//         incoming area, as the Java calling convention is completely under
+//         the control of the AD file.  Doubles can be sorted and packed to
+//         avoid holes.  Holes in the outgoing arguments may be necessary for
+//         varargs C calling conventions.
+// Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
+//         even aligned with pad0 as needed.
+//         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
+//         region 6-11 is even aligned; it may be padded out more so that
+//         the region from SP to FP meets the minimum stack alignment.
+
+frame %{
+  // What direction does stack grow in (assumed to be same for native & Java)
+  stack_direction(TOWARDS_LOW);
+
+  // These two registers define part of the calling convention
+  // between compiled code and the interpreter.
+  inline_cache_reg(R_Ricklass);          // Inline Cache Register or Method* for I2C
+  interpreter_method_oop_reg(R_Rmethod); // Method Oop Register when calling interpreter
+
+  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
+  cisc_spilling_operand_name(indOffset);
+
+  // Number of stack slots consumed by a Monitor enter
+  sync_stack_slots(1 * VMRegImpl::slots_per_word);
+
+  // Compiled code's Frame Pointer
+#ifdef AARCH64
+  frame_pointer(R_SP);
+#else
+  frame_pointer(R_R13);
+#endif
+
+  // Stack alignment requirement
+  stack_alignment(StackAlignmentInBytes);
+  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
+  // !LP64: Alignment size in bytes (64-bit  ->  8 bytes)
+
+  // Number of stack slots between incoming argument block and the start of
+  // a new frame.  The PROLOG must add this many slots to the stack.  The
+  // EPILOG must remove this many slots.
+  // FP + LR
+  in_preserve_stack_slots(2 * VMRegImpl::slots_per_word);
+
+  // Number of outgoing stack slots killed above the out_preserve_stack_slots
+  // for calls to C.  Supports the var-args backing area for register parms.
+  // ADLC doesn't support parsing expressions, so I folded the math by hand.
+  varargs_C_out_slots_killed( 0);
+
+  // The after-PROLOG location of the return address.  Location of
+  // return address specifies a type (REG or STACK) and a number
+  // representing the register number (i.e. - use a register name) or
+  // stack slot.
+  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
+  // Otherwise, it is above the locks and verification slot and alignment word
+  return_addr(STACK - 1*VMRegImpl::slots_per_word +
+              round_to((Compile::current()->in_preserve_stack_slots() +
+                        Compile::current()->fixed_slots()),
+                       stack_alignment_in_slots()));
+
+  // Body of function which returns an OptoRegs array locating
+  // arguments either in registers or in stack slots for calling
+  // java
+  calling_convention %{
+    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);
+
+  %}
+
+  // Body of function which returns an OptoRegs array locating
+  // arguments either in registers or in stack slots for calling
+  // C.
+  c_calling_convention %{
+    // This is obviously always outgoing
+    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
+  %}
+
+  // Location of compiled Java return values.  Same as C
+  return_value %{
+    return c2::return_value(ideal_reg);
+  %}
+
+%}
+
+//----------ATTRIBUTES---------------------------------------------------------
+//----------Instruction Attributes---------------------------------------------
+ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
+ins_attrib ins_size(32);           // Required size attribute (in bits)
+ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
+                                   // non-matching short branch variant of some
+                                   // long branch?
+
+//----------OPERANDS-----------------------------------------------------------
+// Operand definitions must precede instruction definitions for correct parsing
+// in the ADLC because operands constitute user defined types which are used in
+// instruction definitions.
+
+//----------Simple Operands----------------------------------------------------
+// Immediate Operands
+// Integer Immediate: 32-bit
+operand immI() %{
+  match(ConI);
+
+  op_cost(0);
+  // formats are generated automatically for constants and base registers
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: 8-bit unsigned - for VMOV
+operand immU8() %{
+  predicate(0 <= n->get_int() && (n->get_int() <= 255));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: 16-bit
+operand immI16() %{
+  predicate((n->get_int() >> 16) == 0 && VM_Version::supports_movw());
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+#ifndef AARCH64
+// Integer Immediate: offset for half and double word loads and stores
+operand immIHD() %{
+  predicate(is_memoryHD(n->get_int()));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: offset for fp loads and stores
+operand immIFP() %{
+  predicate(is_memoryfp(n->get_int()) && ((n->get_int() & 3) == 0));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+#endif
+
+// Valid scale values for addressing modes and shifts
+operand immU5() %{
+  predicate(0 <= n->get_int() && (n->get_int() <= 31));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the values 32-63 (6-bit with the high bit set)
+operand immU6Big() %{
+  predicate(n->get_int() >= 32 && n->get_int() <= 63);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 0
+operand immI0() %{
+  predicate(n->get_int() == 0);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 1
+operand immI_1() %{
+  predicate(n->get_int() == 1);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 2
+operand immI_2() %{
+  predicate(n->get_int() == 2);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 3
+operand immI_3() %{
+  predicate(n->get_int() == 3);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 4
+operand immI_4() %{
+  predicate(n->get_int() == 4);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 8
+operand immI_8() %{
+  predicate(n->get_int() == 8);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: non-negative
+operand immU31()
+%{
+  predicate(n->get_int() >= 0);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the values 32-63
+operand immI_32_63() %{
+  predicate(n->get_int() >= 32 && n->get_int() <= 63);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Immediates for special shifts (sign extend)
+
+// Integer Immediate: the value 16
+operand immI_16() %{
+  predicate(n->get_int() == 16);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 24
+operand immI_24() %{
+  predicate(n->get_int() == 24);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 255
+operand immI_255() %{
+  predicate( n->get_int() == 255 );
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: the value 65535
+operand immI_65535() %{
+  predicate(n->get_int() == 65535);
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediates for arithmetic instructions
+
+operand aimmI() %{
+  predicate(is_aimm(n->get_int()));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand aimmIneg() %{
+  predicate(is_aimm(-n->get_int()));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand aimmU31() %{
+  predicate((0 <= n->get_int()) && is_aimm(n->get_int()));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediates for logical instructions
+
+operand limmI() %{
+  predicate(is_limmI(n->get_int()));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand limmIlow8() %{
+  predicate(is_limmI_low(n->get_int(), 8));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand limmU31() %{
+  predicate(0 <= n->get_int() && is_limmI(n->get_int()));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand limmIn() %{
+  predicate(is_limmI(~n->get_int()));
+  match(ConI);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+#ifdef AARCH64
+// Long Immediate: for logical instruction
+operand limmL() %{
+  predicate(is_limmL(n->get_long()));
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand limmLn() %{
+  predicate(is_limmL(~n->get_long()));
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: for arithmetic instruction
+operand aimmL() %{
+  predicate(is_aimm(n->get_long()));
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand aimmLneg() %{
+  predicate(is_aimm(-n->get_long()));
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+#endif // AARCH64
+
+// Long Immediate: the value FF
+operand immL_FF() %{
+  predicate( n->get_long() == 0xFFL );
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: the value FFFF
+operand immL_FFFF() %{
+  predicate( n->get_long() == 0xFFFFL );
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Pointer Immediate: 32 or 64-bit
+operand immP() %{
+  match(ConP);
+
+  op_cost(5);
+  // formats are generated automatically for constants and base registers
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immP0() %{
+  predicate(n->get_ptr() == 0);
+  match(ConP);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immP_poll() %{
+  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
+  match(ConP);
+
+  // formats are generated automatically for constants and base registers
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Pointer Immediate
+operand immN()
+%{
+  match(ConN);
+
+  op_cost(10);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immNKlass()
+%{
+  match(ConNKlass);
+
+  op_cost(10);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// NULL Pointer Immediate
+operand immN0()
+%{
+  predicate(n->get_narrowcon() == 0);
+  match(ConN);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immL() %{
+  match(ConL);
+  op_cost(40);
+  // formats are generated automatically for constants and base registers
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immL0() %{
+  predicate(n->get_long() == 0L);
+  match(ConL);
+  op_cost(0);
+  // formats are generated automatically for constants and base registers
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: 16-bit
+operand immL16() %{
+  predicate(n->get_long() >= 0 && n->get_long() < (1<<16)  && VM_Version::supports_movw());
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: low 32-bit mask
+operand immL_32bits() %{
+  predicate(n->get_long() == 0xFFFFFFFFL);
+  match(ConL);
+  op_cost(0);
+
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Double Immediate
+operand immD() %{
+  match(ConD);
+
+  op_cost(40);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Double Immediate: +0.0d.
+operand immD0() %{
+  predicate(jlong_cast(n->getd()) == 0);
+
+  match(ConD);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
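+// Double Immediate: encoded as 8 bits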
+operand imm8D() %{
+  predicate(Assembler::double_num(n->getd()).can_be_imm8());
+  match(ConD);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Float Immediate
+operand immF() %{
+  match(ConF);
+
+  op_cost(20);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Float Immediate: +0.0f
+operand immF0() %{
+  predicate(jint_cast(n->getf()) == 0);
+  match(ConF);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Float Immediate: encoded as 8 bits
+operand imm8F() %{
+  predicate(Assembler::float_num(n->getf()).can_be_imm8());
+  match(ConF);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Register Operands
+// Integer Register
+operand iRegI() %{
+  constraint(ALLOC_IN_RC(int_reg));
+  match(RegI);
+  match(R0RegI);
+  match(R1RegI);
+  match(R2RegI);
+  match(R3RegI);
+#ifdef AARCH64
+  match(ZRRegI);
+#else
+  match(R12RegI);
+#endif
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer Register
+operand iRegP() %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(RegP);
+  match(R0RegP);
+  match(R1RegP);
+  match(R2RegP);
+  match(RExceptionRegP);
+  match(R8RegP);
+  match(R9RegP);
+  match(RthreadRegP); // FIXME: move to sp_ptr_RegP?
+  match(R12RegP);
+  match(LRRegP);
+
+  match(sp_ptr_RegP);
+  match(store_ptr_RegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// GPRs + Rthread + SP
+operand sp_ptr_RegP() %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(RegP);
+  match(iRegP);
+  match(SPRegP); // FIXME: check cost
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+#ifdef AARCH64
+// Like sp_ptr_reg, but excludes regs (the AArch64 SP) that can't be
+// stored directly.  Includes ZR, so this operand can't be a destination.
+operand store_ptr_RegP() %{
+  constraint(ALLOC_IN_RC(store_ptr_reg));
+  match(RegP);
+  match(iRegP);
+  match(ZRRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand store_RegI() %{
+  constraint(ALLOC_IN_RC(store_reg));
+  match(RegI);
+  match(iRegI);
+  match(ZRRegI);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand store_RegL() %{
+  constraint(ALLOC_IN_RC(store_ptr_reg));
+  match(RegL);
+  match(iRegL);
+  match(ZRRegL);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand store_RegN() %{
+  constraint(ALLOC_IN_RC(store_reg));
+  match(RegN);
+  match(iRegN);
+  match(ZRRegN);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+#endif
+
+operand R0RegP() %{
+  constraint(ALLOC_IN_RC(R0_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R1RegP() %{
+  constraint(ALLOC_IN_RC(R1_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R2RegP() %{
+  constraint(ALLOC_IN_RC(R2_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand RExceptionRegP() %{
+  constraint(ALLOC_IN_RC(Rexception_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand RthreadRegP() %{
+  constraint(ALLOC_IN_RC(Rthread_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand IPRegP() %{
+  constraint(ALLOC_IN_RC(IP_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand LRRegP() %{
+  constraint(ALLOC_IN_RC(LR_regP));
+  match(iRegP);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R0RegI() %{
+  constraint(ALLOC_IN_RC(R0_regI));
+  match(iRegI);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R1RegI() %{
+  constraint(ALLOC_IN_RC(R1_regI));
+  match(iRegI);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R2RegI() %{
+  constraint(ALLOC_IN_RC(R2_regI));
+  match(iRegI);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R3RegI() %{
+  constraint(ALLOC_IN_RC(R3_regI));
+  match(iRegI);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+#ifndef AARCH64
+operand R12RegI() %{
+  constraint(ALLOC_IN_RC(R12_regI));
+  match(iRegI);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+#endif
+
+// Long Register
+operand iRegL() %{
+  constraint(ALLOC_IN_RC(long_reg));
+  match(RegL);
+#ifdef AARCH64
+  match(iRegLd);
+#else
+  match(R0R1RegL);
+  match(R2R3RegL);
+#endif
+//match(iRegLex);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand iRegLd() %{
+  constraint(ALLOC_IN_RC(long_reg_align));
+  match(iRegL); // FIXME: allows unaligned R11/R12?
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+#ifndef AARCH64
+// first long arg, or return value
+operand R0R1RegL() %{
+  constraint(ALLOC_IN_RC(R0R1_regL));
+  match(iRegL);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand R2R3RegL() %{
+  constraint(ALLOC_IN_RC(R2R3_regL));
+  match(iRegL);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+#endif
+
+// Condition Code Flag Register
+operand flagsReg() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  format %{ "apsr" %}
+  interface(REG_INTER);
+%}
+
+// Result of compare to 0 (TST)
+operand flagsReg_EQNELTGE() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  format %{ "apsr_EQNELTGE" %}
+  interface(REG_INTER);
+%}
+
+// Condition Code Register, unsigned comparisons.
+operand flagsRegU() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+#ifdef TODO
+  match(RegFlagsP);
+#endif
+
+  format %{ "apsr_U" %}
+  interface(REG_INTER);
+%}
+
+// Condition Code Register, pointer comparisons.
+operand flagsRegP() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  format %{ "apsr_P" %}
+  interface(REG_INTER);
+%}
+
+// Condition Code Register, long comparisons.
+#ifndef AARCH64
+operand flagsRegL_LTGE() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  format %{ "apsr_L_LTGE" %}
+  interface(REG_INTER);
+%}
+
+operand flagsRegL_EQNE() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  format %{ "apsr_L_EQNE" %}
+  interface(REG_INTER);
+%}
+
+operand flagsRegL_LEGT() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  format %{ "apsr_L_LEGT" %}
+  interface(REG_INTER);
+%}
+#endif
+
+// Condition Code Register, floating comparisons, unordered same as "less".
+operand flagsRegF() %{
+  constraint(ALLOC_IN_RC(float_flags));
+  match(RegFlags);
+
+  format %{ "fpscr_F" %}
+  interface(REG_INTER);
+%}
+
+// Vectors
+operand vecD() %{
+  constraint(ALLOC_IN_RC(actual_dflt_reg));
+  match(VecD);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand vecX() %{
+  constraint(ALLOC_IN_RC(vectorx_reg));
+  match(VecX);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand regD() %{
+  constraint(ALLOC_IN_RC(actual_dflt_reg));
+  match(RegD);
+  match(regD_low);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand regF() %{
+  constraint(ALLOC_IN_RC(sflt_reg));
+  match(RegF);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand regD_low() %{
+  constraint(ALLOC_IN_RC(dflt_low_reg));
+  match(RegD);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Special Registers
+
+// Method Register
+operand inline_cache_regP(iRegP reg) %{
+  constraint(ALLOC_IN_RC(Ricklass_regP));
+  match(reg);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand interpreter_method_oop_regP(iRegP reg) %{
+  constraint(ALLOC_IN_RC(Rmethod_regP));
+  match(reg);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+
+//----------Complex Operands---------------------------------------------------
+// Indirect Memory Reference
+operand indirect(sp_ptr_RegP reg) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(reg);
+
+  op_cost(100);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+#ifdef AARCH64
+// Indirect with scaled*1 uimm12 offset
+operand indOffsetU12ScaleB(sp_ptr_RegP reg, immUL12 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with scaled*2 uimm12 offset
+operand indOffsetU12ScaleS(sp_ptr_RegP reg, immUL12x2 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with scaled*4 uimm12 offset
+operand indOffsetU12ScaleI(sp_ptr_RegP reg, immUL12x4 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with scaled*8 uimm12 offset
+operand indOffsetU12ScaleL(sp_ptr_RegP reg, immUL12x8 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with scaled*16 uimm12 offset
+operand indOffsetU12ScaleQ(sp_ptr_RegP reg, immUL12x16 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+#else // ! AARCH64
+
+// Indirect with Offset in ]-4096, 4096[
+operand indOffset12(sp_ptr_RegP reg, immI12 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with offset for float load/store
+operand indOffsetFP(sp_ptr_RegP reg, immIFP offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with Offset for half and double words
+operand indOffsetHD(sp_ptr_RegP reg, immIHD offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with Offset and Offset+4 in ]-1024, 1024[
+operand indOffsetFPx2(sp_ptr_RegP reg, immX10x2 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with Offset and Offset+4 in ]-4096, 4096[
+operand indOffset12x2(sp_ptr_RegP reg, immI12x2 offset) %{
+  constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(AddP reg offset);
+
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+#ifdef AARCH64
+    index(0xff); // 0xff => no index
+#else
+    index(0xf); // PC => no index
+#endif
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+#endif // !AARCH64
+
+// Indirect with Register Index
+operand indIndex(iRegP addr, iRegX index) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr index);
+
+  op_cost(100);
+  format %{ "[$addr + $index]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+#ifdef AARCH64
+// Indirect Memory Times Scale Plus Index Register
+operand indIndexScaleS(iRegP addr, iRegX index, immI_1 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX index scale));
+
+  op_cost(100);
+  format %{"[$addr + $index << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+// Indirect Memory Times Scale Plus 32-bit Index Register
+operand indIndexIScaleS(iRegP addr, iRegI index, immI_1 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX (ConvI2L index) scale));
+
+  op_cost(100);
+  format %{"[$addr + $index.w << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x7fffffff); // sxtw
+  %}
+%}
+
+// Indirect Memory Times Scale Plus Index Register
+operand indIndexScaleI(iRegP addr, iRegX index, immI_2 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX index scale));
+
+  op_cost(100);
+  format %{"[$addr + $index << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+// Indirect Memory Times Scale Plus 32-bit Index Register
+operand indIndexIScaleI(iRegP addr, iRegI index, immI_2 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX (ConvI2L index) scale));
+
+  op_cost(100);
+  format %{"[$addr + $index.w << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x7fffffff); // sxtw
+  %}
+%}
+
+// Indirect Memory Times Scale Plus Index Register
+operand indIndexScaleL(iRegP addr, iRegX index, immI_3 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX index scale));
+
+  op_cost(100);
+  format %{"[$addr + $index << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+// Indirect Memory Times Scale Plus 32-bit Index Register
+operand indIndexIScaleL(iRegP addr, iRegI index, immI_3 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX (ConvI2L index) scale));
+
+  op_cost(100);
+  format %{"[$addr + $index.w << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x7fffffff); // sxtw
+  %}
+%}
+
+// Indirect Memory Times Scale Plus Index Register
+operand indIndexScaleQ(iRegP addr, iRegX index, immI_4 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX index scale));
+
+  op_cost(100);
+  format %{"[$addr + $index << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+// Indirect Memory Times Scale Plus 32-bit Index Register
+operand indIndexIScaleQ(iRegP addr, iRegI index, immI_4 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX (ConvI2L index) scale));
+
+  op_cost(100);
+  format %{"[$addr + $index.w << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x7fffffff); // sxtw
+  %}
+%}
+#else
+// Indirect Memory Times Scale Plus Index Register
+operand indIndexScale(iRegP addr, iRegX index, immU5 scale) %{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP addr (LShiftX index scale));
+
+  op_cost(100);
+  format %{"[$addr + $index << $scale]" %}
+  interface(MEMORY_INTER) %{
+    base($addr);
+    index($index);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+#endif
+
+// Operands for expressing Control Flow
+// NOTE:  Label is a predefined operand which should not be redefined in
+//        the AD file.  It is generically handled within the ADLC.
+
+//----------Conditional Branch Operands----------------------------------------
+// Comparison Op  - This is the operation of the comparison, and is limited to
+//                  the following set of codes:
+//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
+//
+// Other attributes of the comparison, such as unsignedness, are specified
+// by the comparison instruction that sets a condition code flags register.
+// That result is represented by a flags operand whose subtype is appropriate
+// to the unsignedness (etc.) of the comparison.
+//
+// Later, the instruction which matches both the Comparison Op (a Bool) and
+// the flags (produced by the Cmp) specifies the coding of the comparison op
+// by matching a specific subtype of Bool operand below, such as cmpOpU.
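+//
+// As an illustrative sketch only (the actual branch instructs appear later
+// in this file), the pairing looks like:
+//
+//   instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
+//     match(If cmp icc);  // Bool encoded by cmpOp, flags set by the Cmp
+//     ...
+//   %}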
+
+operand cmpOp() %{
+  match(Bool);
+
+  format %{ "" %}
+  interface(COND_INTER) %{
+    equal(0x0);
+    not_equal(0x1);
+    less(0xb);
+    greater_equal(0xa);
+    less_equal(0xd);
+    greater(0xc);
+    overflow(0x0); // unsupported/unimplemented
+    no_overflow(0x0); // unsupported/unimplemented
+  %}
+%}
+
+// Integer comparison with 0, signed
+operand cmpOp0() %{
+  match(Bool);
+
+  format %{ "" %}
+  interface(COND_INTER) %{
+    equal(0x0);
+    not_equal(0x1);
+    less(0x4);
+    greater_equal(0x5);
+    less_equal(0xd); // unsupported
+    greater(0xc); // unsupported
+    overflow(0x0); // unsupported/unimplemented
+    no_overflow(0x0); // unsupported/unimplemented
+  %}
+%}
+
+// Comparison Op, unsigned
+operand cmpOpU() %{
+  match(Bool);
+
+  format %{ "u" %}
+  interface(COND_INTER) %{
+    equal(0x0);
+    not_equal(0x1);
+    less(0x3);
+    greater_equal(0x2);
+    less_equal(0x9);
+    greater(0x8);
+    overflow(0x0); // unsupported/unimplemented
+    no_overflow(0x0); // unsupported/unimplemented
+  %}
+%}
+
+// Comparison Op, pointer (same as unsigned)
+operand cmpOpP() %{
+  match(Bool);
+
+  format %{ "p" %}
+  interface(COND_INTER) %{
+    equal(0x0);
+    not_equal(0x1);
+    less(0x3);
+    greater_equal(0x2);
+    less_equal(0x9);
+    greater(0x8);
+    overflow(0x0); // unsupported/unimplemented
+    no_overflow(0x0); // unsupported/unimplemented
+  %}
+%}
+
+operand cmpOpL() %{
+  match(Bool);
+
+  format %{ "L" %}
+  interface(COND_INTER) %{
+    equal(0x0);
+    not_equal(0x1);
+    less(0xb);
+    greater_equal(0xa);
+    less_equal(0xd);
+    greater(0xc);
+    overflow(0x0); // unsupported/unimplemented
+    no_overflow(0x0); // unsupported/unimplemented
+  %}
+%}
+
+operand cmpOpL_commute() %{
+  match(Bool);
+
+  format %{ "L" %}
+  interface(COND_INTER) %{
+    equal(0x0);
+    not_equal(0x1);
+    less(0xc);
+    greater_equal(0xd);
+    less_equal(0xa);
+    greater(0xb);
+    overflow(0x0); // unsupported/unimplemented
+    no_overflow(0x0); // unsupported/unimplemented
+  %}
+%}
+
+//----------OPERAND CLASSES----------------------------------------------------
+// Operand Classes are groups of operands that are used to simplify
+// instruction definitions by not requiring the AD writer to specify separate
+// instructions for every form of operand when the instruction accepts
+// multiple operand types with the same basic encoding and format.  The classic
+// case of this is memory operands.
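+//
+// For instance, grouping the indirect and indexed forms into memoryI below
+// lets the single loadI(iRegI dst, memoryI mem) instruct defined later in
+// this file match every one of those addressing modes, instead of needing
+// one instruct per form.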
+#ifdef AARCH64
+opclass memoryB(indirect, indIndex, indOffsetU12ScaleB);
+opclass memoryS(indirect, indIndex, indIndexScaleS, indIndexIScaleS, indOffsetU12ScaleS);
+opclass memoryI(indirect, indIndex, indIndexScaleI, indIndexIScaleI, indOffsetU12ScaleI);
+opclass memoryL(indirect, indIndex, indIndexScaleL, indIndexIScaleL, indOffsetU12ScaleL);
+opclass memoryP(indirect, indIndex, indIndexScaleL, indIndexIScaleL, indOffsetU12ScaleL);
+opclass memoryQ(indirect, indIndex, indIndexScaleQ, indIndexIScaleQ, indOffsetU12ScaleQ);
+opclass memoryF(indirect, indIndex, indIndexScaleI, indIndexIScaleI, indOffsetU12ScaleI);
+opclass memoryD(indirect, indIndex, indIndexScaleL, indIndexIScaleL, indOffsetU12ScaleL);
+
+opclass memoryScaledS(indIndexScaleS, indIndexIScaleS);
+opclass memoryScaledI(indIndexScaleI, indIndexIScaleI);
+opclass memoryScaledL(indIndexScaleL, indIndexIScaleL);
+opclass memoryScaledP(indIndexScaleL, indIndexIScaleL);
+opclass memoryScaledQ(indIndexScaleQ, indIndexIScaleQ);
+opclass memoryScaledF(indIndexScaleI, indIndexIScaleI);
+opclass memoryScaledD(indIndexScaleL, indIndexIScaleL);
+// when ldrex/strex is used:
+opclass memoryex ( indirect );
+opclass indIndexMemory( indIndex );
+opclass memoryvld ( indirect /* , write back mode not implemented */ );
+
+#else
+
+opclass memoryI ( indirect, indOffset12, indIndex, indIndexScale );
+opclass memoryP ( indirect, indOffset12, indIndex, indIndexScale );
+opclass memoryF ( indirect, indOffsetFP );
+opclass memoryF2 ( indirect, indOffsetFPx2 );
+opclass memoryD ( indirect, indOffsetFP );
+opclass memoryfp( indirect, indOffsetFP );
+opclass memoryB ( indirect, indIndex, indOffsetHD );
+opclass memoryS ( indirect, indIndex, indOffsetHD );
+opclass memoryL ( indirect, indIndex, indOffsetHD );
+
+opclass memoryScaledI(indIndexScale);
+opclass memoryScaledP(indIndexScale);
+
+// when ldrex/strex is used:
+opclass memoryex ( indirect );
+opclass indIndexMemory( indIndex );
+opclass memorylong ( indirect, indOffset12x2 );
+opclass memoryvld ( indirect /* , write back mode not implemented */ );
+#endif
+
+//----------PIPELINE-----------------------------------------------------------
+pipeline %{
+
+//----------ATTRIBUTES---------------------------------------------------------
+attributes %{
+  fixed_size_instructions;           // Fixed size instructions
+  max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle
+  instruction_unit_size = 4;         // An instruction is 4 bytes long
+  instruction_fetch_unit_size = 16;  // The processor fetches one line
+  instruction_fetch_units = 1;       // of 16 bytes
+
+  // List of nop instructions
+  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
+%}
+
+//----------RESOURCES----------------------------------------------------------
+// Resources are the functional units available to the machine
+resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);
+
+//----------PIPELINE DESCRIPTION-----------------------------------------------
+// Pipeline Description specifies the stages in the machine's pipeline
+
+pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
+
+//----------PIPELINE CLASSES---------------------------------------------------
+// Pipeline Classes describe the stages in which input and output are
+// referenced by the hardware pipeline.
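+//
+// Reading the notation below: "dst : E(write)" says the dst operand is
+// written in stage E, "src : R(read)" says src is read in stage R, and
+// "IALU : R(2)" claims the IALU resource for two cycles starting at stage R.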
+
+// Integer ALU reg-reg operation
+pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+    single_instruction;
+    dst   : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg-reg long operation
+pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
+    instruction_count(2);
+    dst   : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+    IALU  : R;
+%}
+
+// Integer ALU reg-reg long dependent operation
+pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
+    instruction_count(1); multiple_bundles;
+    dst   : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    cr    : E(write);
+    IALU  : R(2);
+%}
+
+// Integer ALU reg-imm operation
+pipe_class ialu_reg_imm(iRegI dst, iRegI src1) %{
+    single_instruction;
+    dst   : E(write);
+    src1  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg-reg operation with condition code
+pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
+    single_instruction;
+    dst   : E(write);
+    cr    : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU zero-reg operation
+pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
+    single_instruction;
+    dst   : E(write);
+    src2  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU zero-reg operation with condition code only
+pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
+    single_instruction;
+    cr    : E(write);
+    src   : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg-reg operation with condition code only
+pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
+    single_instruction;
+    cr    : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg-imm operation with condition code only
+pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1) %{
+    single_instruction;
+    cr    : E(write);
+    src1  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg-reg-zero operation with condition code only
+pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
+    single_instruction;
+    cr    : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg-imm-zero operation with condition code only
+pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI0 zero) %{
+    single_instruction;
+    cr    : E(write);
+    src1  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg-reg operation with condition code, src1 modified
+pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
+    single_instruction;
+    cr    : E(write);
+    src1  : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+%}
+
+pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
+    multiple_bundles;
+    dst   : E(write)+4;
+    cr    : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R(3);
+    BR    : R(2);
+%}
+
+// Integer ALU operation
+pipe_class ialu_none(iRegI dst) %{
+    single_instruction;
+    dst   : E(write);
+    IALU  : R;
+%}
+
+// Integer ALU reg operation
+pipe_class ialu_reg(iRegI dst, iRegI src) %{
+    single_instruction; may_have_no_code;
+    dst   : E(write);
+    src   : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU reg conditional operation
+// This instruction has a 1 cycle stall, and cannot execute
+// in the same cycle as the instruction setting the condition
+// code. We kludge this by pretending to read the condition code
+// 1 cycle earlier, and by marking the functional units as busy
+// for 2 cycles with the result available 1 cycle later than
+// is really the case.
+pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
+    single_instruction;
+    op2_out : C(write);
+    op1     : R(read);
+    cr      : R(read);       // This is really E, with a 1 cycle stall
+    BR      : R(2);
+    MS      : R(2);
+%}
+
+// Integer ALU reg operation
+pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
+    single_instruction; may_have_no_code;
+    dst   : E(write);
+    src   : R(read);
+    IALU  : R;
+%}
+pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
+    single_instruction; may_have_no_code;
+    dst   : E(write);
+    src   : R(read);
+    IALU  : R;
+%}
+
+// Two integer ALU reg operations
+pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
+    instruction_count(2);
+    dst   : E(write);
+    src   : R(read);
+    A0    : R;
+    A1    : R;
+%}
+
+// Two integer ALU reg operations
+pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
+    instruction_count(2); may_have_no_code;
+    dst   : E(write);
+    src   : R(read);
+    A0    : R;
+    A1    : R;
+%}
+
+// Integer ALU imm operation
+pipe_class ialu_imm(iRegI dst) %{
+    single_instruction;
+    dst   : E(write);
+    IALU  : R;
+%}
+
+pipe_class ialu_imm_n(iRegI dst) %{
+    single_instruction;
+    dst   : E(write);
+    IALU  : R;
+%}
+
+// Integer ALU reg-reg with carry operation
+pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
+    single_instruction;
+    dst   : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU cc operation
+pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
+    single_instruction;
+    dst   : E(write);
+    cc    : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU cc / second IALU operation
+pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
+    instruction_count(1); multiple_bundles;
+    dst   : E(write)+1;
+    src   : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU cc / second IALU operation
+pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
+    instruction_count(1); multiple_bundles;
+    dst   : E(write)+1;
+    p     : R(read);
+    q     : R(read);
+    IALU  : R;
+%}
+
+// Integer ALU hi-lo-reg operation
+pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
+    instruction_count(1); multiple_bundles;
+    dst   : E(write)+1;
+    IALU  : R(2);
+%}
+
+// Long Constant
+pipe_class loadConL( iRegL dst, immL src ) %{
+    instruction_count(2); multiple_bundles;
+    dst   : E(write)+1;
+    IALU  : R(2);
+    IALU  : R(2);
+%}
+
+// Pointer Constant
+pipe_class loadConP( iRegP dst, immP src ) %{
+    instruction_count(0); multiple_bundles;
+    fixed_latency(6);
+%}
+
+// Polling Address
+pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
+    dst   : E(write);
+    IALU  : R;
+%}
+
+// Long Constant small
+pipe_class loadConLlo( iRegL dst, immL src ) %{
+    instruction_count(2);
+    dst   : E(write);
+    IALU  : R;
+    IALU  : R;
+%}
+
+// [PHH] This is wrong for 64-bit.  See LdImmF/D.
+pipe_class loadConFD(regF dst, immF src, iRegP tmp) %{
+    instruction_count(1); multiple_bundles;
+    src   : R(read);
+    dst   : M(write)+1;
+    IALU  : R;
+    MS    : E;
+%}
+
+// Integer ALU nop operation
+pipe_class ialu_nop() %{
+    single_instruction;
+    IALU  : R;
+%}
+
+// Integer ALU nop operation
+pipe_class ialu_nop_A0() %{
+    single_instruction;
+    A0    : R;
+%}
+
+// Integer ALU nop operation
+pipe_class ialu_nop_A1() %{
+    single_instruction;
+    A1    : R;
+%}
+
+// Integer Multiply reg-reg operation
+pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+    single_instruction;
+    dst   : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    MS    : R(5);
+%}
+
+pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+    single_instruction;
+    dst   : E(write)+4;
+    src1  : R(read);
+    src2  : R(read);
+    MS    : R(6);
+%}
+
+// Integer Divide reg-reg
+pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
+    instruction_count(1); multiple_bundles;
+    dst   : E(write);
+    temp  : E(write);
+    src1  : R(read);
+    src2  : R(read);
+    temp  : R(read);
+    MS    : R(38);
+%}
+
+// Long Divide
+pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+    dst  : E(write)+71;
+    src1 : R(read);
+    src2 : R(read)+1;
+    MS   : R(70);
+%}
+
+// Floating Point Add Float
+pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
+    single_instruction;
+    dst   : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FA    : R;
+%}
+
+// Floating Point Add Double
+pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
+    single_instruction;
+    dst   : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FA    : R;
+%}
+
+// Floating Point Conditional Move based on integer flags
+pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    cr    : R(read);
+    FA    : R(2);
+    BR    : R(2);
+%}
+
+// Floating Point Conditional Move based on integer flags
+pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    cr    : R(read);
+    FA    : R(2);
+    BR    : R(2);
+%}
+
+// Floating Point Multiply Float
+pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
+    single_instruction;
+    dst   : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FM    : R;
+%}
+
+// Floating Point Multiply Double
+pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
+    single_instruction;
+    dst   : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FM    : R;
+%}
+
+// Floating Point Divide Float
+pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
+    single_instruction;
+    dst   : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FM    : R;
+    FDIV  : C(14);
+%}
+
+// Floating Point Divide Double
+pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
+    single_instruction;
+    dst   : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FM    : R;
+    FDIV  : C(17);
+%}
+
+// Floating Point Move/Negate/Abs Float
+pipe_class faddF_reg(regF dst, regF src) %{
+    single_instruction;
+    dst   : W(write);
+    src   : E(read);
+    FA    : R(1);
+%}
+
+// Floating Point Move/Negate/Abs Double
+pipe_class faddD_reg(regD dst, regD src) %{
+    single_instruction;
+    dst   : W(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert F->D
+pipe_class fcvtF2D(regD dst, regF src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert I->D
+pipe_class fcvtI2D(regD dst, regF src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert LHi->D
+pipe_class fcvtLHi2D(regD dst, regD src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert L->D
+pipe_class fcvtL2D(regD dst, iRegL src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert L->F
+pipe_class fcvtL2F(regF dst, iRegL src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert D->F
+pipe_class fcvtD2F(regD dst, regF src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert I->L
+pipe_class fcvtI2L(regD dst, regF src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert D->I
+pipe_class fcvtD2I(iRegI dst, regD src, flagsReg cr) %{
+    instruction_count(1); multiple_bundles;
+    dst   : X(write)+6;
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert D->L
+pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
+    instruction_count(1); multiple_bundles;
+    dst   : X(write)+6;
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert F->I
+pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
+    instruction_count(1); multiple_bundles;
+    dst   : X(write)+6;
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert F->L
+pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
+    instruction_count(1); multiple_bundles;
+    dst   : X(write)+6;
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Convert I->F
+pipe_class fcvtI2F(regF dst, regF src) %{
+    single_instruction;
+    dst   : X(write);
+    src   : E(read);
+    FA    : R;
+%}
+
+// Floating Point Compare
+pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
+    single_instruction;
+    cr    : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FA    : R;
+%}
+
+// Floating Point Compare
+pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
+    single_instruction;
+    cr    : X(write);
+    src1  : E(read);
+    src2  : E(read);
+    FA    : R;
+%}
+
+// Floating Add Nop
+pipe_class fadd_nop() %{
+    single_instruction;
+    FA  : R;
+%}
+
+// Integer Store to Memory
+pipe_class istore_mem_reg(memoryI mem, iRegI src) %{
+    single_instruction;
+    mem   : R(read);
+    src   : C(read);
+    MS    : R;
+%}
+
+// Integer Store to Memory
+pipe_class istore_mem_spORreg(memoryI mem, sp_ptr_RegP src) %{
+    single_instruction;
+    mem   : R(read);
+    src   : C(read);
+    MS    : R;
+%}
+
+// Float Store
+pipe_class fstoreF_mem_reg(memoryF mem, RegF src) %{
+    single_instruction;
+    mem : R(read);
+    src : C(read);
+    MS  : R;
+%}
+
+// Float Store
+pipe_class fstoreF_mem_zero(memoryF mem, immF0 src) %{
+    single_instruction;
+    mem : R(read);
+    MS  : R;
+%}
+
+// Double Store
+pipe_class fstoreD_mem_reg(memoryD mem, RegD src) %{
+    instruction_count(1);
+    mem : R(read);
+    src : C(read);
+    MS  : R;
+%}
+
+// Double Store
+pipe_class fstoreD_mem_zero(memoryD mem, immD0 src) %{
+    single_instruction;
+    mem : R(read);
+    MS  : R;
+%}
+
+// Integer Load (when sign bit propagation not needed)
+pipe_class iload_mem(iRegI dst, memoryI mem) %{
+    single_instruction;
+    mem : R(read);
+    dst : C(write);
+    MS  : R;
+%}
+
+// Integer Load (when sign bit propagation or masking is needed)
+pipe_class iload_mask_mem(iRegI dst, memoryI mem) %{
+    single_instruction;
+    mem : R(read);
+    dst : M(write);
+    MS  : R;
+%}
+
+// Float Load
+pipe_class floadF_mem(regF dst, memoryF mem) %{
+    single_instruction;
+    mem : R(read);
+    dst : M(write);
+    MS  : R;
+%}
+
+// Float Load
+pipe_class floadD_mem(regD dst, memoryD mem) %{
+    instruction_count(1); multiple_bundles; // an unaligned argument is the only multiple-bundle case
+    mem : R(read);
+    dst : M(write);
+    MS  : R;
+%}
+
+// Memory Nop
+pipe_class mem_nop() %{
+    single_instruction;
+    MS  : R;
+%}
+
+pipe_class sethi(iRegP dst, immI src) %{
+    single_instruction;
+    dst  : E(write);
+    IALU : R;
+%}
+
+pipe_class loadPollP(iRegP poll) %{
+    single_instruction;
+    poll : R(read);
+    MS   : R;
+%}
+
+pipe_class br(Universe br, label labl) %{
+    single_instruction_with_delay_slot;
+    BR  : R;
+%}
+
+pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
+    single_instruction_with_delay_slot;
+    cr    : E(read);
+    BR    : R;
+%}
+
+pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
+    single_instruction_with_delay_slot;
+    op1 : E(read);
+    BR  : R;
+    MS  : R;
+%}
+
+pipe_class br_nop() %{
+    single_instruction;
+    BR  : R;
+%}
+
+pipe_class simple_call(method meth) %{
+    instruction_count(2); multiple_bundles; force_serialization;
+    fixed_latency(100);
+    BR  : R(1);
+    MS  : R(1);
+    A0  : R(1);
+%}
+
+pipe_class compiled_call(method meth) %{
+    instruction_count(1); multiple_bundles; force_serialization;
+    fixed_latency(100);
+    MS  : R(1);
+%}
+
+pipe_class call(method meth) %{
+    instruction_count(0); multiple_bundles; force_serialization;
+    fixed_latency(100);
+%}
+
+pipe_class tail_call(Universe ignore, label labl) %{
+    single_instruction; has_delay_slot;
+    fixed_latency(100);
+    BR  : R(1);
+    MS  : R(1);
+%}
+
+pipe_class ret(Universe ignore) %{
+    single_instruction; has_delay_slot;
+    BR  : R(1);
+    MS  : R(1);
+%}
+
+// The real do-nothing guy
+pipe_class empty( ) %{
+    instruction_count(0);
+%}
+
+pipe_class long_memory_op() %{
+    instruction_count(0); multiple_bundles; force_serialization;
+    fixed_latency(25);
+    MS  : R(1);
+%}
+
+// Check-cast
+pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
+    array : R(read);
+    match  : R(read);
+    IALU   : R(2);
+    BR     : R(2);
+    MS     : R;
+%}
+
+// Convert FPU flags into +1,0,-1
+pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
+    src1  : E(read);
+    src2  : E(read);
+    dst   : E(write);
+    FA    : R;
+    MS    : R(2);
+    BR    : R(2);
+%}
+
+// Compare for p < q, and conditionally add y
+pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
+    p     : E(read);
+    q     : E(read);
+    y     : E(read);
+    IALU  : R(3);
+%}
+
+// Perform a compare, then move conditionally in a branch delay slot.
+pipe_class min_max( iRegI src2, iRegI srcdst ) %{
+    src2   : E(read);
+    srcdst : E(read);
+    IALU   : R;
+    BR     : R;
+%}
+
+// Define the class for the Nop node
+define %{
+   MachNop = ialu_nop;
+%}
+
+%}
+
+//----------INSTRUCTIONS-------------------------------------------------------
+
+//------------Special Nop instructions for bundling - no match rules-----------
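+// These nops are emitted only by the bundler to pad otherwise-empty issue
+// slots, so the matcher never selects them and they need no match rule.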
+// Nop using the A0 functional unit
+instruct Nop_A0() %{
+  ins_pipe(ialu_nop_A0);
+%}
+
+// Nop using the A1 functional unit
+instruct Nop_A1( ) %{
+  ins_pipe(ialu_nop_A1);
+%}
+
+// Nop using the memory functional unit
+instruct Nop_MS( ) %{
+  ins_pipe(mem_nop);
+%}
+
+// Nop using the floating add functional unit
+instruct Nop_FA( ) %{
+  ins_pipe(fadd_nop);
+%}
+
+// Nop using the branch functional unit
+instruct Nop_BR( ) %{
+  ins_pipe(br_nop);
+%}
+
+//----------Load/Store/Move Instructions---------------------------------------
+//----------Load Instructions--------------------------------------------------
+// Load Byte (8bit signed)
+instruct loadB(iRegI dst, memoryB mem) %{
+  match(Set dst (LoadB mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRSB   $dst,$mem\t! byte -> int" %}
+  ins_encode %{
+    // High 32 bits are harmlessly set on AArch64
+    __ ldrsb($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Byte (8bit signed) into a Long Register
+instruct loadB2L(iRegL dst, memoryB mem) %{
+  match(Set dst (ConvI2L (LoadB mem)));
+  ins_cost(MEMORY_REF_COST);
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LDRSB $dst,$mem\t! byte -> long"  %}
+  ins_encode %{
+    __ ldrsb($dst$$Register, $mem$$Address);
+  %}
+#else
+  size(8);
+  format %{ "LDRSB $dst.lo,$mem\t! byte -> long\n\t"
+            "ASR   $dst.hi,$dst.lo,31" %}
+  ins_encode %{
+    __ ldrsb($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
+  %}
+#endif
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Unsigned Byte (8bit UNsigned) into an int reg
+instruct loadUB(iRegI dst, memoryB mem) %{
+  match(Set dst (LoadUB mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRB   $dst,$mem\t! ubyte -> int" %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load Unsigned Byte (8bit UNsigned) into a Long Register
+instruct loadUB2L(iRegL dst, memoryB mem) %{
+  match(Set dst (ConvI2L (LoadUB mem)));
+  ins_cost(MEMORY_REF_COST);
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LDRB  $dst,$mem\t! ubyte -> long"  %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+  %}
+#else
+  size(8);
+  format %{ "LDRB  $dst.lo,$mem\t! ubyte -> long\n\t"
+            "MOV   $dst.hi,0" %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+  %}
+#endif
+  ins_pipe(iload_mem);
+%}
+
+// Load Unsigned Byte (8 bit UNsigned) with immediate mask into Long Register
+instruct loadUB2L_limmI(iRegL dst, memoryB mem, limmIlow8 mask) %{
+  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
+
+#ifdef AARCH64
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
+  size(8);
+  format %{ "LDRB  $dst,$mem\t! ubyte -> long\n\t"
+            "AND  $dst,$dst,$mask" %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+    __ andr($dst$$Register, $dst$$Register, limmI_low($mask$$constant, 8));
+  %}
+#else
+  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
+  size(12);
+  format %{ "LDRB  $dst.lo,$mem\t! ubyte -> long\n\t"
+            "MOV   $dst.hi,0\n\t"
+            "AND  $dst.lo,$dst.lo,$mask" %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+    __ andr($dst$$Register, $dst$$Register, limmI_low($mask$$constant, 8));
+  %}
+#endif
+  ins_pipe(iload_mem);
+%}
+
+// Load Short (16bit signed)
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadSoff(iRegI dst, memoryScaledS mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadS (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "LDRSH   $dst,$mem+$off\t! short temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldrsh($dst$$Register, nmem);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+#endif
+
+instruct loadS(iRegI dst, memoryS mem) %{
+  match(Set dst (LoadS mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRSH   $dst,$mem\t! short" %}
+  ins_encode %{
+    __ ldrsh($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Short (16 bit signed) to Byte (8 bit signed)
+instruct loadS2B(iRegI dst, memoryS mem, immI_24 twentyfour) %{
+  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+
+  format %{ "LDRSB   $dst,$mem\t! short -> byte" %}
+  ins_encode %{
+    // High 32 bits are harmlessly set on AArch64
+    __ ldrsb($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Short (16bit signed) into a Long Register
+instruct loadS2L(iRegL dst, memoryS mem) %{
+  match(Set dst (ConvI2L (LoadS mem)));
+  ins_cost(MEMORY_REF_COST);
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LDRSH $dst,$mem\t! short -> long"  %}
+  ins_encode %{
+    __ ldrsh($dst$$Register, $mem$$Address);
+  %}
+#else
+  size(8);
+  format %{ "LDRSH $dst.lo,$mem\t! short -> long\n\t"
+            "ASR   $dst.hi,$dst.lo,31" %}
+  ins_encode %{
+    __ ldrsh($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
+  %}
+#endif
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Unsigned Short/Char (16bit UNsigned)
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadUSoff(iRegI dst, memoryScaledS mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadUS (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "LDRH   $dst,$mem+$off\t! ushort/char temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldrh($dst$$Register, nmem);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+instruct loadUS(iRegI dst, memoryS mem) %{
+  match(Set dst (LoadUS mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRH   $dst,$mem\t! ushort/char" %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
+instruct loadUS2B(iRegI dst, memoryB mem, immI_24 twentyfour) %{
+  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRSB   $dst,$mem\t! ushort -> byte" %}
+  ins_encode %{
+    __ ldrsb($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
+instruct loadUS2L(iRegL dst, memoryS mem) %{
+  match(Set dst (ConvI2L (LoadUS mem)));
+  ins_cost(MEMORY_REF_COST);
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LDRH  $dst,$mem\t! short -> long"  %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+  %}
+#else
+  size(8);
+  format %{ "LDRH  $dst.lo,$mem\t! short -> long\n\t"
+            "MOV   $dst.hi, 0" %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+  %}
+#endif
+  ins_pipe(iload_mem);
+%}
+
+// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
+instruct loadUS2L_immI_255(iRegL dst, memoryB mem, immI_255 mask) %{
+  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
+  ins_cost(MEMORY_REF_COST);
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LDRB  $dst,$mem"  %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+  %}
+#else
+  size(8);
+  format %{ "LDRB  $dst.lo,$mem\t! \n\t"
+            "MOV   $dst.hi, 0" %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+  %}
+#endif
+  ins_pipe(iload_mem);
+%}
+
+// Load Unsigned Short/Char (16bit UNsigned) with a immediate mask into a Long Register
+instruct loadUS2L_limmI(iRegL dst, memoryS mem, limmI mask) %{
+  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
+#ifdef AARCH64
+  ins_cost(MEMORY_REF_COST + 1*DEFAULT_COST);
+
+  size(8);
+  format %{ "LDRH   $dst,$mem\t! ushort/char & mask -> long\n\t"
+            "AND    $dst,$dst,$mask" %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+    __ andr($dst$$Register, $dst$$Register, (uintx)$mask$$constant);
+  %}
+#else
+  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
+
+  size(12);
+  format %{ "LDRH   $dst,$mem\t! ushort/char & mask -> long\n\t"
+            "MOV    $dst.hi, 0\n\t"
+            "AND    $dst,$dst,$mask" %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+    __ andr($dst$$Register, $dst$$Register, $mask$$constant);
+  %}
+#endif
+  ins_pipe(iload_mem);
+%}
+
+// Load Integer
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadIoff(iRegI dst, memoryScaledI mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadI (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "ldr_s32 $dst,$mem+$off\t! int temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldr_s32($dst$$Register, nmem);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+instruct loadI(iRegI dst, memoryI mem) %{
+  match(Set dst (LoadI mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "ldr_s32 $dst,$mem\t! int" %}
+  ins_encode %{
+    __ ldr_s32($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load Integer to Byte (8 bit signed)
+instruct loadI2B(iRegI dst, memoryS mem, immI_24 twentyfour) %{
+  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+
+  format %{ "LDRSB   $dst,$mem\t! int -> byte" %}
+  ins_encode %{
+    __ ldrsb($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Integer to Unsigned Byte (8 bit UNsigned)
+instruct loadI2UB(iRegI dst, memoryB mem, immI_255 mask) %{
+  match(Set dst (AndI (LoadI mem) mask));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+
+  format %{ "LDRB   $dst,$mem\t! int -> ubyte" %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Integer to Short (16 bit signed)
+instruct loadI2S(iRegI dst, memoryS mem, immI_16 sixteen) %{
+  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRSH   $dst,$mem\t! int -> short" %}
+  ins_encode %{
+    __ ldrsh($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Integer to Unsigned Short (16 bit UNsigned)
+instruct loadI2US(iRegI dst, memoryS mem, immI_65535 mask) %{
+  match(Set dst (AndI (LoadI mem) mask));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRH   $dst,$mem\t! int -> ushort/char" %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Integer into a Long Register
+instruct loadI2L(iRegL dst, memoryI mem) %{
+  match(Set dst (ConvI2L (LoadI mem)));
+#ifdef AARCH64
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRSW $dst.lo,$mem\t! int -> long"  %}
+  ins_encode %{
+    __ ldr_s32($dst$$Register, $mem$$Address);
+  %}
+#else
+  ins_cost(MEMORY_REF_COST);
+
+  size(8);
+  format %{ "LDR   $dst.lo,$mem\t! int -> long\n\t"
+            "ASR   $dst.hi,$dst.lo,31\t! int->long" %}
+  ins_encode %{
+    __ ldr($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), AsmOperand($dst$$Register, asr, 31));
+  %}
+#endif
+  ins_pipe(iload_mask_mem);
+%}
+
+// Load Integer with mask 0xFF into a Long Register
+instruct loadI2L_immI_255(iRegL dst, memoryB mem, immI_255 mask) %{
+  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
+#ifdef AARCH64
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDRB   $dst.lo,$mem\t! int & 0xFF -> long"  %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+  %}
+#else
+  ins_cost(MEMORY_REF_COST);
+
+  size(8);
+  format %{ "LDRB   $dst.lo,$mem\t! int & 0xFF -> long\n\t"
+            "MOV    $dst.hi, 0" %}
+  ins_encode %{
+    __ ldrb($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+  %}
+#endif
+  ins_pipe(iload_mem);
+%}
+
+// Load Integer with mask 0xFFFF into a Long Register
+instruct loadI2L_immI_65535(iRegL dst, memoryS mem, immI_65535 mask) %{
+  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
+  ins_cost(MEMORY_REF_COST);
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LDRH   $dst,$mem\t! int & 0xFFFF -> long" %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+  %}
+#else
+  size(8);
+  format %{ "LDRH   $dst,$mem\t! int & 0xFFFF -> long\n\t"
+            "MOV    $dst.hi, 0" %}
+  ins_encode %{
+    __ ldrh($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+  %}
+#endif
+  ins_pipe(iload_mask_mem);
+%}
+
+#ifdef AARCH64
+// Load Integer with an immediate mask into a Long Register
+instruct loadI2L_limmI(iRegL dst, memoryI mem, limmI mask) %{
+  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
+  ins_cost(MEMORY_REF_COST + 1*DEFAULT_COST);
+
+  size(8);
+  format %{ "LDRSW $dst,$mem\t! int -> long\n\t"
+            "AND   $dst,$dst,$mask" %}
+
+  ins_encode %{
+    __ ldr_s32($dst$$Register, $mem$$Address);
+    __ andr($dst$$Register, $dst$$Register, (uintx)$mask$$constant);
+  %}
+  ins_pipe(iload_mem);
+%}
+#else
+// Load Integer with a 31-bit immediate mask into a Long Register
+instruct loadI2L_limmU31(iRegL dst, memoryI mem, limmU31 mask) %{
+  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
+  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
+
+  size(12);
+  format %{ "LDR   $dst.lo,$mem\t! int -> long\n\t"
+            "MOV    $dst.hi, 0\n\t"
+            "AND   $dst,$dst,$mask" %}
+
+  ins_encode %{
+    __ ldr($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+    __ andr($dst$$Register, $dst$$Register, $mask$$constant);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+#ifdef AARCH64
+// Load Integer with mask into a Long Register
+// FIXME: use signedRegI mask, remove tmp?
+instruct loadI2L_immI(iRegL dst, memoryI mem, immI mask, iRegI tmp) %{
+  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
+  effect(TEMP dst, TEMP tmp);
+
+  ins_cost(MEMORY_REF_COST + 3*DEFAULT_COST);
+  format %{ "LDRSW    $mem,$dst\t! int & 31-bit mask -> long\n\t"
+            "MOV_SLOW $tmp,$mask\n\t"
+            "AND      $dst,$tmp,$dst" %}
+  ins_encode %{
+    __ ldrsw($dst$$Register, $mem$$Address);
+    __ mov_slow($tmp$$Register, $mask$$constant);
+    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+#else
+// Load Integer with a 31-bit mask into a Long Register
+// FIXME: use iRegI mask, remove tmp?
+instruct loadI2L_immU31(iRegL dst, memoryI mem, immU31 mask, iRegI tmp) %{
+  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
+  effect(TEMP dst, TEMP tmp);
+
+  ins_cost(MEMORY_REF_COST + 4*DEFAULT_COST);
+  size(20);
+  format %{ "LDR      $mem,$dst\t! int & 31-bit mask -> long\n\t"
+            "MOV      $dst.hi, 0\n\t"
+            "MOV_SLOW $tmp,$mask\n\t"
+            "AND      $dst,$tmp,$dst" %}
+  ins_encode %{
+    __ ldr($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+    __ mov_slow($tmp$$Register, $mask$$constant);
+    __ andr($dst$$Register, $dst$$Register, $tmp$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+// Load Unsigned Integer into a Long Register
+instruct loadUI2L(iRegL dst, memoryI mem, immL_32bits mask) %{
+  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
+  ins_cost(MEMORY_REF_COST);
+
+#ifdef AARCH64
+//size(4);
+  format %{ "LDR_w $dst,$mem\t! uint -> long" %}
+  ins_encode %{
+    __ ldr_w($dst$$Register, $mem$$Address);
+  %}
+#else
+  size(8);
+  format %{ "LDR   $dst.lo,$mem\t! uint -> long\n\t"
+            "MOV   $dst.hi,0" %}
+  ins_encode %{
+    __ ldr($dst$$Register, $mem$$Address);
+    __ mov($dst$$Register->successor(), 0);
+  %}
+#endif
+  ins_pipe(iload_mem);
+%}
+
+// Load Long
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadLoff(iRegLd dst, memoryScaledL mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadL (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "LDR    $dst,$mem+$off\t! long temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldr($dst$$Register, nmem);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+instruct loadL(iRegLd dst, memoryL mem ) %{
+#ifdef AARCH64
+  // already atomic on AArch64
+#else
+  predicate(!((LoadLNode*)n)->require_atomic_access());
+#endif
+  match(Set dst (LoadL mem));
+  effect(TEMP dst);
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "ldr_64  $dst,$mem\t! long" %}
+  ins_encode %{
+    __ ldr_64($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+#ifndef AARCH64
+instruct loadL_2instr(iRegL dst, memorylong mem ) %{
+  predicate(!((LoadLNode*)n)->require_atomic_access());
+  match(Set dst (LoadL mem));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
+
+  size(8);
+  format %{ "LDR    $dst.lo,$mem \t! long order of instrs reversed if $dst.lo == base($mem)\n\t"
+            "LDR    $dst.hi,$mem+4 or $mem" %}
+  ins_encode %{
+    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
+
+    if ($dst$$Register == reg_to_register_object($mem$$base)) {
+      __ ldr($dst$$Register->successor(), Amemhi);
+      __ ldr($dst$$Register, Amemlo);
+    } else {
+      __ ldr($dst$$Register, Amemlo);
+      __ ldr($dst$$Register->successor(), Amemhi);
+    }
+  %}
+  ins_pipe(iload_mem);
+%}
+
+instruct loadL_volatile(iRegL dst, indirect mem ) %{
+  predicate(((LoadLNode*)n)->require_atomic_access());
+  match(Set dst (LoadL mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDMIA    $dst,$mem\t! long" %}
+  ins_encode %{
+    // FIXME: why is ldmia considered atomic?  Should be ldrexd
+    RegisterSet set($dst$$Register);
+    set = set | reg_to_register_object($dst$$reg + 1);
+    __ ldmia(reg_to_register_object($mem$$base), set);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+instruct loadL_volatile_fp(iRegL dst, memoryD mem ) %{
+  predicate(((LoadLNode*)n)->require_atomic_access());
+  match(Set dst (LoadL mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(8);
+  format %{ "FLDD      S14, $mem"
+            "FMRRD    $dst, S14\t! long \n't" %}
+  ins_encode %{
+    __ fldd(S14, $mem$$Address);
+    __ fmrrd($dst$$Register, $dst$$Register->successor(), S14);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+instruct loadL_unaligned(iRegL dst, memorylong mem ) %{
+  match(Set dst (LoadL_unaligned mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(8);
+  format %{ "LDR    $dst.lo,$mem\t! long order of instrs reversed if $dst.lo == base($mem)\n\t"
+            "LDR    $dst.hi,$mem+4" %}
+  ins_encode %{
+    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
+
+    if ($dst$$Register == reg_to_register_object($mem$$base)) {
+      __ ldr($dst$$Register->successor(), Amemhi);
+      __ ldr($dst$$Register, Amemlo);
+    } else {
+      __ ldr($dst$$Register, Amemlo);
+      __ ldr($dst$$Register->successor(), Amemhi);
+    }
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif // !AARCH64
+
+// Load Range
+instruct loadRange(iRegI dst, memoryI mem) %{
+  match(Set dst (LoadRange mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "LDR_u32 $dst,$mem\t! range" %}
+  ins_encode %{
+    __ ldr_u32($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+// Load Pointer
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadPoff(iRegP dst, memoryScaledP mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadP (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "LDR    $dst,$mem+$off\t! ptr temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldr($dst$$Register, nmem);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+instruct loadP(iRegP dst, memoryP mem) %{
+  match(Set dst (LoadP mem));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "LDR   $dst,$mem\t! ptr" %}
+  ins_encode %{
+    __ ldr($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+#ifdef XXX
+// FIXME XXXX
+//instruct loadSP(iRegP dst, memoryP mem) %{
+instruct loadSP(SPRegP dst, memoryP mem, iRegP tmp) %{
+  match(Set dst (LoadP mem));
+  effect(TEMP tmp);
+  ins_cost(MEMORY_REF_COST+1);
+  size(8);
+
+  format %{ "LDR   $tmp,$mem\t! ptr\n\t"
+            "MOV   $dst,$tmp\t! ptr" %}
+  ins_encode %{
+    __ ldr($tmp$$Register, $mem$$Address);
+    __ mov($dst$$Register, $tmp$$Register);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+#ifdef _LP64
+// Load Compressed Pointer
+
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadNoff(iRegN dst, memoryScaledI mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadN (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "ldr_u32 $dst,$mem+$off\t! compressed ptr temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldr_u32($dst$$Register, nmem);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+instruct loadN(iRegN dst, memoryI mem) %{
+  match(Set dst (LoadN mem));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "ldr_u32 $dst,$mem\t! compressed ptr" %}
+  ins_encode %{
+    __ ldr_u32($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+// Load Klass Pointer
+instruct loadKlass(iRegP dst, memoryI mem) %{
+  match(Set dst (LoadKlass mem));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "LDR   $dst,$mem\t! klass ptr" %}
+  ins_encode %{
+    __ ldr($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+
+#ifdef _LP64
+// Load narrow Klass Pointer
+instruct loadNKlass(iRegN dst, memoryI mem) %{
+  match(Set dst (LoadNKlass mem));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "ldr_u32 $dst,$mem\t! compressed klass ptr" %}
+  ins_encode %{
+    __ ldr_u32($dst$$Register, $mem$$Address);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadDoff(regD dst, memoryScaledD mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadD (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "ldr    $dst,$mem+$off\t! double temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldr_d($dst$$FloatRegister, nmem);
+  %}
+  ins_pipe(floadD_mem);
+%}
+#endif
+
+instruct loadD(regD dst, memoryD mem) %{
+  match(Set dst (LoadD mem));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  // FIXME: needs to be atomic, but the ARMv7 A.R.M. guarantees that
+  // only LDREXD and STREXD are 64-bit single-copy atomic
+  format %{ "FLDD   $dst,$mem" %}
+  ins_encode %{
+    __ ldr_double($dst$$FloatRegister, $mem$$Address);
+  %}
+  ins_pipe(floadD_mem);
+%}
+
+#ifndef AARCH64
+// Load Double - UNaligned
+instruct loadD_unaligned(regD_low dst, memoryF2 mem ) %{
+  match(Set dst (LoadD_unaligned mem));
+  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
+  size(8);
+  format %{ "FLDS    $dst.lo,$mem\t! misaligned double\n"
+          "\tFLDS    $dst.hi,$mem+4\t!" %}
+  ins_encode %{
+    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
+      __ flds($dst$$FloatRegister, Amemlo);
+      __ flds($dst$$FloatRegister->successor(), Amemhi);
+  %}
+  ins_pipe(iload_mem);
+%}
+#endif
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct loadFoff(regF dst, memoryScaledF mem, aimmX off, iRegP tmp) %{
+  match(Set dst (LoadF (AddP mem off)));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "ldr    $dst,$mem+$off\t! float temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ ldr_s($dst$$FloatRegister, nmem);
+  %}
+  ins_pipe(floadF_mem);
+%}
+#endif
+
+instruct loadF(regF dst, memoryF mem) %{
+  match(Set dst (LoadF mem));
+
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "FLDS    $dst,$mem" %}
+  ins_encode %{
+    __ ldr_float($dst$$FloatRegister, $mem$$Address);
+  %}
+  ins_pipe(floadF_mem);
+%}
+
+#ifdef AARCH64
+instruct load_limmI(iRegI dst, limmI src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST + 1); // + 1 because MOV is preferred
+  format %{ "ORR_w  $dst, ZR, $src\t! int"  %}
+  ins_encode %{
+    __ orr_w($dst$$Register, ZR, (uintx)$src$$constant);
+  %}
+  ins_pipe(ialu_imm);
+%}
+#endif
+
+// Load Constant
+instruct loadConI( iRegI dst, immI src ) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 3/2);
+  format %{ "MOV_SLOW    $dst, $src" %}
+  ins_encode %{
+    __ mov_slow($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_hi_lo_reg);
+%}
+
+instruct loadConIMov( iRegI dst, immIMov src ) %{
+  match(Set dst src);
+  size(4);
+  format %{ "MOV    $dst, $src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+#ifndef AARCH64
+instruct loadConIMovn( iRegI dst, immIRotn src ) %{
+  match(Set dst src);
+  size(4);
+  format %{ "MVN    $dst, ~$src" %}
+  ins_encode %{
+    __ mvn($dst$$Register, ~$src$$constant);
+  %}
+  ins_pipe(ialu_imm_n);
+%}
+#endif
+
+instruct loadConI16( iRegI dst, immI16 src ) %{
+  match(Set dst src);
+  size(4);
+#ifdef AARCH64
+  format %{ "MOVZ_w  $dst, $src" %}
+#else
+  format %{ "MOVW    $dst, $src" %}
+#endif
+  ins_encode %{
+#ifdef AARCH64
+    __ mov_w($dst$$Register, $src$$constant);
+#else
+    __ movw($dst$$Register, $src$$constant);
+#endif
+  %}
+  ins_pipe(ialu_imm_n);
+%}
+
+instruct loadConP(iRegP dst, immP src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 3/2);
+  format %{ "MOV_SLOW    $dst,$src\t!ptr" %}
+  ins_encode %{
+    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
+    intptr_t val = $src$$constant;
+    if (constant_reloc == relocInfo::oop_type) {
+      __ mov_oop($dst$$Register, (jobject)val);
+    } else if (constant_reloc == relocInfo::metadata_type) {
+      __ mov_metadata($dst$$Register, (Metadata*)val);
+    } else {
+      __ mov_slow($dst$$Register, val);
+    }
+  %}
+  ins_pipe(loadConP);
+%}
+
+
+instruct loadConP_poll(iRegP dst, immP_poll src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+  format %{ "MOV_SLOW    $dst,$src\t!ptr" %}
+  ins_encode %{
+      __ mov_slow($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(loadConP_poll);
+%}
+
+#ifdef AARCH64
+instruct loadConP0(iRegP dst, immP0 src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+  format %{ "MOV    $dst,ZR\t!ptr" %}
+  ins_encode %{
+    __ mov($dst$$Register, ZR);
+  %}
+  ins_pipe(ialu_none);
+%}
+
+instruct loadConN(iRegN dst, immN src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 3/2);
+  format %{ "SET    $dst,$src\t! compressed ptr" %}
+  ins_encode %{
+    Register dst = $dst$$Register;
+    // FIXME: use $constanttablebase?
+    __ set_narrow_oop(dst, (jobject)$src$$constant);
+  %}
+  ins_pipe(ialu_hi_lo_reg);
+%}
+
+instruct loadConN0(iRegN dst, immN0 src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+  format %{ "MOV    $dst,ZR\t! compressed ptr" %}
+  ins_encode %{
+    __ mov($dst$$Register, ZR);
+  %}
+  ins_pipe(ialu_none);
+%}
+
+instruct loadConNKlass(iRegN dst, immNKlass src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 3/2);
+  format %{ "SET    $dst,$src\t! compressed klass ptr" %}
+  ins_encode %{
+    Register dst = $dst$$Register;
+    // FIXME: use $constanttablebase?
+    __ set_narrow_klass(dst, (Klass*)$src$$constant);
+  %}
+  ins_pipe(ialu_hi_lo_reg);
+%}
+
+instruct load_limmL(iRegL dst, limmL src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+  format %{ "ORR    $dst, ZR, $src\t! long"  %}
+  ins_encode %{
+    __ orr($dst$$Register, ZR, (uintx)$src$$constant);
+  %}
+  ins_pipe(loadConL);
+%}
+instruct load_immLMov(iRegL dst, immLMov src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+  format %{ "MOV    $dst, $src\t! long"  %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(loadConL);
+%}
+
+instruct loadConL(iRegL dst, immL src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 4); // worst case
+  format %{ "mov_slow   $dst, $src\t! long"  %}
+  ins_encode %{
+    // FIXME: use $constanttablebase?
+    __ mov_slow($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(loadConL);
+%}
+#else
+instruct loadConL(iRegL dst, immL src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 4);
+  format %{ "MOV_SLOW   $dst.lo, $src & 0x0FFFFFFFFL \t! long\n\t"
+            "MOV_SLOW   $dst.hi, $src >> 32" %}
+  ins_encode %{
+    __ mov_slow(reg_to_register_object($dst$$reg), $src$$constant & 0x0FFFFFFFFL);
+    __ mov_slow(reg_to_register_object($dst$$reg + 1), ((julong)($src$$constant)) >> 32);
+  %}
+  ins_pipe(loadConL);
+%}
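+
+// Illustrative split for the rule above, assuming src == 0x1122334455667788:
+//   MOV_SLOW  dst.lo, #0x55667788
+//   MOV_SLOW  dst.hi, #0x11223344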
+
+instruct loadConL16( iRegL dst, immL16 src ) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 2);
+
+  size(8);
+  format %{ "MOVW    $dst.lo, $src \n\t"
+            "MOVW    $dst.hi, 0 \n\t" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant);
+    __ movw($dst$$Register->successor(), 0);
+  %}
+  ins_pipe(ialu_imm);
+%}
+#endif
+
+instruct loadConF_imm8(regF dst, imm8F src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+  size(4);
+
+  format %{ "FCONSTS      $dst, $src"%}
+
+  ins_encode %{
+    __ fconsts($dst$$FloatRegister, Assembler::float_num($src$$constant).imm8());
+  %}
+  ins_pipe(loadConFD); // FIXME
+%}
+
+#ifdef AARCH64
+instruct loadIConF(iRegI dst, immF src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 2);
+
+  format %{ "MOV_SLOW  $dst, $src\t! loadIConF"  %}
+
+  ins_encode %{
+    // FIXME revisit once 6961697 is in
+    union {
+      jfloat f;
+      int i;
+    } v;
+    v.f = $src$$constant;
+    __ mov_slow($dst$$Register, v.i);
+  %}
+  ins_pipe(ialu_imm);
+%}
+#endif
+
+instruct loadConF(regF dst, immF src, iRegI tmp) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST * 2);
+  effect(TEMP tmp);
+  size(3*4);
+
+  format %{ "MOV_SLOW  $tmp, $src\n\t"
+            "FMSR      $dst, $tmp"%}
+
+  ins_encode %{
+    // FIXME revisit once 6961697 is in
+    union {
+      jfloat f;
+      int i;
+    } v;
+    v.f = $src$$constant;
+    __ mov_slow($tmp$$Register, v.i);
+    __ fmsr($dst$$FloatRegister, $tmp$$Register);
+  %}
+  ins_pipe(loadConFD); // FIXME
+%}
+
+instruct loadConD_imm8(regD dst, imm8D src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+  size(4);
+
+  format %{ "FCONSTD      $dst, $src"%}
+
+  ins_encode %{
+    __ fconstd($dst$$FloatRegister, Assembler::double_num($src$$constant).imm8());
+  %}
+  ins_pipe(loadConFD); // FIXME
+%}
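+
+// FCONSTS/FCONSTD take a VFP "modified immediate": 8 bits encoding
+// values of the form +/- (m/16) * 2^e with 16 <= m <= 31 and
+// -3 <= e <= 4, so e.g. 0.5, 1.0, 2.0 and 10.0 encode while 0.1 does not.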
+
+instruct loadConD(regD dst, immD src, iRegP tmp) %{
+  match(Set dst src);
+  effect(TEMP tmp);
+  ins_cost(MEMORY_REF_COST);
+  format %{ "FLDD  $dst, [$constanttablebase + $constantoffset]\t! load from constant table: double=$src" %}
+
+  ins_encode %{
+    Register r = $constanttablebase;
+    int offset  = $constantoffset($src);
+    if (!is_memoryD(offset)) {                // can't use a predicate
+                                              // in load constant instructs
+      __ add_slow($tmp$$Register, r, offset);
+      r = $tmp$$Register;
+      offset = 0;
+    }
+    __ ldr_double($dst$$FloatRegister, Address(r, offset));
+  %}
+  ins_pipe(loadConFD);
+%}
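+
+// Sketch of the constant-table path above: the double sits in the
+// per-method constant pool addressed off $constanttablebase; when the
+// pool offset does not fit an FLDD displacement, the add_slow fallback
+// first forms the full address in $tmp and loads with offset 0.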
+
+// Prefetch instructions.
+// Must be safe to execute with invalid address (cannot fault).
+
+instruct prefetchAlloc_mp( memoryP mem ) %{
+  predicate(os::is_MP());
+  match( PrefetchAllocation mem );
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "PLDW $mem\t! Prefetch allocation" %}
+  ins_encode %{
+#ifdef AARCH64
+    __ prfm(pstl1keep, $mem$$Address);
+#else
+    __ pldw($mem$$Address);
+#endif
+  %}
+  ins_pipe(iload_mem);
+%}
+
+instruct prefetchAlloc_sp( memoryP mem ) %{
+  predicate(!os::is_MP());
+  match( PrefetchAllocation mem );
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "PLD $mem\t! Prefetch allocation" %}
+  ins_encode %{
+#ifdef AARCH64
+    __ prfm(pstl1keep, $mem$$Address);
+#else
+    __ pld($mem$$Address);
+#endif
+  %}
+  ins_pipe(iload_mem);
+%}
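+
+// PLDW (preload with intent to write) asks for the line in a writable
+// coherence state, which only matters with other observers; hence the
+// os::is_MP() split into PLDW/PLD. On AArch64 both rules emit
+// PRFM PSTL1KEEP.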
+
+//----------Store Instructions-------------------------------------------------
+// Store Byte
+instruct storeB(memoryB mem, store_RegI src) %{
+  match(Set mem (StoreB mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "STRB    $src,$mem\t! byte" %}
+  ins_encode %{
+    __ strb($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+instruct storeCM(memoryB mem, store_RegI src) %{
+  match(Set mem (StoreCM mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "STRB    $src,$mem\t! CMS card-mark byte" %}
+  ins_encode %{
+    __ strb($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store Char/Short
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct storeCoff(store_RegI src, memoryScaledS mem, aimmX off, iRegP tmp) %{
+  match(Set mem (StoreC (AddP mem off) src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "STRH    $src,$mem+$off\t! short temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ strh($src$$Register, nmem);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
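+
+// These *off store variants (this one and the ones below) fold an
+// (AddP mem off) whose offset lacks a direct addressing-mode encoding:
+// they form base+off in $tmp first, then store at offset 0.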
+
+instruct storeC(memoryS mem, store_RegI src) %{
+  match(Set mem (StoreC mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "STRH    $src,$mem\t! short" %}
+  ins_encode %{
+    __ strh($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store Integer
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct storeIoff(store_RegI src, memoryScaledI mem, aimmX off, iRegP tmp) %{
+  match(Set mem (StoreI (AddP mem off) src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "str_32 $src,$mem+$off\t! int temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ str_32($src$$Register, nmem);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
+
+instruct storeI(memoryI mem, store_RegI src) %{
+  match(Set mem (StoreI mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "str_32 $src,$mem" %}
+  ins_encode %{
+    __ str_32($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+// Store Long
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct storeLoff(store_RegLd src, memoryScaledL mem, aimmX off, iRegP tmp) %{
+  match(Set mem (StoreL (AddP mem off) src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "str_64 $src,$mem+$off\t! long temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ str_64($src$$Register, nmem);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
+
+instruct storeL(memoryL mem, store_RegLd src) %{
+#ifdef AARCH64
+  // already atomic for AArch64
+#else
+  predicate(!((StoreLNode*)n)->require_atomic_access());
+#endif
+  match(Set mem (StoreL mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "str_64  $src,$mem\t! long\n\t" %}
+
+  ins_encode %{
+    __ str_64($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+#ifndef AARCH64
+instruct storeL_2instr(memorylong mem, iRegL src) %{
+  predicate(!((StoreLNode*)n)->require_atomic_access());
+  match(Set mem (StoreL mem src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
+
+  size(8);
+  format %{ "STR    $src.lo,$mem\t! long\n\t"
+            "STR    $src.hi,$mem+4" %}
+
+  ins_encode %{
+    Address Amemlo = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    Address Amemhi = Address::make_raw($mem$$base, $mem$$index, $mem$$scale, $mem$$disp + 4, relocInfo::none);
+    __ str($src$$Register, Amemlo);
+    __ str($src$$Register->successor(), Amemhi);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+instruct storeL_volatile(indirect mem, iRegL src) %{
+  predicate(((StoreLNode*)n)->require_atomic_access());
+  match(Set mem (StoreL mem src));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "STMIA    $src,$mem\t! long" %}
+  ins_encode %{
+    // FIXME: why is stmia considered atomic?  Should be strexd
+    RegisterSet set($src$$Register);
+    set = set | reg_to_register_object($src$$reg + 1);
+    __ stmia(reg_to_register_object($mem$$base), set);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif // !AARCH64
+
+#ifndef AARCH64
+instruct storeL_volatile_fp(memoryD mem, iRegL src) %{
+  predicate(((StoreLNode*)n)->require_atomic_access());
+  match(Set mem (StoreL mem src));
+  ins_cost(MEMORY_REF_COST);
+  size(8);
+  format %{ "FMDRR    S14, $src\t! long \n\t"
+            "FSTD     S14, $mem" %}
+  ins_encode %{
+    __ fmdrr(S14, $src$$Register, $src$$Register->successor());
+    __ fstd(S14, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
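+
+// A long store marked require_atomic_access must be 64-bit single-copy
+// atomic, so the two volatile rules above avoid the STR/STR pair and
+// use either one STMIA of the register pair or a bounce through a
+// double register (FMDRR + FSTD); see the FIXME above regarding STREXD.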
+
+#ifdef XXX
+// Move SP Pointer
+//instruct movSP(sp_ptr_RegP dst, SPRegP src) %{
+//instruct movSP(iRegP dst, SPRegP src) %{
+instruct movSP(store_ptr_RegP dst, SPRegP src) %{
+  match(Set dst src);
+//predicate(!_kids[1]->_leaf->is_Proj() || _kids[1]->_leaf->as_Proj()->_con == TypeFunc::FramePtr);
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "MOV    $dst,$src\t! SP ptr\n\t" %}
+  ins_encode %{
+    assert(false, "XXX1 got here");
+    __ mov($dst$$Register, SP);
+    __ mov($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+#endif
+
+#ifdef AARCH64
+// FIXME
+// Store SP Pointer
+instruct storeSP(memoryP mem, SPRegP src, iRegP tmp) %{
+  match(Set mem (StoreP mem src));
+  predicate(_kids[1]->_leaf->is_Proj() && _kids[1]->_leaf->as_Proj()->_con == TypeFunc::FramePtr);
+  // Multiple StoreP rules differ only in their register masks.
+  // The matcher makes the last rule always valid; an earlier rule
+  // is only valid if it costs less than the last valid rule, so
+  // cost(rule1) < cost(rule2) < cost(last).
+  // Unlike immediates, register constraints are not checked
+  // at match time.
+  ins_cost(MEMORY_REF_COST+DEFAULT_COST+4);
+  effect(TEMP tmp);
+  size(8);
+
+  format %{ "MOV    $tmp,$src\t! SP ptr\n\t"
+            "STR    $tmp,$mem\t! SP ptr" %}
+  ins_encode %{
+    assert($src$$Register == SP, "SP expected");
+    __ mov($tmp$$Register, $src$$Register);
+    __ str($tmp$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_spORreg); // FIXME
+%}
+#endif // AARCH64
+
+// Store Pointer
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct storePoff(store_ptr_RegP src, memoryScaledP mem, aimmX off, iRegP tmp) %{
+  predicate(!_kids[1]->_leaf->is_Proj() || _kids[1]->_leaf->as_Proj()->_con != TypeFunc::FramePtr);
+  match(Set mem (StoreP (AddP mem off) src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "STR    $src,$mem+$off\t! ptr temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ str($src$$Register, nmem);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
+
+instruct storeP(memoryP mem, store_ptr_RegP src) %{
+  match(Set mem (StoreP mem src));
+#ifdef AARCH64
+  predicate(!_kids[1]->_leaf->is_Proj() || _kids[1]->_leaf->as_Proj()->_con != TypeFunc::FramePtr);
+#endif
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "STR    $src,$mem\t! ptr" %}
+  ins_encode %{
+    __ str($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_spORreg);
+%}
+
+#ifdef AARCH64
+// Store NULL Pointer
+instruct storeP0(memoryP mem, immP0 src) %{
+  match(Set mem (StoreP mem src));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "STR    ZR,$mem\t! ptr" %}
+  ins_encode %{
+    __ str(ZR, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_spORreg);
+%}
+#endif // AARCH64
+
+#ifdef _LP64
+// Store Compressed Pointer
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct storeNoff(store_RegN src, memoryScaledI mem, aimmX off, iRegP tmp) %{
+  match(Set mem (StoreN (AddP mem off) src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "str_32 $src,$mem+$off\t! compressed ptr temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ str_32($src$$Register, nmem);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
+
+instruct storeN(memoryI mem, store_RegN src) %{
+  match(Set mem (StoreN mem src));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "str_32 $src,$mem\t! compressed ptr" %}
+  ins_encode %{
+    __ str_32($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+
+#ifdef AARCH64
+// Store NULL Pointer
+instruct storeN0(memoryI mem, immN0 src) %{
+  match(Set mem (StoreN mem src));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "str_32 ZR,$mem\t! compressed ptr" %}
+  ins_encode %{
+    __ str_32(ZR, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
+
+// Store Compressed Klass Pointer
+instruct storeNKlass(memoryI mem, store_RegN src) %{
+  match(Set mem (StoreNKlass mem src));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+
+  format %{ "str_32 $src,$mem\t! compressed klass ptr" %}
+  ins_encode %{
+    __ str_32($src$$Register, $mem$$Address);
+  %}
+  ins_pipe(istore_mem_reg);
+%}
+#endif
+
+// Store Double
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct storeDoff(regD src, memoryScaledD mem, aimmX off, iRegP tmp) %{
+  match(Set mem (StoreD (AddP mem off) src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "STR    $src,$mem+$off\t! double temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ str_d($src$$FloatRegister, nmem);
+  %}
+  ins_pipe(fstoreD_mem_reg);
+%}
+#endif
+
+instruct storeD(memoryD mem, regD src) %{
+  match(Set mem (StoreD mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  // FIXME: needs to be atomic, but the ARMv7 A.R.M. guarantees that
+  // only LDREXD and STREXD are 64-bit single-copy atomic
+  format %{ "FSTD   $src,$mem" %}
+  ins_encode %{
+    __ str_double($src$$FloatRegister, $mem$$Address);
+  %}
+  ins_pipe(fstoreD_mem_reg);
+%}
+
+#ifdef AARCH64
+instruct movI2F(regF dst, iRegI src) %{
+  match(Set dst src);
+  size(4);
+
+  format %{ "FMOV_sw $dst,$src\t! movI2F" %}
+  ins_encode %{
+    __ fmov_sw($dst$$FloatRegister, $src$$Register);
+  %}
+  ins_pipe(ialu_reg); // FIXME
+%}
+
+instruct movF2I(iRegI dst, regF src) %{
+  match(Set dst src);
+  size(4);
+
+  format %{ "FMOV_ws $dst,$src\t! movF2I" %}
+  ins_encode %{
+    __ fmov_ws($dst$$Register, $src$$FloatRegister);
+  %}
+  ins_pipe(ialu_reg); // FIXME
+%}
+#endif
+
+// Store Float
+
+#ifdef AARCH64
+// XXX This variant shouldn't be necessary if 6217251 is implemented
+instruct storeFoff(regF src, memoryScaledF mem, aimmX off, iRegP tmp) %{
+  match(Set mem (StoreF (AddP mem off) src));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST); // assume shift/sign-extend is free
+  effect(TEMP tmp);
+  size(4 * 2);
+
+  format %{ "str_s  $src,$mem+$off\t! float temp=$tmp" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    __ add($tmp$$Register, base, $off$$constant);
+    Address nmem = Address::make_raw($tmp$$reg, $mem$$index, $mem$$scale, $mem$$disp, relocInfo::none);
+    __ str_s($src$$FloatRegister, nmem);
+  %}
+  ins_pipe(fstoreF_mem_reg);
+%}
+#endif
+
+instruct storeF( memoryF mem, regF src) %{
+  match(Set mem (StoreF mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  size(4);
+  format %{ "FSTS    $src,$mem" %}
+  ins_encode %{
+    __ str_float($src$$FloatRegister, $mem$$Address);
+  %}
+  ins_pipe(fstoreF_mem_reg);
+%}
+
+#ifdef AARCH64
+// Convert oop pointer into compressed form
+instruct encodeHeapOop(iRegN dst, iRegP src, flagsReg ccr) %{
+  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
+  match(Set dst (EncodeP src));
+  effect(KILL ccr);
+  format %{ "encode_heap_oop $dst, $src" %}
+  ins_encode %{
+    __ encode_heap_oop($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
+  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
+  match(Set dst (EncodeP src));
+  format %{ "encode_heap_oop_not_null $dst, $src" %}
+  ins_encode %{
+    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct decodeHeapOop(iRegP dst, iRegN src, flagsReg ccr) %{
+  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
+            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
+  match(Set dst (DecodeN src));
+  effect(KILL ccr);
+  format %{ "decode_heap_oop $dst, $src" %}
+  ins_encode %{
+    __ decode_heap_oop($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
+  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
+            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
+  match(Set dst (DecodeN src));
+  format %{ "decode_heap_oop_not_null $dst, $src" %}
+  ins_encode %{
+    __ decode_heap_oop_not_null($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
+  match(Set dst (EncodePKlass src));
+  format %{ "encode_klass_not_null $dst, $src" %}
+  ins_encode %{
+    __ encode_klass_not_null($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
+  match(Set dst (DecodeNKlass src));
+  format %{ "decode_klass_not_null $dst, $src" %}
+  ins_encode %{
+    __ decode_klass_not_null($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+#endif // AARCH64
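+
+// Compressed-oop arithmetic, as a sketch (a zero-based heap lets the
+// base add and the null check drop out):
+//   encode: dst = (src == NULL) ? 0 : (src - heap_base) >> shift
+//   decode: dst = (src == 0) ? NULL : heap_base + ((uintptr_t)src << shift)
+// The _not_null variants omit the null test.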
+
+//----------MemBar Instructions-----------------------------------------------
+// Memory barrier flavors
+
+// TODO: take advantage of AArch64 load-acquire/store-release instructions and
+// pattern-match out unnecessary membars
+instruct membar_storestore() %{
+  match(MemBarStoreStore);
+  ins_cost(4*MEMORY_REF_COST);
+
+  size(4);
+  format %{ "MEMBAR-storestore" %}
+  ins_encode %{
+    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
+  %}
+  ins_pipe(long_memory_op);
+%}
+
+instruct membar_acquire() %{
+  match(MemBarAcquire);
+  match(LoadFence);
+  ins_cost(4*MEMORY_REF_COST);
+
+  size(4);
+  format %{ "MEMBAR-acquire" %}
+  ins_encode %{
+    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), noreg);
+  %}
+  ins_pipe(long_memory_op);
+%}
+
+instruct membar_acquire_lock() %{
+  match(MemBarAcquireLock);
+  ins_cost(0);
+
+  size(0);
+  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
+  ins_encode( );
+  ins_pipe(empty);
+%}
+
+instruct membar_release() %{
+  match(MemBarRelease);
+  match(StoreFence);
+  ins_cost(4*MEMORY_REF_COST);
+
+  size(4);
+  format %{ "MEMBAR-release" %}
+  ins_encode %{
+    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), noreg);
+  %}
+  ins_pipe(long_memory_op);
+%}
+
+instruct membar_release_lock() %{
+  match(MemBarReleaseLock);
+  ins_cost(0);
+
+  size(0);
+  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
+  ins_encode( );
+  ins_pipe(empty);
+%}
+
+instruct membar_volatile() %{
+  match(MemBarVolatile);
+  ins_cost(4*MEMORY_REF_COST);
+
+  size(4);
+  format %{ "MEMBAR-volatile" %}
+  ins_encode %{
+    __ membar(MacroAssembler::StoreLoad, noreg);
+  %}
+  ins_pipe(long_memory_op);
+%}
+
+instruct unnecessary_membar_volatile() %{
+  match(MemBarVolatile);
+  predicate(Matcher::post_store_load_barrier(n));
+  ins_cost(0);
+
+  size(0);
+  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
+  ins_encode( );
+  ins_pipe(empty);
+%}
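+
+// Rough mapping used above (each lowers to a DMB variant):
+//   acquire  = LoadLoad|LoadStore    after a volatile load
+//   release  = StoreStore|LoadStore  before a volatile store
+//   volatile = StoreLoad             after a volatile store
+// A volatile store is thus MEMBAR-release; STR; MEMBAR-volatile, with
+// the trailing barrier elided when post_store_load_barrier proves it
+// redundant.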
+
+//----------Register Move Instructions-----------------------------------------
+// instruct roundDouble_nop(regD dst) %{
+//   match(Set dst (RoundDouble dst));
+//   ins_pipe(empty);
+// %}
+
+
+// instruct roundFloat_nop(regF dst) %{
+//   match(Set dst (RoundFloat dst));
+//   ins_pipe(empty);
+// %}
+
+
+#ifdef AARCH64
+// 0 constant in register
+instruct zrImmI0(ZRRegI dst, immI0 imm) %{
+  match(Set dst imm);
+  size(0);
+  ins_cost(0);
+
+  format %{ "! ZR (int 0)" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(ialu_none);
+%}
+
+// 0 constant in register
+instruct zrImmL0(ZRRegL dst, immL0 imm) %{
+  match(Set dst imm);
+  size(0);
+  ins_cost(0);
+
+  format %{ "! ZR (long 0)" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(ialu_none);
+%}
+
+#ifdef XXX
+// 0 constant in register
+instruct zrImmN0(ZRRegN dst, immN0 imm) %{
+  match(Set dst imm);
+  size(0);
+  ins_cost(0);
+
+  format %{ "! ZR (compressed pointer NULL)" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(ialu_none);
+%}
+
+// 0 constant in register
+instruct zrImmP0(ZRRegP dst, immP0 imm) %{
+  match(Set dst imm);
+  size(0);
+  ins_cost(0);
+
+  format %{ "! ZR (NULL)" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(ialu_none);
+%}
+#endif
+#endif // AARCH64
+
+// Cast Index to Pointer for unsafe natives
+instruct castX2P(iRegX src, iRegP dst) %{
+  match(Set dst (CastX2P src));
+
+  format %{ "MOV    $dst,$src\t! IntX->Ptr if $dst != $src" %}
+  ins_encode %{
+    if ($dst$$Register !=  $src$$Register) {
+      __ mov($dst$$Register, $src$$Register);
+    }
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// Cast Pointer to Index for unsafe natives
+instruct castP2X(iRegP src, iRegX dst) %{
+  match(Set dst (CastP2X src));
+
+  format %{ "MOV    $dst,$src\t! Ptr->IntX if $dst != $src" %}
+  ins_encode %{
+    if ($dst$$Register !=  $src$$Register) {
+      __ mov($dst$$Register, $src$$Register);
+    }
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+#ifndef AARCH64
+//----------Conditional Move---------------------------------------------------
+// Conditional move
+instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
+  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src\t! int" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+#endif
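+
+// A 32-bit ARM CMove is a predicated MOV into the existing dst, while
+// the AArch64 rules below use the three-operand CSEL. Illustrative
+// lowering of x = (a < b) ? c : d, assuming x already holds d:
+//   CMP    a, b
+//   MOVLT  x, c            // arm32
+//   CSEL   x, c, d, LT     // aarch64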
+
+#ifdef AARCH64
+instruct cmovI_reg3(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovL_reg3(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovP_reg3(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src1, iRegP src2) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovN_reg3(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src1, iRegN src2) %{
+  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovIP_reg3(cmpOpP cmp, flagsRegP icc, iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovLP_reg3(cmpOpP cmp, flagsRegP icc, iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovPP_reg3(cmpOpP cmp, flagsRegP icc, iRegP dst, iRegP src1, iRegP src2) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovNP_reg3(cmpOpP cmp, flagsRegP icc, iRegN dst, iRegN src1, iRegN src2) %{
+  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovIU_reg3(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovLU_reg3(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovPU_reg3(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src1, iRegP src2) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovNU_reg3(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src1, iRegN src2) %{
+  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovIZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! int" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovLZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! long" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovPZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, iRegP src1, iRegP src2) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovNZ_reg3(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegN dst, iRegN src1, iRegN src2) %{
+  match(Set dst (CMoveN (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "CSEL $dst,$src1,$src2,$cmp\t! compressed ptr" %}
+  ins_encode %{
+    __ csel($dst$$Register, $src1$$Register, $src2$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+#endif // AARCH64
+
+#ifndef AARCH64
+instruct cmovIP_immMov(cmpOpP cmp, flagsRegP pcc, iRegI dst, immIMov src) %{
+  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
+  ins_cost(140);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovIP_imm16(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI16 src) %{
+  match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
+  ins_cost(140);
+  size(4);
+  format %{ "MOVw$cmp  $dst,$src" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+#endif
+
+instruct cmovI_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+#ifdef AARCH64
+instruct cmovL_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src\t! long" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+#endif
+
+#ifndef AARCH64
+instruct cmovI_immMov(cmpOp cmp, flagsReg icc, iRegI dst, immIMov src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovII_imm16(cmpOp cmp, flagsReg icc, iRegI dst, immI16 src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+  size(4);
+  format %{ "MOVw$cmp  $dst,$src" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+#endif
+
+instruct cmovII_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, iRegI src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+#ifndef AARCH64
+instruct cmovII_immMov_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, immIMov src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(140);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovII_imm16_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegI dst, immI16 src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(140);
+  size(4);
+  format %{ "MOVW$cmp  $dst,$src" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+#endif
+
+instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+#ifndef AARCH64
+instruct cmovIIu_immMov(cmpOpU cmp, flagsRegU icc, iRegI dst, immIMov src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovIIu_imm16(cmpOpU cmp, flagsRegU icc, iRegI dst, immI16 src) %{
+  match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+  size(4);
+  format %{ "MOVW$cmp  $dst,$src" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+#endif
+
+// Conditional move
+instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "MOV$cmp  $dst,$src" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
+  match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
+  ins_cost(140);
+  size(4);
+#ifdef AARCH64
+  format %{ "MOV$cmp  $dst,ZR" %}
+#else
+  format %{ "MOV$cmp  $dst,$src" %}
+#endif
+  ins_encode %{
+#ifdef AARCH64
+    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
+#else
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+#endif
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+// This instruction also works with CmpN so we don't need cmovPN_reg.
+instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovPI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(150);
+
+  size(4);
+  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+
+  size(4);
+#ifdef AARCH64
+  format %{ "MOV$cmp  $dst,ZR\t! ptr" %}
+#else
+  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
+#endif
+  ins_encode %{
+#ifdef AARCH64
+    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
+#else
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+#endif
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovPI_imm_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegP dst, immP0 src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(140);
+
+  size(4);
+#ifdef AARCH64
+  format %{ "MOV$cmp  $dst,ZR\t! ptr" %}
+#else
+  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
+#endif
+  ins_encode %{
+#ifdef AARCH64
+    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
+#else
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+#endif
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
+  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+
+  size(4);
+#ifdef AARCH64
+  format %{ "MOV$cmp  $dst,ZR\t! ptr" %}
+#else
+  format %{ "MOV$cmp  $dst,$src\t! ptr" %}
+#endif
+  ins_encode %{
+#ifdef AARCH64
+    __ mov($dst$$Register,             ZR, (AsmCondition)($cmp$$cmpcode));
+#else
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+#endif
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+#ifdef AARCH64
+// Conditional move
+instruct cmovF_reg(cmpOp cmp, flagsReg icc, regF dst, regF src1, regF src2) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovD_reg(cmpOp cmp, flagsReg icc, regD dst, regD src1, regD src2) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovFP_reg(cmpOpP cmp, flagsRegP icc, regF dst, regF src1, regF src2) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovDP_reg(cmpOpP cmp, flagsRegP icc, regD dst, regD src1, regD src2) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovFU_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src1, regF src2) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovDU_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src1, regD src2) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovFZ_reg(cmpOp0 cmp, flagsReg_EQNELTGE icc, regF dst, regF src1, regF src2) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_s $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovDZ_reg(cmpOp0 cmp, flagsReg_EQNELTGE icc, regD dst, regD src1, regD src2) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary src2 src1)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCSEL_d $dst,$src1,$src2,$cmp" %}
+  ins_encode %{
+    __ fcsel_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+#else // !AARCH64
+
+// Conditional move
+instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
+  match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCPYS$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FCPYS$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovFI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, regF dst, regF src) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(150);
+
+  size(4);
+  format %{ "FCPYS$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
+  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FCPYS$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpys($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_float_move);
+%}
+
+// Conditional move
+instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
+  match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
+  ins_cost(150);
+  size(4);
+  format %{ "FCPYD$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_double_move);
+%}
+
+instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FCPYD$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_double_move);
+%}
+
+instruct cmovDI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, regD dst, regD src) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(150);
+
+  size(4);
+  format %{ "FCPYD$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_double_move);
+%}
+
+instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
+  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(4);
+  format %{ "FCPYD$cmp $dst,$src" %}
+  ins_encode %{
+    __ fcpyd($dst$$FloatRegister, $src$$FloatRegister, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(int_conditional_double_move);
+%}
+
+// Conditional move
+instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
+  ins_cost(150);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
+            "MOV$cmp  $dst.hi,$src.hi" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// TODO: try immLRot2 instead: the pair (0, $con$$constant) would
+// become (hi($con$$constant), lo($con$$constant))
+instruct cmovLP_immRot(cmpOpP cmp, flagsRegP pcc, iRegL dst, immLlowRot src) %{
+  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
+  ins_cost(140);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
+            "MOV$cmp  $dst.hi,0" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovLP_imm16(cmpOpP cmp, flagsRegP pcc, iRegL dst, immL16 src) %{
+  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
+  ins_cost(140);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
+            "MOV$cmp  $dst.hi,0" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
+            "MOV$cmp  $dst.hi,$src.hi" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+instruct cmovLI_reg_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(150);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
+            "MOV$cmp  $dst.hi,$src.hi" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// TODO: try immLRot2 instead: the pair (0, $con$$constant) would
+// become (hi($con$$constant), lo($con$$constant))
+instruct cmovLI_immRot(cmpOp cmp, flagsReg icc, iRegL dst, immLlowRot src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
+            "MOV$cmp  $dst.hi,0" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+// TODO: try immLRot2 instead: the pair (0, $con$$constant) would
+// become (hi($con$$constant), lo($con$$constant))
+instruct cmovLI_immRot_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, immLlowRot src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(140);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
+            "MOV$cmp  $dst.hi,0" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovLI_imm16(cmpOp cmp, flagsReg icc, iRegL dst, immL16 src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  ins_cost(140);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
+            "MOV$cmp  $dst.hi,0" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+    __ movw($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovLI_imm16_EQNELTGE(cmpOp0 cmp, flagsReg_EQNELTGE icc, iRegL dst, immL16 src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::eq ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt ||
+            _kids[0]->_kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ge);
+  ins_cost(140);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src\t! long\n\t"
+            "MOV$cmp  $dst.hi,0" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$constant, (AsmCondition)($cmp$$cmpcode));
+    __ movw($dst$$Register->successor(), 0, (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_imm);
+%}
+
+instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+  ins_cost(150);
+
+  size(8);
+  format %{ "MOV$cmp  $dst.lo,$src.lo\t! long\n\t"
+            "MOV$cmp  $dst.hi,$src.hi" %}
+  ins_encode %{
+    __ mov($dst$$Register, $src$$Register, (AsmCondition)($cmp$$cmpcode));
+    __ mov($dst$$Register->successor(), $src$$Register->successor(), (AsmCondition)($cmp$$cmpcode));
+  %}
+  ins_pipe(ialu_reg);
+%}
+#endif // !AARCH64
+
+
+//----------OS and Locking Instructions----------------------------------------
+
+// This name is KNOWN by the ADLC and cannot be changed.
+// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
+// for this instruction.
+instruct tlsLoadP(RthreadRegP dst) %{
+  match(Set dst (ThreadLocal));
+
+  size(0);
+  ins_cost(0);
+  format %{ "! TLS is in $dst" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(ialu_none);
+%}
+
+instruct checkCastPP( iRegP dst ) %{
+  match(Set dst (CheckCastPP dst));
+
+  size(0);
+  format %{ "! checkcastPP of $dst" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(empty);
+%}
+
+
+instruct castPP( iRegP dst ) %{
+  match(Set dst (CastPP dst));
+  format %{ "! castPP of $dst" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(empty);
+%}
+
+instruct castII( iRegI dst ) %{
+  match(Set dst (CastII dst));
+  format %{ "! castII of $dst" %}
+  ins_encode( /*empty encoding*/ );
+  ins_cost(0);
+  ins_pipe(empty);
+%}
+
+//----------Arithmetic Instructions--------------------------------------------
+// Addition Instructions
+// Register Addition
+instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (AddI src1 src2));
+
+  size(4);
+  format %{ "add_32 $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct addshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (AddI (LShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "add_32 $dst,$src3,$src1<<$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+#ifdef AARCH64
+#ifdef TODO
+instruct addshlL_reg_imm_reg(iRegL dst, iRegL src1, immU6 src2, iRegL src3) %{
+  match(Set dst (AddL (LShiftL src1 src2) src3));
+
+  size(4);
+  format %{ "ADD    $dst,$src3,$src1<<$src2\t! long" %}
+  ins_encode %{
+    __ add($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+#endif
+
+instruct addshlI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
+  match(Set dst (AddI (LShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "add_32 $dst,$src3,$src1<<$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
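+
+// These rules fold the shift into the ADD's flexible second operand,
+// computing dst = src3 + (src1 << src2) in one instruction:
+//   ADD  dst, src3, src1, LSL #src2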
+
+#ifndef AARCH64
+instruct addsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (AddI (RShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "add_32 $dst,$src3,$src1>>$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct addsarI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
+  match(Set dst (AddI (RShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "add_32 $dst,$src3,$src1>>$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct addshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (AddI (URShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "add_32 $dst,$src3,$src1>>>$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct addshrI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
+  match(Set dst (AddI (URShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "add_32 $dst,$src3,$src1>>>$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Immediate Addition
+instruct addI_reg_aimmI(iRegI dst, iRegI src1, aimmI src2) %{
+  match(Set dst (AddI src1 src2));
+
+  size(4);
+  format %{ "add_32 $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ add_32($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Pointer Register Addition
+instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
+  match(Set dst (AddP src1 src2));
+
+  size(4);
+  format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
+  ins_encode %{
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifdef AARCH64
+// unshifted I2L operand
+operand unshiftedI2L(iRegI src2) %{
+//constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(ConvI2L src2);
+
+  op_cost(1);
+  format %{ "$src2.w" %}
+  interface(MEMORY_INTER) %{
+    base($src2);
+    index(0xff);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+// shifted I2L operand
+operand shiftedI2L(iRegI src2, immI_0_4 src3) %{
+//constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(LShiftX (ConvI2L src2) src3);
+
+  op_cost(1);
+  format %{ "$src2.w << $src3" %}
+  interface(MEMORY_INTER) %{
+    base($src2);
+    index(0xff);
+    scale($src3);
+    disp(0x0);
+  %}
+%}
+
+opclass shiftedRegI(shiftedI2L, unshiftedI2L);
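+
+// Note: the operands above reuse the MEMORY_INTER interface purely as a
+// carrier for a base register plus scale; they are not memory operands.
+// shiftedRegI lets the AddP/AddL rules below emit a single
+// extended-register add, e.g. 'ADD x0,x1,w2,SXTW #2' adds the
+// sign-extended (and shifted) int in one instruction.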
+
+instruct shlL_reg_regI(iRegL dst, iRegI src1, immU6 src2) %{
+  match(Set dst (LShiftL (ConvI2L src1) src2));
+
+  size(4);
+  format %{ "LSL    $dst,$src1.w,$src2\t! ptr" %}
+  ins_encode %{
+    int c = $src2$$constant;
+    int r = 64 - c;
+    int s = 31;
+    if (s >= r) {
+      s = r - 1;
+    }
+    __ sbfm($dst$$Register, $src1$$Register, r, s);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
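+
+// The SBFM above implements (long)(int)$src1 << $src2 as a signed
+// bit-field insert: immr is 64-c (the rotation) and imms caps the field
+// width at 32 bits. Worked examples: c==3 gives r==61, s==31, i.e.
+// SBFIZ dst,src1,#3,#32; c==40 gives r==24 with s clamped to 23, i.e.
+// SBFIZ dst,src1,#40,#24 (only 24 bits of the int still fit).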
+
+instruct addP_reg_regI(iRegP dst, iRegP src1, shiftedRegI src2) %{
+  match(Set dst (AddP src1 src2));
+
+  ins_cost(DEFAULT_COST * 3/2);
+  size(4);
+  format %{ "ADD    $dst,$src1,$src2, sxtw\t! ptr" %}
+  ins_encode %{
+    Register base = reg_to_register_object($src2$$base);
+    __ add($dst$$Register, $src1$$Register, base, ex_sxtw, $src2$$scale);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+// shifted iRegX operand
+operand shiftedX(iRegX src2, shimmX src3) %{
+//constraint(ALLOC_IN_RC(sp_ptr_reg));
+  match(LShiftX src2 src3);
+
+  op_cost(1);
+  format %{ "$src2 << $src3" %}
+  interface(MEMORY_INTER) %{
+    base($src2);
+    index(0xff);
+    scale($src3);
+    disp(0x0);
+  %}
+%}
+
+instruct addshlP_reg_reg_imm(iRegP dst, iRegP src1, shiftedX src2) %{
+  match(Set dst (AddP src1 src2));
+
+  ins_cost(DEFAULT_COST * 3/2);
+  size(4);
+  format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
+  ins_encode %{
+    Register base = reg_to_register_object($src2$$base);
+    __ add($dst$$Register, $src1$$Register, AsmOperand(base, lsl, $src2$$scale));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Pointer Immediate Addition
+instruct addP_reg_aimmX(iRegP dst, iRegP src1, aimmX src2) %{
+  match(Set dst (AddP src1 src2));
+
+  size(4);
+  format %{ "ADD    $dst,$src1,$src2\t! ptr" %}
+  ins_encode %{
+    __ add($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Long Addition
+#ifdef AARCH64
+instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (AddL src1 src2));
+  size(4);
+  format %{ "ADD     $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addL_reg_regI(iRegL dst, iRegL src1, shiftedRegI src2) %{
+  match(Set dst (AddL src1 src2));
+
+  ins_cost(DEFAULT_COST * 3/2);
+  size(4);
+  format %{ "ADD    $dst,$src1,$src2, sxtw\t! long" %}
+  ins_encode %{
+    Register base = reg_to_register_object($src2$$base);
+    __ add($dst$$Register, $src1$$Register, base, ex_sxtw, $src2$$scale);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#else
+instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2, flagsReg ccr) %{
+  match(Set dst (AddL src1 src2));
+  effect(KILL ccr);
+  size(8);
+  format %{ "ADDS    $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
+            "ADC     $dst.hi,$src1.hi,$src2.hi" %}
+  ins_encode %{
+    __ adds($dst$$Register, $src1$$Register, $src2$$Register);
+    __ adc($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
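+
+// On arm32 the 64-bit add is the classic two-instruction carry chain:
+// ADDS adds the low words and sets the carry flag (hence KILL ccr), and
+// ADC folds that carry into the high-word add. E.g. adding 1 to
+// 0x00000000FFFFFFFF wraps the low word to 0 with C=1, and ADC then
+// yields the high word 1.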
+
+#ifdef AARCH64
+// Immediate Addition
+instruct addL_reg_aimm(iRegL dst, iRegL src1, aimmL src2) %{
+  match(Set dst (AddL src1 src2));
+
+  size(4);
+  format %{ "ADD    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ add($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+instruct addL_reg_immLneg(iRegL dst, iRegL src1, aimmLneg src2) %{
+  match(Set dst (SubL src1 src2));
+
+  size(4);
+  format %{ "ADD    $dst,$src1,-($src2)\t! long" %}
+  ins_encode %{
+    __ add($dst$$Register, $src1$$Register, -$src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+// TODO
+#endif
+
+#ifndef AARCH64
+// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
+// (hi($con$$constant), lo($con$$constant))
+instruct addL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con, flagsReg ccr) %{
+  match(Set dst (AddL src1 con));
+  effect(KILL ccr);
+  size(8);
+  format %{ "ADDS    $dst.lo,$src1.lo,$con\t! long\n\t"
+            "ADC     $dst.hi,$src1.hi,0" %}
+  ins_encode %{
+    __ adds($dst$$Register, $src1$$Register, $con$$constant);
+    __ adc($dst$$Register->successor(), $src1$$Register->successor(), 0);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif
+
+//----------Conditional_store--------------------------------------------------
+// Conditional-store of the updated heap-top.
+// Used during allocation of the shared heap.
+// Sets flags (EQ) on success.
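+//
+// A rough sketch of the shape C2 gives the allocation fast path (names
+// illustrative):
+//   old = LoadPLocked(heap_top);                // LDREX / LDXR
+//   StorePConditional(heap_top, old, old+size); // STREX / STXR, EQ on success
+// The pair must stay together: the store-conditional succeeds only if no
+// other CPU has written heap_top since the load-exclusive.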
+
+// TODO: optimize out barriers with AArch64 load-acquire/store-release
+// LoadP-locked.
+instruct loadPLocked(iRegP dst, memoryex mem) %{
+  match(Set dst (LoadPLocked mem));
+  size(4);
+  format %{ "LDREX  $dst,$mem" %}
+  ins_encode %{
+#ifdef AARCH64
+    Register base = reg_to_register_object($mem$$base);
+    __ ldxr($dst$$Register, base);
+#else
+    __ ldrex($dst$$Register,$mem$$Address);
+#endif
+  %}
+  ins_pipe(iload_mem);
+%}
+
+instruct storePConditional( memoryex heap_top_ptr, iRegP oldval, iRegP newval, iRegI tmp, flagsRegP pcc ) %{
+  predicate(_kids[1]->_kids[0]->_leaf->Opcode() == Op_LoadPLocked); // only works in conjunction with a LoadPLocked node
+  match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
+  effect( TEMP tmp );
+  size(8);
+  format %{ "STREX  $tmp,$newval,$heap_top_ptr\n\t"
+            "CMP    $tmp, 0" %}
+  ins_encode %{
+#ifdef AARCH64
+    Register base = reg_to_register_object($heap_top_ptr$$base);
+    __ stxr($tmp$$Register, $newval$$Register, base);
+#else
+    __ strex($tmp$$Register, $newval$$Register, $heap_top_ptr$$Address);
+#endif
+    __ cmp($tmp$$Register, 0);
+  %}
+  ins_pipe( long_memory_op );
+%}
+
+// Conditional-store of an intx value.
+instruct storeXConditional( memoryex mem, iRegX oldval, iRegX newval, iRegX tmp, flagsReg icc ) %{
+#ifdef AARCH64
+  match(Set icc (StoreLConditional mem (Binary oldval newval)));
+  effect( TEMP tmp );
+  size(28);
+  format %{ "loop:\n\t"
+            "LDXR     $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem], DOESN'T set $newval=[$mem] in any case\n\t"
+            "SUBS     $tmp, $tmp, $oldval\n\t"
+            "B.ne     done\n\t"
+            "STXR     $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop\n\t"
+            "CMP      $tmp, 0\n\t"
+            "done:\n\t"
+            "membar   LoadStore|LoadLoad" %}
+#else
+  match(Set icc (StoreIConditional mem (Binary oldval newval)));
+  effect( TEMP tmp );
+  size(28);
+  format %{ "loop: \n\t"
+            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem], DOESN'T set $newval=[$mem] in any case\n\t"
+            "XORS     $tmp,$tmp, $oldval\n\t"
+            "STREX.eq $tmp, $newval, $mem\n\t"
+            "CMP.eq   $tmp, 1 \n\t"
+            "B.eq     loop \n\t"
+            "TEQ      $tmp, 0\n\t"
+            "membar   LoadStore|LoadLoad" %}
+#endif
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+#ifdef AARCH64
+// FIXME: use load-acquire/store-release, remove membar?
+    Label done;
+    Register base = reg_to_register_object($mem$$base);
+    __ ldxr($tmp$$Register, base);
+    __ subs($tmp$$Register, $tmp$$Register, $oldval$$Register);
+    __ b(done, ne);
+    __ stxr($tmp$$Register, $newval$$Register, base);
+    __ cbnz_w($tmp$$Register, loop);
+    __ cmp($tmp$$Register, 0);
+    __ bind(done);
+#else
+    __ ldrex($tmp$$Register, $mem$$Address);
+    __ eors($tmp$$Register, $tmp$$Register, $oldval$$Register);
+    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
+    __ cmp($tmp$$Register, 1, eq);
+    __ b(loop, eq);
+    __ teq($tmp$$Register, 0);
+#endif
+    // Used by biased locking only; requires a membar.
+    __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadStore | MacroAssembler::LoadLoad), noreg);
+  %}
+  ins_pipe( long_memory_op );
+%}
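+
+// Note on the arm32 encoding: EORS sets EQ exactly when $tmp == $oldval,
+// STREX.eq then attempts the store ($tmp becomes 0 on success, 1 if the
+// reservation was lost), CMP.eq $tmp,1 / B.eq retries only that losing
+// case, and the final TEQ re-derives EQ as the success flag in $icc.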
+
+// No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
+
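+// These nodes come from the Unsafe.compareAndSwap{Int,Long,Object}
+// intrinsics (e.g. AtomicInteger.compareAndSet ends up here as
+// CompareAndSwapI); the boolean result is materialized in $res as 0 or 1.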
+#ifdef AARCH64
+// TODO: if combined with membar, elide membar and use
+// load-acquire/store-release if appropriate
+instruct compareAndSwapL_bool(memoryex mem, iRegL oldval, iRegL newval, iRegI res, iRegI tmp, flagsReg ccr) %{
+  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
+  effect( KILL ccr, TEMP tmp);
+  size(24);
+  format %{ "loop:\n\t"
+            "LDXR     $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
+            "CMP      $tmp, $oldval\n\t"
+            "B.ne     done\n\t"
+            "STXR     $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop\n\t"
+            "done:\n\t"
+            "CSET_w   $res, eq" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    Label loop, done;
+    __ bind(loop);
+    __ ldxr($tmp$$Register, base);
+    __ cmp($tmp$$Register, $oldval$$Register);
+    __ b(done, ne);
+    __ stxr($tmp$$Register, $newval$$Register, base);
+    __ cbnz_w($tmp$$Register, loop);
+    __ bind(done);
+    __ cset_w($res$$Register, eq);
+  %}
+  ins_pipe( long_memory_op );
+%}
+
+instruct compareAndSwapI_bool(memoryex mem, iRegI oldval, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr) %{
+  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
+  effect( KILL ccr, TEMP tmp);
+  size(24);
+  format %{ "loop:\n\t"
+            "LDXR_w   $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
+            "CMP_w    $tmp, $oldval\n\t"
+            "B.ne     done\n\t"
+            "STXR_w   $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop\n\t"
+            "done:\n\t"
+            "CSET_w   $res, eq" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    Label loop, done;
+    __ bind(loop);
+    __ ldxr_w($tmp$$Register, base);
+    __ cmp_w($tmp$$Register, $oldval$$Register);
+    __ b(done, ne);
+    __ stxr_w($tmp$$Register, $newval$$Register,  base);
+    __ cbnz_w($tmp$$Register, loop);
+    __ bind(done);
+    __ cset_w($res$$Register, eq);
+  %}
+  ins_pipe( long_memory_op );
+%}
+
+// tmp must use iRegI instead of iRegN until 8051805 is fixed.
+instruct compareAndSwapN_bool(memoryex mem, iRegN oldval, iRegN newval, iRegI res, iRegI tmp, flagsReg ccr) %{
+  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
+  effect( KILL ccr, TEMP tmp);
+  size(24);
+  format %{ "loop:\n\t"
+            "LDXR_w   $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
+            "CMP_w    $tmp, $oldval\n\t"
+            "B.ne     done\n\t"
+            "STXR_w   $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop\n\t"
+            "done:\n\t"
+            "CSET_w   $res, eq" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    Label loop, done;
+    __ bind(loop);
+    __ ldxr_w($tmp$$Register, base);
+    __ cmp_w($tmp$$Register, $oldval$$Register);
+    __ b(done, ne);
+    __ stxr_w($tmp$$Register, $newval$$Register,  base);
+    __ cbnz_w($tmp$$Register, loop);
+    __ bind(done);
+    __ cset_w($res$$Register, eq);
+  %}
+  ins_pipe( long_memory_op );
+%}
+
+instruct compareAndSwapP_bool(memoryex mem, iRegP oldval, iRegP newval, iRegI res, iRegI tmp, flagsReg ccr) %{
+  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+  effect( KILL ccr, TEMP tmp);
+  size(24);
+  format %{ "loop:\n\t"
+            "LDXR     $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
+            "CMP      $tmp, $oldval\n\t"
+            "B.ne     done\n\t"
+            "STXR     $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop\n\t"
+            "done:\n\t"
+            "CSET_w   $res, eq" %}
+  ins_encode %{
+    Register base = reg_to_register_object($mem$$base);
+    Label loop, done;
+    __ bind(loop);
+    __ ldxr($tmp$$Register, base);
+    __ cmp($tmp$$Register, $oldval$$Register);
+    __ b(done, ne);
+    __ stxr($tmp$$Register, $newval$$Register,  base);
+    __ cbnz_w($tmp$$Register, loop);
+    __ bind(done);
+    __ cset_w($res$$Register, eq);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else // !AARCH64
+instruct compareAndSwapL_bool(memoryex mem, iRegL oldval, iRegLd newval, iRegI res, iRegLd tmp, flagsReg ccr ) %{
+  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
+  effect( KILL ccr, TEMP tmp);
+  size(32);
+  format %{ "loop: \n\t"
+            "LDREXD   $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
+            "CMP      $tmp.lo, $oldval.lo\n\t"
+            "CMP.eq   $tmp.hi, $oldval.hi\n\t"
+            "STREXD.eq $tmp, $newval, $mem\n\t"
+            "MOV.ne   $tmp, 0 \n\t"
+            "XORS.eq  $tmp,$tmp, 1 \n\t"
+            "B.eq     loop \n\t"
+            "MOV      $res, $tmp" %}
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrexd($tmp$$Register, $mem$$Address);
+    __ cmp($tmp$$Register, $oldval$$Register);
+    __ cmp($tmp$$Register->successor(), $oldval$$Register->successor(), eq);
+    __ strexd($tmp$$Register, $newval$$Register, $mem$$Address, eq);
+    __ mov($tmp$$Register, 0, ne);
+    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
+    __ b(loop, eq);
+    __ mov($res$$Register, $tmp$$Register);
+  %}
+  ins_pipe( long_memory_op );
+%}
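+
+// LDREXD/STREXD require an even/odd consecutive register pair, which is
+// what the iRegLd operand class guarantees for $newval and $tmp above.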
+
+
+instruct compareAndSwapI_bool(memoryex mem, iRegI oldval, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr ) %{
+  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
+  effect( KILL ccr, TEMP tmp);
+  size(28);
+  format %{ "loop: \n\t"
+            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
+            "CMP      $tmp, $oldval\n\t"
+            "STREX.eq $tmp, $newval, $mem\n\t"
+            "MOV.ne   $tmp, 0 \n\t"
+            "XORS.eq  $tmp,$tmp, 1 \n\t"
+            "B.eq     loop \n\t"
+            "MOV      $res, $tmp" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($tmp$$Register,$mem$$Address);
+    __ cmp($tmp$$Register, $oldval$$Register);
+    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
+    __ mov($tmp$$Register, 0, ne);
+    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
+    __ b(loop, eq);
+    __ mov($res$$Register, $tmp$$Register);
+  %}
+  ins_pipe( long_memory_op );
+%}
+
+instruct compareAndSwapP_bool(memoryex mem, iRegP oldval, iRegP newval, iRegI res, iRegI tmp, flagsReg ccr ) %{
+  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+  effect( KILL ccr, TEMP tmp);
+  size(28);
+  format %{ "loop: \n\t"
+            "LDREX    $tmp, $mem\t! If $oldval==[$mem] Then store $newval into [$mem]\n\t"
+            "CMP      $tmp, $oldval\n\t"
+            "STREX.eq $tmp, $newval, $mem\n\t"
+            "MOV.ne   $tmp, 0 \n\t"
+            "EORS.eq  $tmp,$tmp, 1 \n\t"
+            "B.eq     loop \n\t"
+            "MOV      $res, $tmp" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($tmp$$Register,$mem$$Address);
+    __ cmp($tmp$$Register, $oldval$$Register);
+    __ strex($tmp$$Register, $newval$$Register, $mem$$Address, eq);
+    __ mov($tmp$$Register, 0, ne);
+    __ eors($tmp$$Register, $tmp$$Register, 1, eq);
+    __ b(loop, eq);
+    __ mov($res$$Register, $tmp$$Register);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif // !AARCH64
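+
+// In the arm32 encodings above, the STREX status has the opposite sense
+// of the CAS result (0 means the store succeeded): MOV.ne $tmp,0 records
+// a value mismatch as failure, while EORS.eq $tmp,$tmp,1 turns a
+// successful store into 1 and, when it leaves 0 (store-exclusive lost),
+// B.eq retries the loop.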
+
+#ifdef AARCH64
+instruct xaddI_aimmI_no_res(memoryex mem, aimmI add, Universe dummy, iRegI tmp1, iRegI tmp2) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem add));
+  effect(TEMP tmp1, TEMP tmp2);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR_w   $tmp1, $mem\n\t"
+            "ADD_w    $tmp1, $tmp1, $add\n\t"
+            "STXR_w   $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr_w($tmp1$$Register, base);
+    __ add_w($tmp1$$Register, $tmp1$$Register, $add$$constant);
+    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xaddI_aimmI_no_res(memoryex mem, aimmI add, Universe dummy, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem add));
+  effect(KILL ccr, TEMP tmp1, TEMP tmp2);
+  size(20);
+  format %{ "loop: \n\t"
+            "LDREX    $tmp1, $mem\n\t"
+            "ADD      $tmp1, $tmp1, $add\n\t"
+            "STREX    $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($tmp1$$Register,$mem$$Address);
+    __ add($tmp1$$Register, $tmp1$$Register, $add$$constant);
+    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
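+
+// The result_not_used() predicate selects this cheaper form when the
+// fetched value is dead (e.g. a getAndAdd whose return value is
+// discarded), so no result register is live across the retry loop.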
+
+#ifdef AARCH64
+instruct xaddI_reg_no_res(memoryex mem, iRegI add, Universe dummy, iRegI tmp1, iRegI tmp2) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem add));
+  effect(TEMP tmp1, TEMP tmp2);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR_w   $tmp1, $mem\n\t"
+            "ADD_w    $tmp1, $tmp1, $add\n\t"
+            "STXR_w   $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr_w($tmp1$$Register, base);
+    __ add_w($tmp1$$Register, $tmp1$$Register, $add$$Register);
+    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xaddI_reg_no_res(memoryex mem, iRegI add, Universe dummy, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem add));
+  effect(KILL ccr, TEMP tmp1, TEMP tmp2);
+  size(20);
+  format %{ "loop: \n\t"
+            "LDREX    $tmp1, $mem\n\t"
+            "ADD      $tmp1, $tmp1, $add\n\t"
+            "STREX    $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($tmp1$$Register,$mem$$Address);
+    __ add($tmp1$$Register, $tmp1$$Register, $add$$Register);
+    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+
+#ifdef AARCH64
+instruct xaddI_aimmI(memoryex mem, aimmI add, iRegI res, iRegI tmp1, iRegI tmp2) %{
+  match(Set res (GetAndAddI mem add));
+  effect(TEMP tmp1, TEMP tmp2, TEMP res);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR_w   $res, $mem\n\t"
+            "ADD_w    $tmp1, $res, $add\n\t"
+            "STXR_w   $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr_w($res$$Register, base);
+    __ add_w($tmp1$$Register, $res$$Register, $add$$constant);
+    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xaddI_aimmI(memoryex mem, aimmI add, iRegI res, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
+  match(Set res (GetAndAddI mem add));
+  effect(KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
+  size(20);
+  format %{ "loop: \n\t"
+            "LDREX    $res, $mem\n\t"
+            "ADD      $tmp1, $res, $add\n\t"
+            "STREX    $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($res$$Register,$mem$$Address);
+    __ add($tmp1$$Register, $res$$Register, $add$$constant);
+    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+
+#ifdef AARCH64
+instruct xaddI_reg(memoryex mem, iRegI add, iRegI res, iRegI tmp1, iRegI tmp2) %{
+  match(Set res (GetAndAddI mem add));
+  effect(TEMP tmp1, TEMP tmp2, TEMP res);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR_w   $res, $mem\n\t"
+            "ADD_w    $tmp1, $res, $add\n\t"
+            "STXR_w   $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr_w($res$$Register, base);
+    __ add_w($tmp1$$Register, $res$$Register, $add$$Register);
+    __ stxr_w($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xaddI_reg(memoryex mem, iRegI add, iRegI res, iRegI tmp1, iRegI tmp2, flagsReg ccr) %{
+  match(Set res (GetAndAddI mem add));
+  effect(KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
+  size(20);
+  format %{ "loop: \n\t"
+            "LDREX    $res, $mem\n\t"
+            "ADD      $tmp1, $res, $add\n\t"
+            "STREX    $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($res$$Register,$mem$$Address);
+    __ add($tmp1$$Register, $res$$Register, $add$$Register);
+    __ strex($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+
+#ifdef AARCH64
+instruct xaddL_reg_no_res(memoryex mem, iRegL add, Universe dummy, iRegL tmp1, iRegI tmp2) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem add));
+  effect(TEMP tmp1, TEMP tmp2);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR     $tmp1, $mem\n\t"
+            "ADD      $tmp1, $tmp1, $add\n\t"
+            "STXR     $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr($tmp1$$Register, base);
+    __ add($tmp1$$Register, $tmp1$$Register, $add$$Register);
+    __ stxr($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xaddL_reg_no_res(memoryex mem, iRegL add, Universe dummy, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem add));
+  effect( KILL ccr, TEMP tmp1, TEMP tmp2);
+  size(24);
+  format %{ "loop: \n\t"
+            "LDREXD   $tmp1, $mem\n\t"
+            "ADDS     $tmp1.lo, $tmp1.lo, $add.lo\n\t"
+            "ADC      $tmp1.hi, $tmp1.hi, $add.hi\n\t"
+            "STREXD   $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrexd($tmp1$$Register, $mem$$Address);
+    __ adds($tmp1$$Register, $tmp1$$Register, $add$$Register);
+    __ adc($tmp1$$Register->successor(), $tmp1$$Register->successor(), $add$$Register->successor());
+    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+
+#ifdef AARCH64
+instruct xaddL_imm_no_res(memoryex mem, aimmL add, Universe dummy, iRegL tmp1, iRegI tmp2) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem add));
+  effect(TEMP tmp1, TEMP tmp2);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR     $tmp1, $mem\n\t"
+            "ADD      $tmp1, $tmp1, $add\n\t"
+            "STXR     $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr($tmp1$$Register, base);
+    __ add($tmp1$$Register, $tmp1$$Register, $add$$constant);
+    __ stxr($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
+// (hi($con$$constant), lo($con$$constant))
+instruct xaddL_immRot_no_res(memoryex mem, immLlowRot add, Universe dummy, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem add));
+  effect( KILL ccr, TEMP tmp1, TEMP tmp2);
+  size(24);
+  format %{ "loop: \n\t"
+            "LDREXD   $tmp1, $mem\n\t"
+            "ADDS     $tmp1.lo, $tmp1.lo, $add\n\t"
+            "ADC      $tmp1.hi, $tmp1.hi, 0\n\t"
+            "STREXD   $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrexd($tmp1$$Register, $mem$$Address);
+    __ adds($tmp1$$Register, $tmp1$$Register, $add$$constant);
+    __ adc($tmp1$$Register->successor(), $tmp1$$Register->successor(), 0);
+    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+
+#ifdef AARCH64
+instruct xaddL_reg(memoryex mem, iRegL add, iRegL res, iRegL tmp1, iRegI tmp2) %{
+  match(Set res (GetAndAddL mem add));
+  effect(TEMP tmp1, TEMP tmp2, TEMP res);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR     $res, $mem\n\t"
+            "ADD      $tmp1, $res, $add\n\t"
+            "STXR     $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr($res$$Register, base);
+    __ add($tmp1$$Register, $res$$Register, $add$$Register);
+    __ stxr($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xaddL_reg(memoryex mem, iRegL add, iRegLd res, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
+  match(Set res (GetAndAddL mem add));
+  effect( KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
+  size(24);
+  format %{ "loop: \n\t"
+            "LDREXD   $res, $mem\n\t"
+            "ADDS     $tmp1.lo, $res.lo, $add.lo\n\t"
+            "ADC      $tmp1.hi, $res.hi, $add.hi\n\t"
+            "STREXD   $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrexd($res$$Register, $mem$$Address);
+    __ adds($tmp1$$Register, $res$$Register, $add$$Register);
+    __ adc($tmp1$$Register->successor(), $res$$Register->successor(), $add$$Register->successor());
+    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+
+#ifdef AARCH64
+instruct xaddL_imm(memoryex mem, aimmL add, iRegL res, iRegL tmp1, iRegI tmp2) %{
+  match(Set res (GetAndAddL mem add));
+  effect(TEMP tmp1, TEMP tmp2, TEMP res);
+  size(16);
+  format %{ "loop:\n\t"
+            "LDXR     $res, $mem\n\t"
+            "ADD      $tmp1, $res, $add\n\t"
+            "STXR     $tmp2, $tmp1, $mem\n\t"
+            "CBNZ_w   $tmp2, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr($res$$Register, base);
+    __ add($tmp1$$Register, $res$$Register, $add$$constant);
+    __ stxr($tmp2$$Register, $tmp1$$Register, base);
+    __ cbnz_w($tmp2$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
+// (hi($con$$constant), lo($con$$constant))
+instruct xaddL_immRot(memoryex mem, immLlowRot add, iRegLd res, iRegLd tmp1, iRegI tmp2, flagsReg ccr) %{
+  match(Set res (GetAndAddL mem add));
+  effect( KILL ccr, TEMP tmp1, TEMP tmp2, TEMP res);
+  size(24);
+  format %{ "loop: \n\t"
+            "LDREXD   $res, $mem\n\t"
+            "ADDS     $tmp1.lo, $res.lo, $add\n\t"
+            "ADC      $tmp1.hi, $res.hi, 0\n\t"
+            "STREXD   $tmp2, $tmp1, $mem\n\t"
+            "CMP      $tmp2, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrexd($res$$Register, $mem$$Address);
+    __ adds($tmp1$$Register, $res$$Register, $add$$constant);
+    __ adc($tmp1$$Register->successor(), $res$$Register->successor(), 0);
+    __ strexd($tmp2$$Register, $tmp1$$Register, $mem$$Address);
+    __ cmp($tmp2$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+
+#ifdef AARCH64
+instruct xchgI(memoryex mem, iRegI newval, iRegI res, iRegI tmp) %{
+  match(Set res (GetAndSetI mem newval));
+  effect(TEMP tmp, TEMP res);
+  size(12);
+  format %{ "loop:\n\t"
+            "LDXR_w   $res, $mem\n\t"
+            "STXR_w   $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr_w($res$$Register, base);
+    __ stxr_w($tmp$$Register, $newval$$Register, base);
+    __ cbnz_w($tmp$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+
+#ifdef XXX
+// Disabled until 8051805 is fixed.
+instruct xchgN(memoryex mem, iRegN newval, iRegN res, iRegN tmp) %{
+  match(Set res (GetAndSetN mem newval));
+  effect(TEMP tmp, TEMP res);
+  size(12);
+  format %{ "loop:\n\t"
+            "LDXR_w   $res, $mem\n\t"
+            "STXR_w   $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr_w($res$$Register, base);
+    __ stxr_w($tmp$$Register, $newval$$Register, base);
+    __ cbnz_w($tmp$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
+#else
+instruct xchgI(memoryex mem, iRegI newval, iRegI res, iRegI tmp, flagsReg ccr) %{
+  match(Set res (GetAndSetI mem newval));
+  effect(KILL ccr, TEMP tmp, TEMP res);
+  size(16);
+  format %{ "loop: \n\t"
+            "LDREX    $res, $mem\n\t"
+            "STREX    $tmp, $newval, $mem\n\t"
+            "CMP      $tmp, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($res$$Register,$mem$$Address);
+    __ strex($tmp$$Register, $newval$$Register, $mem$$Address);
+    __ cmp($tmp$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif
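+
+// GetAndSet needs no compare: the loop simply retries the exchange until
+// the store-exclusive succeeds (e.g. AtomicInteger.getAndSet arrives here
+// as GetAndSetI), leaving the previous value in $res.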
+
+#ifdef AARCH64
+instruct xchgL(memoryex mem, iRegL newval, iRegL res, iRegI tmp) %{
+  match(Set res (GetAndSetL mem newval));
+  effect(TEMP tmp, TEMP res);
+  size(12);
+  format %{ "loop:\n\t"
+            "LDXR     $res, $mem\n\t"
+            "STXR     $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldxr($res$$Register, base);
+    __ stxr($tmp$$Register, $newval$$Register, base);
+    __ cbnz_w($tmp$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xchgL(memoryex mem, iRegLd newval, iRegLd res, iRegI tmp, flagsReg ccr) %{
+  match(Set res (GetAndSetL mem newval));
+  effect( KILL ccr, TEMP tmp, TEMP res);
+  size(16);
+  format %{ "loop: \n\t"
+            "LDREXD   $res, $mem\n\t"
+            "STREXD   $tmp, $newval, $mem\n\t"
+            "CMP      $tmp, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrexd($res$$Register, $mem$$Address);
+    __ strexd($tmp$$Register, $newval$$Register, $mem$$Address);
+    __ cmp($tmp$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif // !AARCH64
+
+#ifdef AARCH64
+instruct xchgP(memoryex mem, iRegP newval, iRegP res, iRegI tmp) %{
+  match(Set res (GetAndSetP mem newval));
+  effect(TEMP tmp, TEMP res);
+  size(12);
+  format %{ "loop:\n\t"
+            "LDREX    $res, $mem\n\t"
+            "STREX    $tmp, $newval, $mem\n\t"
+            "CBNZ_w   $tmp, loop" %}
+
+  ins_encode %{
+    Label loop;
+    Register base = reg_to_register_object($mem$$base);
+    __ bind(loop);
+    __ ldrex($res$$Register, base);
+    __ strex($tmp$$Register, $newval$$Register, base);
+    __ cbnz_w($tmp$$Register, loop);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#else
+instruct xchgP(memoryex mem, iRegP newval, iRegP res, iRegI tmp, flagsReg ccr) %{
+  match(Set res (GetAndSetP mem newval));
+  effect(KILL ccr, TEMP tmp, TEMP res);
+  size(16);
+  format %{ "loop: \n\t"
+            "LDREX    $res, $mem\n\t"
+            "STREX    $tmp, $newval, $mem\n\t"
+            "CMP      $tmp, 0 \n\t"
+            "B.ne     loop \n\t" %}
+
+  ins_encode %{
+    Label loop;
+    __ bind(loop);
+    __ ldrex($res$$Register,$mem$$Address);
+    __ strex($tmp$$Register, $newval$$Register, $mem$$Address);
+    __ cmp($tmp$$Register, 0);
+    __ b(loop, ne);
+  %}
+  ins_pipe( long_memory_op );
+%}
+#endif // !AARCH64
+
+//---------------------
+// Subtraction Instructions
+// Register Subtraction
+instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (SubI src1 src2));
+
+  size(4);
+  format %{ "sub_32 $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ sub_32($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct subshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (SubI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "SUB    $dst,$src1,$src2<<$src3" %}
+  ins_encode %{
+    __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct subshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (SubI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "sub_32 $dst,$src1,$src2<<$src3\t! int" %}
+  ins_encode %{
+    __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct subsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (SubI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "SUB    $dst,$src1,$src2>>$src3" %}
+  ins_encode %{
+    __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct subsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (SubI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "sub_32 $dst,$src1,$src2>>$src3\t! int" %}
+  ins_encode %{
+    __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct subshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (SubI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "SUB    $dst,$src1,$src2>>>$src3" %}
+  ins_encode %{
+    __ sub($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct subshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (SubI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "sub_32 $dst,$src1,$src2>>>$src3\t! int" %}
+  ins_encode %{
+    __ sub_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
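+// The RSB (reverse subtract) rules below compute operand2 - $src3; since
+// only operand2 can carry a shift, this is how (x << s) - y folds into a
+// single arm32 instruction. AArch64 dropped RSB, hence the guard.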
+#ifndef AARCH64
+instruct rsbshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (SubI (LShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "RSB    $dst,$src3,$src1<<$src2" %}
+  ins_encode %{
+    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct rsbshlI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
+  match(Set dst (SubI (LShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "RSB    $dst,$src3,$src1<<$src2" %}
+  ins_encode %{
+    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct rsbsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (SubI (RShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "RSB    $dst,$src3,$src1>>$src2" %}
+  ins_encode %{
+    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct rsbsarI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
+  match(Set dst (SubI (RShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "RSB    $dst,$src3,$src1>>$src2" %}
+  ins_encode %{
+    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct rsbshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (SubI (URShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "RSB    $dst,$src3,$src1>>>$src2" %}
+  ins_encode %{
+    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct rsbshrI_reg_imm_reg(iRegI dst, iRegI src1, immU5 src2, iRegI src3) %{
+  match(Set dst (SubI (URShiftI src1 src2) src3));
+
+  size(4);
+  format %{ "RSB    $dst,$src3,$src1>>>$src2" %}
+  ins_encode %{
+    __ rsb($dst$$Register, $src3$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+// Immediate Subtraction
+instruct subI_reg_aimmI(iRegI dst, iRegI src1, aimmI src2) %{
+  match(Set dst (SubI src1 src2));
+
+  size(4);
+  format %{ "sub_32 $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ sub_32($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+instruct subI_reg_immRotneg(iRegI dst, iRegI src1, aimmIneg src2) %{
+  match(Set dst (AddI src1 src2));
+
+  size(4);
+  format %{ "sub_32 $dst,$src1,-($src2)\t! int" %}
+  ins_encode %{
+    __ sub_32($dst$$Register, $src1$$Register, -$src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+#ifndef AARCH64
+instruct subI_immRot_reg(iRegI dst, immIRot src1, iRegI src2) %{
+  match(Set dst (SubI src1 src2));
+
+  size(4);
+  format %{ "RSB    $dst,$src2,src1" %}
+  ins_encode %{
+    __ rsb($dst$$Register, $src2$$Register, $src1$$constant);
+  %}
+  ins_pipe(ialu_zero_reg);
+%}
+#endif
+
+// Register Subtraction
+#ifdef AARCH64
+instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (SubL src1 src2));
+
+  size(4);
+  format %{ "SUB    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ sub($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#else
+instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2, flagsReg icc ) %{
+  match(Set dst (SubL src1 src2));
+  effect (KILL icc);
+
+  size(8);
+  format %{ "SUBS   $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
+            "SBC    $dst.hi,$src1.hi,$src2.hi" %}
+  ins_encode %{
+    __ subs($dst$$Register, $src1$$Register, $src2$$Register);
+    __ sbc($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+#ifdef AARCH64
+// Immediate Subtraction
+instruct subL_reg_aimm(iRegL dst, iRegL src1, aimmL src2) %{
+  match(Set dst (SubL src1 src2));
+
+  size(4);
+  format %{ "SUB    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ sub($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+instruct subL_reg_immLneg(iRegL dst, iRegL src1, aimmLneg src2) %{
+  match(Set dst (AddL src1 src2));
+
+  size(4);
+  format %{ "SUB    $dst,$src1,-($src2)\t! long" %}
+  ins_encode %{
+    __ sub($dst$$Register, $src1$$Register, -$src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+// TODO
+#endif
+
+#ifndef AARCH64
+// Immediate Subtraction
+// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
+// (hi($con$$constant), lo($con$$constant))
+instruct subL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con, flagsReg icc) %{
+  match(Set dst (SubL src1 con));
+  effect (KILL icc);
+
+  size(8);
+  format %{ "SUB    $dst.lo,$src1.lo,$con\t! long\n\t"
+            "SBC    $dst.hi,$src1.hi,0" %}
+  ins_encode %{
+    __ subs($dst$$Register, $src1$$Register, $con$$constant);
+    __ sbc($dst$$Register->successor(), $src1$$Register->successor(), 0);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Long negation
+instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2, flagsReg icc) %{
+  match(Set dst (SubL zero src2));
+  effect (KILL icc);
+
+  size(8);
+  format %{ "RSBS   $dst.lo,$src2.lo,0\t! long\n\t"
+            "RSC    $dst.hi,$src2.hi,0" %}
+  ins_encode %{
+    __ rsbs($dst$$Register, $src2$$Register, 0);
+    __ rsc($dst$$Register->successor(), $src2$$Register->successor(), 0);
+  %}
+  ins_pipe(ialu_zero_reg);
+%}
+#endif // !AARCH64
+
+// Multiplication Instructions
+// Integer Multiplication
+// Register Multiplication
+instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (MulI src1 src2));
+
+  size(4);
+  format %{ "mul_32 $dst,$src1,$src2" %}
+  ins_encode %{
+    __ mul_32($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(imul_reg_reg);
+%}
+
+#ifdef AARCH64
+instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (MulL src1 src2));
+  size(4);
+  format %{ "MUL  $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ mul($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(imul_reg_reg);
+%}
+#else
+instruct mulL_lo1_hi2(iRegL dst, iRegL src1, iRegL src2) %{
+  effect(DEF dst, USE src1, USE src2);
+  size(4);
+  format %{ "MUL  $dst.hi,$src1.lo,$src2.hi\t! long" %}
+  ins_encode %{
+    __ mul($dst$$Register->successor(), $src1$$Register, $src2$$Register->successor());
+  %}
+  ins_pipe(imul_reg_reg);
+%}
+
+instruct mulL_hi1_lo2(iRegL dst, iRegL src1, iRegL src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(8);
+  format %{ "MLA  $dst.hi,$src1.hi,$src2.lo,$dst.hi\t! long\n\t"
+            "MOV  $dst.lo, 0"%}
+  ins_encode %{
+    __ mla($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register, $dst$$Register->successor());
+    __ mov($dst$$Register, 0);
+  %}
+  ins_pipe(imul_reg_reg);
+%}
+
+instruct mulL_lo1_lo2(iRegL dst, iRegL src1, iRegL src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(4);
+  format %{ "UMLAL  $dst.lo,$dst.hi,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ umlal($dst$$Register, $dst$$Register->successor(), $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(imul_reg_reg);
+%}
+
+instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (MulL src1 src2));
+
+  expand %{
+    mulL_lo1_hi2(dst, src1, src2);
+    mulL_hi1_lo2(dst, src1, src2);
+    mulL_lo1_lo2(dst, src1, src2);
+  %}
+%}
+#endif // !AARCH64
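+
+// The three-instruction expansion computes, with a = $src1 and b = $src2
+// split into 32-bit halves:
+//   a*b mod 2^64 = ((aL*bH + aH*bL) << 32) + aL*bL
+// MUL forms aL*bH in dst.hi, MLA adds aH*bL, and UMLAL accumulates the
+// full 64-bit product aL*bL into {dst.hi:dst.lo} (dst.lo starts at 0, so
+// the carry out of the low word lands in dst.hi).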
+
+// Integer Division
+// Register Division
+#ifdef AARCH64
+instruct divI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (DivI src1 src2));
+
+  size(4);
+  format %{ "SDIV    $dst,$src1,$src2\t! 32-bit" %}
+  ins_encode %{
+    __ sdiv_w($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg); // FIXME
+%}
+#else
+instruct divI_reg_reg(R1RegI dst, R0RegI src1, R2RegI src2, LRRegP lr, flagsReg ccr) %{
+  match(Set dst (DivI src1 src2));
+  effect( KILL ccr, KILL src1, KILL src2, KILL lr);
+  ins_cost((2+71)*DEFAULT_COST);
+
+  format %{ "DIV   $dst,$src1,$src2 ! call to StubRoutines::Arm::idiv_irem_entry()" %}
+  ins_encode %{
+    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
+  %}
+  ins_pipe(sdiv_reg_reg);
+%}
+#endif
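+
+// The arm32 rule pins its operands to the stub's fixed convention:
+// dividend in R0, divisor in R2, quotient returned in R1, with LR killed
+// by the call. StubRoutines::Arm::idiv_irem_entry() computes both the
+// quotient and the remainder, which is why modI below reuses it.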
+
+// Register Long Division
+#ifdef AARCH64
+instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (DivL src1 src2));
+
+  size(4);
+  format %{ "SDIV    $dst,$src1,$src2" %}
+  ins_encode %{
+    __ sdiv($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg); // FIXME
+%}
+#else
+instruct divL_reg_reg(R0R1RegL dst, R2R3RegL src1, R0R1RegL src2) %{
+  match(Set dst (DivL src1 src2));
+  effect(CALL);
+  ins_cost(DEFAULT_COST*71);
+  format %{ "DIVL  $src1,$src2,$dst\t! long ! call to SharedRuntime::ldiv" %}
+  ins_encode %{
+    address target = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
+    __ call(target, relocInfo::runtime_call_type);
+  %}
+  ins_pipe(divL_reg_reg);
+%}
+#endif
+
+// Integer Remainder
+// Register Remainder
+#ifdef AARCH64
+#ifdef TODO
+instruct msubI_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (SubI src1 (MulI src2 src3)));
+
+  size(4);
+  format %{ "MSUB    $dst,$src2,$src3,$src1\t! 32-bit\n\t" %}
+  ins_encode %{
+    __ msub_w($dst$$Register, $src2$$Register, $src3$$Register, $src1$$Register);
+  %}
+  ins_pipe(ialu_reg_reg); // FIXME
+%}
+#endif
+
+instruct modI_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp) %{
+  match(Set dst (ModI src1 src2));
+  effect(TEMP temp);
+
+  size(8);
+  format %{ "SDIV    $temp,$src1,$src2\t! 32-bit\n\t"
+            "MSUB    $dst,$src2,$temp,$src1\t! 32-bit\n\t" %}
+  ins_encode %{
+    __ sdiv_w($temp$$Register, $src1$$Register, $src2$$Register);
+    __ msub_w($dst$$Register, $src2$$Register, $temp$$Register, $src1$$Register);
+  %}
+  ins_pipe(ialu_reg_reg); // FIXME
+%}
+#else
+instruct modI_reg_reg(R0RegI dst, R0RegI src1, R2RegI src2, R1RegI temp, LRRegP lr, flagsReg ccr ) %{
+  match(Set dst (ModI src1 src2));
+  effect( KILL ccr, KILL temp, KILL src2, KILL lr);
+
+  format %{ "MODI   $dst,$src1,$src2\t ! call to StubRoutines::Arm::idiv_irem_entry" %}
+  ins_encode %{
+    __ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
+  %}
+  ins_pipe(sdiv_reg_reg);
+%}
+#endif
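+
+// MSUB computes $dst = $src1 - $src2 * $temp, i.e. the remainder left by
+// the SDIV quotient. Worked example for 7 % 3: SDIV gives temp == 2, and
+// MSUB gives 7 - 3*2 == 1.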
+
+// Register Long Remainder
+#ifdef AARCH64
+instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2, iRegL temp) %{
+  match(Set dst (ModL src1 src2));
+  effect(TEMP temp);
+
+  size(8);
+  format %{ "SDIV    $temp,$src1,$src2\n\t"
+            "MSUB    $dst,$src2,$temp,$src1" %}
+  ins_encode %{
+    __ sdiv($temp$$Register, $src1$$Register, $src2$$Register);
+    __ msub($dst$$Register, $src2$$Register, $temp$$Register, $src1$$Register);
+  %}
+  ins_pipe(ialu_reg_reg); // FIXME
+%}
+#else
+instruct modL_reg_reg(R0R1RegL dst, R2R3RegL src1, R0R1RegL src2) %{
+  match(Set dst (ModL src1 src2));
+  effect(CALL);
+  ins_cost(MEMORY_REF_COST); // FIXME
+  format %{ "modL    $dst,$src1,$src2\t ! call to SharedRuntime::lrem" %}
+  ins_encode %{
+    address target = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
+    __ call(target, relocInfo::runtime_call_type);
+  %}
+  ins_pipe(divL_reg_reg);
+%}
+#endif
+
+// Integer Shift Instructions
+
+// Register Shift Left
+instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (LShiftI src1 src2));
+
+  size(4);
+#ifdef AARCH64
+  format %{ "LSLV   $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ lslv_w($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+#else
+  format %{ "LSL  $dst,$src1,$src2 \n\t" %}
+  ins_encode %{
+    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
+  %}
+#endif
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Register Shift Left Immediate
+instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
+  match(Set dst (LShiftI src1 src2));
+
+  size(4);
+#ifdef AARCH64
+  format %{ "LSL_w  $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ _lsl($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+#else
+  format %{ "LSL    $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ logical_shift_left($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+#endif
+  ins_pipe(ialu_reg_imm);
+%}
+
+#ifndef AARCH64
+instruct shlL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(4);
+  format %{"OR  $dst.hi,$dst.hi,($src1.hi << $src2)"  %}
+  ins_encode %{
+    __ orr($dst$$Register->successor(), $dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsl, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct shlL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(4);
+  format %{ "LSL  $dst.lo,$src1.lo,$src2 \n\t" %}
+  ins_encode %{
+    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct shlL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
+  effect(DEF dst, USE src1, USE src2, KILL ccr);
+  size(16);
+  format %{ "SUBS  $dst.hi,$src2,32 \n\t"
+            "LSLpl $dst.hi,$src1.lo,$dst.hi \n\t"
+            "RSBmi $dst.hi,$dst.hi,0 \n\t"
+            "LSRmi $dst.hi,$src1.lo,$dst.hi" %}
+
+  ins_encode %{
+    // $src1$$Register and $dst$$Register->successor() can't be the same
+    __ subs($dst$$Register->successor(), $src2$$Register, 32);
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsl, $dst$$Register->successor()), pl);
+    __ rsb($dst$$Register->successor(), $dst$$Register->successor(), 0, mi);
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsr, $dst$$Register->successor()), mi);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif // !AARCH64
+
+instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
+  match(Set dst (LShiftL src1 src2));
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LSLV  $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ lslv($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+#else
+  expand %{
+    flagsReg ccr;
+    shlL_reg_reg_overlap(dst, src1, src2, ccr);
+    shlL_reg_reg_merge_hi(dst, src1, src2);
+    shlL_reg_reg_merge_lo(dst, src1, src2);
+  %}
+#endif
+%}
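+
+// On arm32 the variable 64-bit left shift expands into the three rules
+// above: _overlap computes the bits of $src1.lo that cross into $dst.hi
+// (the SUBS/pl path handles shifts >= 32, the RSB/mi path shifts < 32),
+// _merge_hi ORs in $src1.hi << $src2 (a register-controlled shift of 32
+// or more yields 0, so this is harmless on a crossing shift), and
+// _merge_lo finally writes $dst.lo.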
+
+#ifdef AARCH64
+instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
+  match(Set dst (LShiftL src1 src2));
+
+  size(4);
+  format %{ "LSL    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ logical_shift_left($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+// Register Shift Left Immediate
+instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
+  match(Set dst (LShiftL src1 src2));
+
+  size(8);
+  format %{ "LSL   $dst.hi,$src1.lo,$src2-32\t! or mov if $src2==32\n\t"
+            "MOV   $dst.lo, 0" %}
+  ins_encode %{
+    if ($src2$$constant == 32) {
+      __ mov($dst$$Register->successor(), $src1$$Register);
+    } else {
+      __ mov($dst$$Register->successor(), AsmOperand($src1$$Register, lsl, $src2$$constant-32));
+    }
+    __ mov($dst$$Register, 0);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+instruct shlL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
+  match(Set dst (LShiftL src1 src2));
+
+  size(12);
+  format %{ "LSL   $dst.hi,$src1.lo,$src2\n\t"
+            "OR    $dst.hi, $dst.hi, $src1.lo >> 32-$src2\n\t"
+            "LSL   $dst.lo,$src1.lo,$src2" %}
+  ins_encode %{
+    // The order of the following 3 instructions matters: src1.lo and
+    // dst.hi can't overlap but src1.hi and dst.hi can.
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsl, $src2$$constant));
+    __ orr($dst$$Register->successor(), $dst$$Register->successor(), AsmOperand($src1$$Register, lsr, 32-$src2$$constant));
+    __ mov($dst$$Register, AsmOperand($src1$$Register, lsl, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif // !AARCH64
+
+// Register Arithmetic Shift Right
+instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (RShiftI src1 src2));
+  size(4);
+#ifdef AARCH64
+  format %{ "ASRV   $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ asrv_w($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+#else
+  format %{ "ASR    $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ mov($dst$$Register, AsmOperand($src1$$Register, asr, $src2$$Register));
+  %}
+#endif
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Register Arithmetic Shift Right Immediate
+instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
+  match(Set dst (RShiftI src1 src2));
+
+  size(4);
+#ifdef AARCH64
+  format %{ "ASR_w  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ _asr_w($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+#else
+  format %{ "ASR    $dst,$src1,$src2" %}
+  ins_encode %{
+    __ mov($dst$$Register, AsmOperand($src1$$Register, asr, $src2$$constant));
+  %}
+#endif
+  ins_pipe(ialu_reg_imm);
+%}
+
+#ifndef AARCH64
+// Register Shift Right Arithmetic Long
+instruct sarL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(4);
+  format %{ "OR  $dst.lo,$dst.lo,($src1.lo >> $src2)"  %}
+  ins_encode %{
+    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct sarL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(4);
+  format %{ "ASR  $dst.hi,$src1.hi,$src2 \n\t" %}
+  ins_encode %{
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct sarL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
+  effect(DEF dst, USE src1, USE src2, KILL ccr);
+  size(16);
+  format %{ "SUBS  $dst.lo,$src2,32 \n\t"
+            "ASRpl $dst.lo,$src1.hi,$dst.lo \n\t"
+            "RSBmi $dst.lo,$dst.lo,0 \n\t"
+            "LSLmi $dst.lo,$src1.hi,$dst.lo" %}
+
+  ins_encode %{
+    // $src1$$Register->successor() and $dst$$Register can't be the same
+    __ subs($dst$$Register, $src2$$Register, 32);
+    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), asr, $dst$$Register), pl);
+    __ rsb($dst$$Register, $dst$$Register, 0, mi);
+    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsl, $dst$$Register), mi);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif // !AARCH64
+
+instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
+  match(Set dst (RShiftL src1 src2));
+
+#ifdef AARCH64
+  size(4);
+  format %{ "ASRV  $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ asrv($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+#else
+  expand %{
+    flagsReg ccr;
+    sarL_reg_reg_overlap(dst, src1, src2, ccr);
+    sarL_reg_reg_merge_lo(dst, src1, src2);
+    sarL_reg_reg_merge_hi(dst, src1, src2);
+  %}
+#endif
+%}
+
+// Register Shift Left Immediate
+#ifdef AARCH64
+instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
+  match(Set dst (RShiftL src1 src2));
+
+  size(4);
+  format %{ "ASR    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ _asr($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
+  match(Set dst (RShiftL src1 src2));
+
+  size(8);
+  format %{ "ASR   $dst.lo,$src1.hi,$src2-32\t! or mov if $src2==32\n\t"
+            "ASR   $dst.hi,$src1.hi, $src2" %}
+  ins_encode %{
+    if ($src2$$constant == 32) {
+      __ mov($dst$$Register, $src1$$Register->successor());
+    } else {
+      __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), asr, $src2$$constant-32));
+    }
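+    // ASR #0 in the shifter encodes ASR #32, filling dst.hi with the
+    // sign bit of src1.hi.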
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, 0));
+  %}
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+instruct sarL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
+  match(Set dst (RShiftL src1 src2));
+  size(12);
+  format %{ "LSR   $dst.lo,$src1.lo,$src2\n\t"
+            "OR    $dst.lo, $dst.lo, $src1.hi << 32-$src2\n\t"
+            "ASR   $dst.hi,$src1.hi,$src2" %}
+  ins_encode %{
+    // The order of the following 3 instructions matters: dst.lo and
+    // src1.hi can't overlap but dst.hi and src1.hi can.
+    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
+    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register->successor(), lsl, 32-$src2$$constant));
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), asr, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
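+
+// Illustrative C equivalent of the imm5 case above (assuming 1 <= n <= 31;
+// not part of the generated code):
+//   dst_lo = ((unsigned)src1_lo >> n) | (src1_hi << (32 - n));
+//   dst_hi = src1_hi >> n;   // arithmetic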
+#endif
+
+// Register Shift Right
+instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (URShiftI src1 src2));
+  size(4);
+#ifdef AARCH64
+  format %{ "LSRV   $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ lsrv_w($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+#else
+  format %{ "LSR    $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
+  %}
+#endif
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Register Shift Right Immediate
+instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
+  match(Set dst (URShiftI src1 src2));
+
+  size(4);
+#ifdef AARCH64
+  format %{ "LSR_w  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ _lsr_w($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+#else
+  format %{ "LSR    $dst,$src1,$src2" %}
+  ins_encode %{
+    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
+  %}
+#endif
+  ins_pipe(ialu_reg_imm);
+%}
+
+#ifndef AARCH64
+// Register Shift Right
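+// Unsigned (logical) variant of the sarL_reg_reg expand above: the same
+// overlap/merge structure, but the high word uses LSR, so vacated bits are
+// zero-filled rather than sign-filled.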
+instruct shrL_reg_reg_merge_lo(iRegL dst, iRegL src1, iRegI src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(4);
+  format %{ "OR   $dst.lo,$dst,($src1.lo >>> $src2)"  %}
+  ins_encode %{
+    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct shrL_reg_reg_merge_hi(iRegL dst, iRegL src1, iRegI src2) %{
+  effect(USE_DEF dst, USE src1, USE src2);
+  size(4);
+  format %{ "LSR  $dst.hi,$src1.hi,$src2 \n\t" %}
+  ins_encode %{
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsr, $src2$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct shrL_reg_reg_overlap(iRegL dst, iRegL src1, iRegI src2, flagsReg ccr) %{
+  effect(DEF dst, USE src1, USE src2, KILL ccr);
+  size(16);
+  format %{ "SUBS  $dst,$src2,32 \n\t"
+            "LSRpl $dst,$src1.hi,$dst \n\t"
+            "RSBmi $dst,$dst,0 \n\t"
+            "LSLmi $dst,$src1.hi,$dst" %}
+
+  ins_encode %{
+    // $src1$$Register->successor() and $dst$$Register can't be the same
+    __ subs($dst$$Register, $src2$$Register, 32);
+    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsr, $dst$$Register), pl);
+    __ rsb($dst$$Register, $dst$$Register, 0, mi);
+    __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsl, $dst$$Register), mi);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif // !AARCH64
+
+instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
+  match(Set dst (URShiftL src1 src2));
+
+#ifdef AARCH64
+  size(4);
+  format %{ "LSRV  $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ lsrv($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+#else
+  expand %{
+    flagsReg ccr;
+    shrL_reg_reg_overlap(dst, src1, src2, ccr);
+    shrL_reg_reg_merge_lo(dst, src1, src2);
+    shrL_reg_reg_merge_hi(dst, src1, src2);
+  %}
+#endif
+%}
+
+// Register Shift Right Immediate
+#ifdef AARCH64
+instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
+  match(Set dst (URShiftL src1 src2));
+
+  size(4);
+  format %{ "LSR    $dst,$src1,$src2" %}
+  ins_encode %{
+    __ _lsr($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6Big src2) %{
+  match(Set dst (URShiftL src1 src2));
+
+  size(8);
+  format %{ "LSR   $dst.lo,$src1.hi,$src2-32\t! or mov if $src2==32\n\t"
+            "MOV   $dst.hi, 0" %}
+  ins_encode %{
+    if ($src2$$constant == 32) {
+      __ mov($dst$$Register, $src1$$Register->successor());
+    } else {
+      __ mov($dst$$Register, AsmOperand($src1$$Register->successor(), lsr, $src2$$constant-32));
+    }
+    __ mov($dst$$Register->successor(), 0);
+  %}
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+instruct shrL_reg_imm5(iRegL dst, iRegL src1, immU5 src2) %{
+  match(Set dst (URShiftL src1 src2));
+
+  size(12);
+  format %{ "LSR   $dst.lo,$src1.lo,$src2\n\t"
+            "OR    $dst.lo, $dst.lo, $src1.hi << 32-$src2\n\t"
+            "LSR   $dst.hi,$src1.hi,$src2" %}
+  ins_encode %{
+    // The order of the following 3 instructions matters: src1.lo and
+    // dst.hi can't overlap, but src1.hi and dst.hi can.
+    __ mov($dst$$Register, AsmOperand($src1$$Register, lsr, $src2$$constant));
+    __ orr($dst$$Register, $dst$$Register, AsmOperand($src1$$Register->successor(), lsl, 32-$src2$$constant));
+    __ mov($dst$$Register->successor(), AsmOperand($src1$$Register->successor(), lsr, $src2$$constant));
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif // !AARCH64
+
+
+instruct shrP_reg_imm5(iRegX dst, iRegP src1, immU5 src2) %{
+  match(Set dst (URShiftI (CastP2X src1) src2));
+  size(4);
+  format %{ "LSR    $dst,$src1,$src2\t! Cast ptr $src1 to int and shift" %}
+  ins_encode %{
+    __ logical_shift_right($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
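+// The CastP2X-then-shift pattern above most commonly comes from the
+// card-table write barrier, which shifts an object address right by
+// log2(card size) to index the card table.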
+
+//----------Floating Point Arithmetic Instructions-----------------------------
+
+//  Add float single precision
+instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (AddF src1 src2));
+
+  size(4);
+  format %{ "FADDS  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ add_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+
+  ins_pipe(faddF_reg_reg);
+%}
+
+//  Add float double precision
+instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (AddD src1 src2));
+
+  size(4);
+  format %{ "FADDD  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ add_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+
+  ins_pipe(faddD_reg_reg);
+%}
+
+//  Sub float single precision
+instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (SubF src1 src2));
+
+  size(4);
+  format %{ "FSUBS  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ sub_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(faddF_reg_reg);
+%}
+
+//  Sub float double precision
+instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (SubD src1 src2));
+
+  size(4);
+  format %{ "FSUBD  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ sub_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(faddD_reg_reg);
+%}
+
+//  Mul float single precision
+instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (MulF src1 src2));
+
+  size(4);
+  format %{ "FMULS  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ mul_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+
+  ins_pipe(fmulF_reg_reg);
+%}
+
+//  Mul float double precision
+instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (MulD src1 src2));
+
+  size(4);
+  format %{ "FMULD  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ mul_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+
+  ins_pipe(fmulD_reg_reg);
+%}
+
+//  Div float single precision
+instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (DivF src1 src2));
+
+  size(4);
+  format %{ "FDIVS  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ div_float($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+
+  ins_pipe(fdivF_reg_reg);
+%}
+
+//  Div float double precision
+instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (DivD src1 src2));
+
+  size(4);
+  format %{ "FDIVD  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ div_double($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+
+  ins_pipe(fdivD_reg_reg);
+%}
+
+//  Absolute float double precision
+instruct absD_reg(regD dst, regD src) %{
+  match(Set dst (AbsD src));
+
+  size(4);
+  format %{ "FABSd  $dst,$src" %}
+  ins_encode %{
+    __ abs_double($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(faddD_reg);
+%}
+
+//  Absolute float single precision
+instruct absF_reg(regF dst, regF src) %{
+  match(Set dst (AbsF src));
+  format %{ "FABSs  $dst,$src" %}
+  ins_encode %{
+    __ abs_float($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(faddF_reg);
+%}
+
+instruct negF_reg(regF dst, regF src) %{
+  match(Set dst (NegF src));
+
+  size(4);
+  format %{ "FNEGs  $dst,$src" %}
+  ins_encode %{
+    __ neg_float($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(faddF_reg);
+%}
+
+instruct negD_reg(regD dst, regD src) %{
+  match(Set dst (NegD src));
+
+  format %{ "FNEGd  $dst,$src" %}
+  ins_encode %{
+    __ neg_double($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(faddD_reg);
+%}
+
+//  Sqrt float single precision
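+// A float sqrt reaches the matcher as ConvF2D -> SqrtD -> ConvD2F, since
+// there is no single-precision sqrt ideal node here. Computing the sqrt in
+// double precision and rounding once back to float yields the correctly
+// rounded single-precision result, so the whole subtree maps to one FSQRTS.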
+instruct sqrtF_reg_reg(regF dst, regF src) %{
+  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
+
+  size(4);
+  format %{ "FSQRTS $dst,$src" %}
+  ins_encode %{
+    __ sqrt_float($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(fdivF_reg_reg);
+%}
+
+//  Sqrt float double precision
+instruct sqrtD_reg_reg(regD dst, regD src) %{
+  match(Set dst (SqrtD src));
+
+  size(4);
+  format %{ "FSQRTD $dst,$src" %}
+  ins_encode %{
+    __ sqrt_double($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(fdivD_reg_reg);
+%}
+
+//----------Logical Instructions-----------------------------------------------
+// And Instructions
+// Register And
+instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (AndI src1 src2));
+
+  size(4);
+  format %{ "and_32 $dst,$src1,$src2" %}
+  ins_encode %{
+    __ and_32($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
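+// The shifted-operand forms below fold the shift into the logical op using
+// ARM's barrel-shifter second operand, so e.g.  x & (y << z)  costs a single
+// instruction. The register-specified-shift forms are arm32-only; AArch64
+// matches only the immediate-shift forms.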
+#ifndef AARCH64
+instruct andshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (AndI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "AND    $dst,$src1,$src2<<$src3" %}
+  ins_encode %{
+    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct andshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (AndI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "and_32 $dst,$src1,$src2<<$src3" %}
+  ins_encode %{
+    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct andsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (AndI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "AND    $dst,$src1,$src2>>$src3" %}
+  ins_encode %{
+    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct andsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (AndI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "and_32 $dst,$src1,$src2>>$src3" %}
+  ins_encode %{
+    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct andshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (AndI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "AND    $dst,$src1,$src2>>>$src3" %}
+  ins_encode %{
+    __ andr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct andshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (AndI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "and_32 $dst,$src1,$src2>>>$src3" %}
+  ins_encode %{
+    __ and_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Immediate And
+instruct andI_reg_limm(iRegI dst, iRegI src1, limmI src2) %{
+  match(Set dst (AndI src1 src2));
+
+  size(4);
+  format %{ "and_32 $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ and_32($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+#ifndef AARCH64
+instruct andI_reg_limmn(iRegI dst, iRegI src1, limmIn src2) %{
+  match(Set dst (AndI src1 src2));
+
+  size(4);
+  format %{ "bic    $dst,$src1,~$src2\t! int" %}
+  ins_encode %{
+    __ bic($dst$$Register, $src1$$Register, ~$src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif
+
+// Register And Long
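+// On 32-bit ARM a long occupies a register pair: $reg$$Register names the
+// low word and ->successor() the high word, so each 64-bit logical op is
+// emitted as two 32-bit instructions.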
+instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (AndL src1 src2));
+
+  ins_cost(DEFAULT_COST);
+#ifdef AARCH64
+  size(4);
+  format %{ "AND    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+#else
+  size(8);
+  format %{ "AND    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
+    __ andr($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
+  %}
+#endif
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifdef AARCH64
+// Immediate And
+instruct andL_reg_limm(iRegL dst, iRegL src1, limmL src2) %{
+  match(Set dst (AndL src1 src2));
+
+  size(4);
+  format %{ "AND    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ andr($dst$$Register, $src1$$Register, (uintx)$src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
+// (hi($con$$constant), lo($con$$constant))
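+// immLlowRot is assumed here to be a long constant whose high word is zero
+// and whose low word is encodable as an ARM rotated immediate, which is why
+// the high word below is simply combined with 0.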
+instruct andL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
+  match(Set dst (AndL src1 con));
+  ins_cost(DEFAULT_COST);
+  size(8);
+  format %{ "AND    $dst,$src1,$con\t! long" %}
+  ins_encode %{
+    __ andr($dst$$Register, $src1$$Register, $con$$constant);
+    __ andr($dst$$Register->successor(), $src1$$Register->successor(), 0);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif
+
+// Or Instructions
+// Register Or
+instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (OrI src1 src2));
+
+  size(4);
+  format %{ "orr_32 $dst,$src1,$src2\t! int" %}
+  ins_encode %{
+    __ orr_32($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct orshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (OrI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "OR    $dst,$src1,$src2<<$src3" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct orshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (OrI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "orr_32 $dst,$src1,$src2<<$src3" %}
+  ins_encode %{
+    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct orsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (OrI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "OR    $dst,$src1,$src2>>$src3" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct orsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (OrI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "orr_32 $dst,$src1,$src2>>$src3" %}
+  ins_encode %{
+    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct orshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (OrI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "OR    $dst,$src1,$src2>>>$src3" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct orshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (OrI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "orr_32 $dst,$src1,$src2>>>$src3" %}
+  ins_encode %{
+    __ orr_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Immediate Or
+instruct orI_reg_limm(iRegI dst, iRegI src1, limmI src2) %{
+  match(Set dst (OrI src1 src2));
+
+  size(4);
+  format %{ "orr_32  $dst,$src1,$src2" %}
+  ins_encode %{
+    __ orr_32($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+// TODO: orn_32 with limmIn
+
+// Register Or Long
+instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (OrL src1 src2));
+
+  ins_cost(DEFAULT_COST);
+#ifdef AARCH64
+  size(4);
+  format %{ "OR     $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+#else
+  size(8);
+  format %{ "OR     $dst.lo,$src1.lo,$src2.lo\t! long\n\t"
+            "OR     $dst.hi,$src1.hi,$src2.hi" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
+    __ orr($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
+  %}
+#endif
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifdef AARCH64
+instruct orL_reg_limm(iRegL dst, iRegL src1, limmL src2) %{
+  match(Set dst (OrL src1 src2));
+
+  size(4);
+  format %{ "ORR    $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, (uintx)$src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
+// (hi($con$$constant), lo($con$$constant))
+instruct orL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
+  match(Set dst (OrL src1 con));
+  ins_cost(DEFAULT_COST);
+  size(8);
+  format %{ "OR     $dst.lo,$src1.lo,$con\t! long\n\t"
+            "OR     $dst.hi,$src1.hi,$con" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, $con$$constant);
+    __ orr($dst$$Register->successor(), $src1$$Register->successor(), 0);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif
+
+#ifdef TODO
+// Use sp_ptr_RegP to match Rthread (TLS register) without spilling.
+instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
+  match(Set dst (OrI src1 (CastP2X src2)));
+  size(4);
+  format %{ "OR     $dst,$src1,$src2" %}
+  ins_encode %{
+    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+// Xor Instructions
+// Register Xor
+instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
+  match(Set dst (XorI src1 src2));
+
+  size(4);
+  format %{ "eor_32 $dst,$src1,$src2" %}
+  ins_encode %{
+    __ eor_32($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct xorshlI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (XorI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "XOR    $dst,$src1,$src2<<$src3" %}
+  ins_encode %{
+    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct xorshlI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (XorI src1 (LShiftI src2 src3)));
+
+  size(4);
+  format %{ "eor_32 $dst,$src1,$src2<<$src3" %}
+  ins_encode %{
+    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsl, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct xorsarI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (XorI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "XOR    $dst,$src1,$src2>>$src3" %}
+  ins_encode %{
+    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct xorsarI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (XorI src1 (RShiftI src2 src3)));
+
+  size(4);
+  format %{ "eor_32 $dst,$src1,$src2>>$src3" %}
+  ins_encode %{
+    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, asr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifndef AARCH64
+instruct xorshrI_reg_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) %{
+  match(Set dst (XorI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "XOR    $dst,$src1,$src2>>>$src3" %}
+  ins_encode %{
+    __ eor($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$Register));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+#endif
+
+instruct xorshrI_reg_reg_imm(iRegI dst, iRegI src1, iRegI src2, immU5 src3) %{
+  match(Set dst (XorI src1 (URShiftI src2 src3)));
+
+  size(4);
+  format %{ "eor_32 $dst,$src1,$src2>>>$src3" %}
+  ins_encode %{
+    __ eor_32($dst$$Register, $src1$$Register, AsmOperand($src2$$Register, lsr, $src3$$constant));
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Immediate Xor
+instruct xorI_reg_imm(iRegI dst, iRegI src1, limmI src2) %{
+  match(Set dst (XorI src1 src2));
+
+  size(4);
+  format %{ "eor_32 $dst,$src1,$src2" %}
+  ins_encode %{
+    __ eor_32($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Register Xor Long
+instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
+  match(Set dst (XorL src1 src2));
+  ins_cost(DEFAULT_COST);
+#ifdef AARCH64
+  size(4);
+  format %{ "XOR     $dst,$src1,$src2\t! long" %}
+  ins_encode %{
+    __ eor($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+#else
+  size(8);
+  format %{ "XOR     $dst.hi,$src1.hi,$src2.hi\t! long\n\t"
+            "XOR     $dst.lo,$src1.lo,$src2.lo\t! long" %}
+  ins_encode %{
+    __ eor($dst$$Register, $src1$$Register, $src2$$Register);
+    __ eor($dst$$Register->successor(), $src1$$Register->successor(), $src2$$Register->successor());
+  %}
+#endif
+  ins_pipe(ialu_reg_reg);
+%}
+
+#ifdef AARCH64
+instruct xorL_reg_limmL(iRegL dst, iRegL src1, limmL con) %{
+  match(Set dst (XorL src1 con));
+  ins_cost(DEFAULT_COST);
+  size(4);
+  format %{ "EOR     $dst,$src1,$con\t! long" %}
+  ins_encode %{
+    __ eor($dst$$Register, $src1$$Register, (uintx)$con$$constant);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#else
+// TODO: try immLRot2 instead, so that (0, $con$$constant) becomes
+// (hi($con$$constant), lo($con$$constant))
+instruct xorL_reg_immRot(iRegL dst, iRegL src1, immLlowRot con) %{
+  match(Set dst (XorL src1 con));
+  ins_cost(DEFAULT_COST);
+  size(8);
+  format %{ "XOR     $dst.hi,$src1.hi,$con\t! long\n\t"
+            "XOR     $dst.lo,$src1.lo,0\t! long" %}
+  ins_encode %{
+    __ eor($dst$$Register, $src1$$Register, $con$$constant);
+    __ eor($dst$$Register->successor(), $src1$$Register->successor(), 0);
+  %}
+  ins_pipe(ialu_reg_imm);
+%}
+#endif // AARCH64
+
+//----------Convert to Boolean-------------------------------------------------
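+// Conv2B normalizes a value to a boolean: dst = (src != 0) ? 1 : 0.
+// AArch64 does this with CMP + CSET; 32-bit ARM materializes 0 and then
+// conditionally overwrites it with 1.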
+instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
+  match(Set dst (Conv2B src));
+  effect(KILL ccr);
+#ifdef AARCH64
+  size(8);
+  ins_cost(DEFAULT_COST*2);
+  format %{ "cmp_32 $src,ZR\n\t"
+            "cset_w $dst, ne" %}
+  ins_encode %{
+    __ cmp_32($src$$Register, ZR);
+    __ cset_w($dst$$Register, ne);
+  %}
+#else
+  size(12);
+  ins_cost(DEFAULT_COST*2);
+  format %{ "TST    $src,$src \n\t"
+            "MOV    $dst, 0   \n\t"
+            "MOV.ne $dst, 1" %}
+  ins_encode %{ // FIXME: can do better?
+    __ tst($src$$Register, $src$$Register);
+    __ mov($dst$$Register, 0);
+    __ mov($dst$$Register, 1, ne);
+  %}
+#endif
+  ins_pipe(ialu_reg_ialu);
+%}
+
+instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
+  match(Set dst (Conv2B src));
+  effect(KILL ccr);
+#ifdef AARCH64
+  size(8);
+  ins_cost(DEFAULT_COST*2);
+  format %{ "CMP    $src,ZR\n\t"
+            "cset   $dst, ne" %}
+  ins_encode %{
+    __ cmp($src$$Register, ZR);
+    __ cset($dst$$Register, ne);
+  %}
+#else
+  size(12);
+  ins_cost(DEFAULT_COST*2);
+  format %{ "TST    $src,$src \n\t"
+            "MOV    $dst, 0   \n\t"
+            "MOV.ne $dst, 1" %}
+  ins_encode %{
+    __ tst($src$$Register, $src$$Register);
+    __ mov($dst$$Register, 0);
+    __ mov($dst$$Register, 1, ne);
+  %}
+#endif
+  ins_pipe(ialu_reg_ialu);
+%}
+
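+// CmpLTMask produces an all-ones mask: dst = (p < q) ? -1 : 0. On 32-bit ARM
+// the -1 comes from a conditional MVN of 0 (bitwise NOT of 0); AArch64 uses
+// CSETM, which sets all bits when the condition holds.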
+instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
+  match(Set dst (CmpLTMask p q));
+  effect( KILL ccr );
+#ifdef AARCH64
+  size(8);
+  ins_cost(DEFAULT_COST*2);
+  format %{ "CMP_w   $p,$q\n\t"
+            "CSETM_w $dst, lt" %}
+  ins_encode %{
+    __ cmp_w($p$$Register, $q$$Register);
+    __ csetm_w($dst$$Register, lt);
+  %}
+#else
+  ins_cost(DEFAULT_COST*3);
+  format %{ "CMP    $p,$q\n\t"
+            "MOV    $dst, #0\n\t"
+            "MOV.lt $dst, #-1" %}
+  ins_encode %{
+    __ cmp($p$$Register, $q$$Register);
+    __ mov($dst$$Register, 0);
+    __ mvn($dst$$Register, 0, lt);
+  %}
+#endif
+  ins_pipe(ialu_reg_reg_ialu);
+%}
+
+instruct cmpLTMask_reg_imm( iRegI dst, iRegI p, aimmI q, flagsReg ccr ) %{
+  match(Set dst (CmpLTMask p q));
+  effect( KILL ccr );
+#ifdef AARCH64
+  size(8);
+  ins_cost(DEFAULT_COST*2);
+  format %{ "CMP_w   $p,$q\n\t"
+            "CSETM_w $dst, lt" %}
+  ins_encode %{
+    __ cmp_w($p$$Register, $q$$constant);
+    __ csetm_w($dst$$Register, lt);
+  %}
+#else
+  ins_cost(DEFAULT_COST*3);
+  format %{ "CMP    $p,$q\n\t"
+            "MOV    $dst, #0\n\t"
+            "MOV.lt $dst, #-1" %}
+  ins_encode %{
+    __ cmp($p$$Register, $q$$constant);
+    __ mov($dst$$Register, 0);
+    __ mvn($dst$$Register, 0, lt);
+  %}
+#endif
+  ins_pipe(ialu_reg_reg_ialu);
+%}
+
+#ifdef AARCH64
+instruct cadd_cmpLTMask3( iRegI dst, iRegI p, iRegI q, iRegI y, iRegI x, flagsReg ccr ) %{
+  match(Set dst (AddI (AndI (CmpLTMask p q) y) x));
+  effect( TEMP dst, KILL ccr );
+  size(12);
+  ins_cost(DEFAULT_COST*3);