changeset 7881:694f5e5bb982

Merge
author kvn
date Tue, 20 Jan 2015 15:24:58 -0800
parents 20946e467375 d498aba2c736
children c62971d23589
files agent/src/os/linux/LinuxDebuggerLocal.c src/os/linux/vm/os_linux.cpp src/share/vm/memory/metaspace.cpp src/share/vm/opto/c2_globals.hpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/os.hpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/vmStructs.cpp src/share/vm/utilities/globalDefinitions.hpp
diffstat 187 files changed, 54768 insertions(+), 23 deletions(-) [+]
line wrap: on
line diff
--- a/agent/src/os/linux/LinuxDebuggerLocal.c	Tue Jan 20 10:22:43 2015 -0800
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c	Tue Jan 20 15:24:58 2015 -0800
@@ -345,7 +345,7 @@
   return (err == PS_OK)? array : 0;
 }
 
-#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) | defined(ppc64)
+#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) || defined(ppc64) || defined(aarch64)
 JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0
   (JNIEnv *env, jobject this_obj, jint lwp_id) {
 
@@ -367,6 +367,9 @@
 #ifdef amd64
 #define NPRGREG sun_jvm_hotspot_debugger_amd64_AMD64ThreadContext_NPRGREG
 #endif
+#ifdef aarch64
+#define NPRGREG 32
+#endif
 #if defined(sparc) || defined(sparcv9)
 #define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG
 #endif
@@ -466,6 +469,12 @@
   regs[REG_INDEX(R_O7)]  = gregs.u_regs[14];
 #endif /* sparc */
 
+#if defined(aarch64)
+
+#define REG_INDEX(reg) sun_jvm_hotspot_debugger_aarch64_AARCH64ThreadContext_##reg
+
+#endif /* aarch64 */
+
 #ifdef ppc64
 #define REG_INDEX(reg) sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_##reg
 
--- a/agent/src/os/linux/libproc.h	Tue Jan 20 10:22:43 2015 -0800
+++ b/agent/src/os/linux/libproc.h	Tue Jan 20 15:24:58 2015 -0800
@@ -71,6 +71,9 @@
 #if defined(sparc) || defined(sparcv9) || defined(ppc64)
 #define user_regs_struct  pt_regs
 #endif
+#if defined(aarch64)
+#define user_regs_struct user_pt_regs
+#endif
 
 // This C bool type must be int for compatibility with Linux calls and
 // it would be a mistake to equivalence it to C++ bool on many platforms
--- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Tue Jan 20 10:22:43 2015 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Tue Jan 20 15:24:58 2015 -0800
@@ -34,6 +34,7 @@
 import sun.jvm.hotspot.debugger.MachineDescription;
 import sun.jvm.hotspot.debugger.MachineDescriptionAMD64;
 import sun.jvm.hotspot.debugger.MachineDescriptionPPC64;
+import sun.jvm.hotspot.debugger.MachineDescriptionAArch64;
 import sun.jvm.hotspot.debugger.MachineDescriptionIA64;
 import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86;
 import sun.jvm.hotspot.debugger.MachineDescriptionSPARC32Bit;
@@ -591,6 +592,8 @@
             machDesc = new MachineDescriptionAMD64();
         } else if (cpu.equals("ppc64")) {
             machDesc = new MachineDescriptionPPC64();
+        } else if (cpu.equals("aarch64")) {
+            machDesc = new MachineDescriptionAArch64();
         } else if (cpu.equals("sparc")) {
             if (LinuxDebuggerLocal.getAddressSize()==8) {
                     machDesc = new MachineDescriptionSPARC64Bit();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/MachineDescriptionAArch64.java	Tue Jan 20 15:24:58 2015 -0800
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.debugger;
+
+public class MachineDescriptionAArch64 extends MachineDescriptionTwosComplement implements MachineDescription {
+  public long getAddressSize() {
+    return 8;
+  }
+
+  public boolean isLP64() {
+    return true;
+  }
+
+  public boolean isBigEndian() {
+    return false;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Tue Jan 20 10:22:43 2015 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java	Tue Jan 20 15:24:58 2015 -0800
@@ -61,7 +61,7 @@
       return "x86";
     } else if (cpu.equals("sparc") || cpu.equals("sparcv9")) {
       return "sparc";
-    } else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64") || cpu.equals("ppc64")) {
+    } else if (cpu.equals("ia64") || cpu.equals("amd64") || cpu.equals("x86_64") || cpu.equals("ppc64") || cpu.equals("aarch64")) {
       return cpu;
     } else {
       try {
--- a/make/defs.make	Tue Jan 20 10:22:43 2015 -0800
+++ b/make/defs.make	Tue Jan 20 15:24:58 2015 -0800
@@ -286,7 +286,7 @@
 
   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
   # is not explicitly listed below, it is treated as x86.
-  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 zero,$(ARCH)))
+  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 aarch64 zero,$(ARCH)))
   ARCH/       = x86
   ARCH/sparc  = sparc
   ARCH/sparc64= sparc
@@ -296,6 +296,7 @@
   ARCH/ppc64  = ppc
   ARCH/ppc    = ppc
   ARCH/arm    = arm
+  ARCH/aarch64= aarch64
   ARCH/zero   = zero
 
   # BUILDARCH is usually the same as SRCARCH, except for sparcv9
@@ -326,11 +327,12 @@
   LIBARCH/sparcv9 = sparcv9
   LIBARCH/ia64    = ia64
   LIBARCH/ppc64   = ppc64
+  LIBARCH/aarch64 = aarch64
   LIBARCH/ppc     = ppc
   LIBARCH/arm     = arm
   LIBARCH/zero    = $(ZERO_LIBARCH)
 
-  LP64_ARCH = sparcv9 amd64 ia64 ppc64 zero
+  LP64_ARCH = sparcv9 amd64 ia64 ppc64 aarch64 zero
 endif
 
 # Required make macro settings for all platforms
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/makefiles/aarch64.make	Tue Jan 20 15:24:58 2015 -0800
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized
+OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
+# The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized
+OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT)
+# Must also specify if CPU is little endian
+CFLAGS += -DVM_LITTLE_ENDIAN
+
+CFLAGS += -D_LP64=1
--- a/make/linux/makefiles/buildtree.make	Tue Jan 20 10:22:43 2015 -0800
+++ b/make/linux/makefiles/buildtree.make	Tue Jan 20 15:24:58 2015 -0800
@@ -194,6 +194,7 @@
 DATA_MODE/sparcv9 = 64
 DATA_MODE/amd64 = 64
 DATA_MODE/ppc64 = 64
+DATA_MODE/aarch64 = 64
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
--- a/make/linux/makefiles/defs.make	Tue Jan 20 10:22:43 2015 -0800
+++ b/make/linux/makefiles/defs.make	Tue Jan 20 15:24:58 2015 -0800
@@ -130,6 +130,15 @@
   HS_ARCH = ppc
 endif
 
+# AARCH64
+ifeq ($(ARCH), aarch64)
+  ARCH_DATA_MODEL  = 64
+  MAKE_ARGS        += LP64=1
+  PLATFORM         = linux-aarch64
+  VM_PLATFORM      = linux_aarch64
+  HS_ARCH          = aarch64
+endif
+
 # On 32 bit linux we build server and client, on 64 bit just server.
 ifeq ($(JVM_VARIANTS),)
   ifeq ($(ARCH_DATA_MODEL), 32)
--- a/make/linux/makefiles/gcc.make	Tue Jan 20 10:22:43 2015 -0800
+++ b/make/linux/makefiles/gcc.make	Tue Jan 20 15:24:58 2015 -0800
@@ -172,6 +172,7 @@
 ARCHFLAG = $(ARCHFLAG/$(BUILDARCH))
 ARCHFLAG/i486    = -m32 -march=i586
 ARCHFLAG/amd64   = -m64 $(STACK_ALIGNMENT_OPT)
+ARCHFLAG/aarch64 =
 ARCHFLAG/ia64    =
 ARCHFLAG/sparc   = -m32 -mcpu=v9
 ARCHFLAG/sparcv9 = -m64 -mcpu=v9
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/platform_aarch64	Tue Jan 20 15:24:58 2015 -0800
@@ -0,0 +1,15 @@
+os_family = linux
+
+arch = aarch64
+
+arch_model = aarch64
+
+os_arch = linux_aarch64
+
+os_arch_model = linux_aarch64
+
+lib_arch = aarch64
+
+compiler = gcc
+
+sysdefs = -DLINUX -D_GNU_SOURCE -DAARCH64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/aarch64/vm/aarch64.ad	Tue Jan 20 15:24:58 2015 -0800
@@ -0,0 +1,12255 @@
+//
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2014, Red Hat Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+
+// AArch64 Architecture Description File
+
+//----------REGISTER DEFINITION BLOCK------------------------------------------
+// This information is used by the matcher and the register allocator to
+// describe individual registers and classes of registers within the target
+// architecture.
+
+register %{
+//----------Architecture Description Register Definitions----------------------
+// General Registers
+// "reg_def"  name ( register save type, C convention save type,
+//                   ideal register type, encoding );
+// Register Save Types:
+//
+// NS  = No-Save:       The register allocator assumes that these registers
+//                      can be used without saving upon entry to the method, &
+//                      that they do not need to be saved at call sites.
+//
+// SOC = Save-On-Call:  The register allocator assumes that these registers
+//                      can be used without saving upon entry to the method,
+//                      but that they must be saved at call sites.
+//
+// SOE = Save-On-Entry: The register allocator assumes that these registers
+//                      must be saved before using them upon entry to the
+//                      method, but they do not need to be saved at call
+//                      sites.
+//
+// AS  = Always-Save:   The register allocator assumes that these registers
+//                      must be saved before using them upon entry to the
+//                      method, & that they must be saved at call sites.
+//
+// Ideal Register Type is used to determine how to save & restore a
+// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
+// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
+//
+// The encoding number is the actual bit-pattern placed into the opcodes.
+
+// We must define the 64 bit int registers in two 32 bit halves, the
+// real lower register and a virtual upper half register. upper halves
+// are used by the register allocator but are not actually supplied as
+// operands to memory ops.
+//
+// follow the C1 compiler in making registers
+//
+//   r0-r7,r10-r26 volatile (caller save)
+//   r27-r32 system (no save, no allocate)
+//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
+//
+// as regards Java usage. we don't use any callee save registers
+// because this makes it difficult to de-optimise a frame (see comment
+// in x86 implementation of Deoptimization::unwind_callee_save_values)
+//
+
+// General Registers
+
+reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
+reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
+reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
+reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
+reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
+reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
+reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
+reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
+reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
+reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
+reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
+reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
+reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
+reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
+reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
+reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
+reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
+reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
+reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
+reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
+reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
+reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
+reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
+reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
+reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
+reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
+reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
+reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
+reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
+reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
+reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
+reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
+reg_def R18     ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()        );
+reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next());
+reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
+reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
+reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
+reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
+reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
+reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
+reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
+reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
+reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
+reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
+reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
+reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
+reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
+reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
+reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
+reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
+reg_def R27     (  NS, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
+reg_def R27_H   (  NS, SOE, Op_RegI, 27, r27->as_VMReg()->next());
+reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
+reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
+reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
+reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
+reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
+reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
+reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
+reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
+
+// ----------------------------
+// Float/Double Registers
+// ----------------------------
+
+// Double Registers
+
+// The rules of ADL require that double registers be defined in pairs.
+// Each pair must be two 32-bit values, but not necessarily a pair of
+// single float registers. In each pair, ADLC-assigned register numbers
+// must be adjacent, with the lower number even. Finally, when the
+// CPU stores such a register pair to memory, the word associated with
+// the lower ADLC-assigned number must be stored to the lower address.
+
+// AArch64 has 32 floating-point registers. Each can store a vector of
+// single or double precision floating-point values up to 8 * 32
+// floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
+// use the first float or double element of the vector.
+
+// For Java use, float registers v0-v15 are always save-on-call
+// (whereas the platform ABI treats v8-v15 as callee save). Float
+// registers v16-v31 are SOC as per the platform spec.
+
+  reg_def V0   ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()         );
+  reg_def V0_H ( SOC, SOC, Op_RegF,  0, v0->as_VMReg()->next() );
+  reg_def V1   ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()         );
+  reg_def V1_H ( SOC, SOC, Op_RegF,  1, v1->as_VMReg()->next() );
+  reg_def V2   ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()         );
+  reg_def V2_H ( SOC, SOC, Op_RegF,  2, v2->as_VMReg()->next() );
+  reg_def V3   ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()         );
+  reg_def V3_H ( SOC, SOC, Op_RegF,  3, v3->as_VMReg()->next() );
+  reg_def V4   ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()         );
+  reg_def V4_H ( SOC, SOC, Op_RegF,  4, v4->as_VMReg()->next() );
+  reg_def V5   ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()         );
+  reg_def V5_H ( SOC, SOC, Op_RegF,  5, v5->as_VMReg()->next() );
+  reg_def V6   ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()         );
+  reg_def V6_H ( SOC, SOC, Op_RegF,  6, v6->as_VMReg()->next() );
+  reg_def V7   ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()         );
+  reg_def V7_H ( SOC, SOC, Op_RegF,  7, v7->as_VMReg()->next() );
+  reg_def V8   ( SOC, SOE, Op_RegF,  8, v8->as_VMReg()         );
+  reg_def V8_H ( SOC, SOE, Op_RegF,  8, v8->as_VMReg()->next() );
+  reg_def V9   ( SOC, SOE, Op_RegF,  9, v9->as_VMReg()         );
+  reg_def V9_H ( SOC, SOE, Op_RegF,  9, v9->as_VMReg()->next() );
+  reg_def V10  ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()        );
+  reg_def V10_H( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next());
+  reg_def V11  ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()        );
+  reg_def V11_H( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next());
+  reg_def V12  ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()        );
+  reg_def V12_H( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next());
+  reg_def V13  ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()        );
+  reg_def V13_H( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next());
+  reg_def V14  ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()        );
+  reg_def V14_H( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next());
+  reg_def V15  ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()        );
+  reg_def V15_H( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next());
+  reg_def V16  ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()        );
+  reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next());
+  reg_def V17  ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()        );
+  reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next());
+  reg_def V18  ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()        );
+  reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next());
+  reg_def V19  ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()        );
+  reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next());
+  reg_def V20  ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()        );
+  reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next());
+  reg_def V21  ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()        );
+  reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next());
+  reg_def V22  ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()        );
+  reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next());
+  reg_def V23  ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()        );
+  reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next());
+  reg_def V24  ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()        );
+  reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next());
+  reg_def V25  ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()        );
+  reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next());
+  reg_def V26  ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()        );
+  reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next());
+  reg_def V27  ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()        );
+  reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next());
+  reg_def V28  ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()        );
+  reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next());
+  reg_def V29  ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()        );
+  reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next());
+  reg_def V30  ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()        );
+  reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next());
+  reg_def V31  ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()        );
+  reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next());
+
+// ----------------------------
+// Special Registers
+// ----------------------------
+
+// the AArch64 CPSR status flag register is not directly accessible as
+// an instruction operand. the FPSR status flag register is a system
+// register which can be written/read using MSR/MRS but again does not
+// appear as an operand (a code identifying the FPSR occurs as an
+// immediate value in the instruction).
+
+reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
+
+
+// Specify priority of register selection within phases of register
+// allocation.  Highest priority is first.  A useful heuristic is to
+// give registers a low priority when they are required by machine
+// instructions, like EAX and EDX on I486, and choose no-save registers
+// before save-on-call, & save-on-call before save-on-entry.  Registers
+// which participate in fixed calling sequences should come last.
+// Registers which are used as pairs must fall on an even boundary.
+
+alloc_class chunk0(
+    // volatiles
+    R10, R10_H,
+    R11, R11_H,
+    R12, R12_H,
+    R13, R13_H,
+    R14, R14_H,
+    R15, R15_H,
+    R16, R16_H,
+    R17, R17_H,
+    R18, R18_H,
+
+    // arg registers
+    R0, R0_H,
+    R1, R1_H,
+    R2, R2_H,
+    R3, R3_H,
+    R4, R4_H,
+    R5, R5_H,
+    R6, R6_H,
+    R7, R7_H,
+
+    // non-volatiles
+    R19, R19_H,
+    R20, R20_H,
+    R21, R21_H,
+    R22, R22_H,
+    R23, R23_H,
+    R24, R24_H,
+    R25, R25_H,
+    R26, R26_H,
+
+    // non-allocatable registers
+
+    R27, R27_H, // heapbase
+    R28, R28_H, // thread
+    R29, R29_H, // fp
+    R30, R30_H, // lr
+    R31, R31_H, // sp
+);
+
+alloc_class chunk1(
+
+    // no save
+    V16, V16_H,
+    V17, V17_H,
+    V18, V18_H,
+    V19, V19_H,
+    V20, V20_H,
+    V21, V21_H,
+    V22, V22_H,
+    V23, V23_H,
+    V24, V24_H,
+    V25, V25_H,
+    V26, V26_H,
+    V27, V27_H,
+    V28, V28_H,
+    V29, V29_H,
+    V30, V30_H,
+    V31, V31_H,
+
+    // arg registers
+    V0, V0_H,
+    V1, V1_H,
+    V2, V2_H,
+    V3, V3_H,
+    V4, V4_H,
+    V5, V5_H,
+    V6, V6_H,
+    V7, V7_H,
+
+    // non-volatiles
+    V8, V8_H,
+    V9, V9_H,
+    V10, V10_H,
+    V11, V11_H,
+    V12, V12_H,
+    V13, V13_H,
+    V14, V14_H,
+    V15, V15_H,
+);
+
+alloc_class chunk2(RFLAGS);
+
+//----------Architecture Description Register Classes--------------------------
+// Several register classes are automatically defined based upon information in
+// this architecture description.
+// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
+// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
+// 2) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
+// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
+//
+
+// Class for all 32 bit integer registers -- excludes SP which will
+// never be used as an integer register
+reg_class any_reg32(
+    R0,
+    R1,
+    R2,
+    R3,
+    R4,
+    R5,
+    R6,
+    R7,
+    R10,
+    R11,
+    R12,
+    R13,
+    R14,
+    R15,
+    R16,
+    R17,
+    R18,
+    R19,
+    R20,
+    R21,
+    R22,
+    R23,
+    R24,
+    R25,
+    R26,
+    R27,
+    R28,
+    R29,
+    R30
+);
+
+// Singleton class for R0 int register
+reg_class int_r0_reg(R0);
+
+// Singleton class for R2 int register
+reg_class int_r2_reg(R2);
+
+// Singleton class for R3 int register
+reg_class int_r3_reg(R3);
+
+// Singleton class for R4 int register
+reg_class int_r4_reg(R4);
+
+// Class for all long integer registers (including RSP)
+reg_class any_reg(
+    R0, R0_H,
+    R1, R1_H,
+    R2, R2_H,
+    R3, R3_H,
+    R4, R4_H,
+    R5, R5_H,
+    R6, R6_H,
+    R7, R7_H,
+    R10, R10_H,
+    R11, R11_H,
+    R12, R12_H,
+    R13, R13_H,
+    R14, R14_H,
+    R15, R15_H,
+    R16, R16_H,
+    R17, R17_H,
+    R18, R18_H,
+    R19, R19_H,
+    R20, R20_H,
+    R21, R21_H,
+    R22, R22_H,
+    R23, R23_H,
+    R24, R24_H,
+    R25, R25_H,
+    R26, R26_H,
+    R27, R27_H,
+    R28, R28_H,
+    R29, R29_H,
+    R30, R30_H,
+    R31, R31_H
+);
+
+// Class for all non-special integer registers
+reg_class no_special_reg32(
+    R0,
+    R1,
+    R2,
+    R3,
+    R4,
+    R5,
+    R6,
+    R7,
+    R10,
+    R11,
+    R12,                        // rmethod
+    R13,
+    R14,
+    R15,
+    R16,
+    R17,
+    R18,
+    R19,
+    R20,
+    R21,
+    R22,
+    R23,
+    R24,
+    R25,
+    R26
+ /* R27, */                     // heapbase
+ /* R28, */                     // thread
+ /* R29, */                     // fp
+ /* R30, */                     // lr
+ /* R31 */                      // sp
+);
+
+// Class for all non-special long integer registers
+reg_class no_special_reg(
+    R0, R0_H,
+    R1, R1_H,
+    R2, R2_H,
+    R3, R3_H,
+    R4, R4_H,
+    R5, R5_H,
+    R6, R6_H,
+    R7, R7_H,
+    R10, R10_H,
+    R11, R11_H,
+    R12, R12_H,                 // rmethod
+    R13, R13_H,
+    R14, R14_H,
+    R15, R15_H,
+    R16, R16_H,
+    R17, R17_H,
+    R18, R18_H,
+    R19, R19_H,
+    R20, R20_H,
+    R21, R21_H,
+    R22, R22_H,
+    R23, R23_H,
+    R24, R24_H,
+    R25, R25_H,
+    R26, R26_H,
+ /* R27, R27_H, */              // heapbase
+ /* R28, R28_H, */              // thread
+ /* R29, R29_H, */              // fp
+ /* R30, R30_H, */              // lr
+ /* R31, R31_H */               // sp
+);
+
+// Class for 64 bit register r0
+reg_class r0_reg(
+    R0, R0_H
+);
+
+// Class for 64 bit register r1
+reg_class r1_reg(
+    R1, R1_H
+);
+
+// Class for 64 bit register r2
+reg_class r2_reg(
+    R2, R2_H
+);
+
+// Class for 64 bit register r3
+reg_class r3_reg(
+    R3, R3_H
+);
+
+// Class for 64 bit register r4
+reg_class r4_reg(
+    R4, R4_H
+);
+
+// Class for 64 bit register r5
+reg_class r5_reg(
+    R5, R5_H
+);
+
+// Class for 64 bit register r10
+reg_class r10_reg(
+    R10, R10_H
+);
+
+// Class for 64 bit register r11
+reg_class r11_reg(
+    R11, R11_H
+);
+
+// Class for method register
+reg_class method_reg(
+    R12, R12_H
+);
+
+// Class for heapbase register
+reg_class heapbase_reg(
+    R27, R27_H
+);
+
+// Class for thread register
+reg_class thread_reg(
+    R28, R28_H
+);
+
+// Class for frame pointer register
+reg_class fp_reg(
+    R29, R29_H
+);
+
+// Class for link register
+reg_class lr_reg(
+    R30, R30_H
+);
+
+// Class for long sp register
+reg_class sp_reg(
+  R31, R31_H
+);
+
+// Class for all pointer registers
+reg_class ptr_reg(
+    R0, R0_H,
+    R1, R1_H,
+    R2, R2_H,
+    R3, R3_H,
+    R4, R4_H,
+    R5, R5_H,
+    R6, R6_H,
+    R7, R7_H,
+    R10, R10_H,
+    R11, R11_H,
+    R12, R12_H,
+    R13, R13_H,
+    R14, R14_H,
+    R15, R15_H,
+    R16, R16_H,
+    R17, R17_H,
+    R18, R18_H,
+    R19, R19_H,
+    R20, R20_H,
+    R21, R21_H,
+    R22, R22_H,
+    R23, R23_H,
+    R24, R24_H,
+    R25, R25_H,
+    R26, R26_H,
+    R27, R27_H,
+    R28, R28_H,
+    R29, R29_H,
+    R30, R30_H,
+    R31, R31_H
+);
+
+// Class for all non_special pointer registers
+reg_class no_special_ptr_reg(
+    R0, R0_H,
+    R1, R1_H,
+    R2, R2_H,
+    R3, R3_H,
+    R4, R4_H,
+    R5, R5_H,
+    R6, R6_H,
+    R7, R7_H,
+    R10, R10_H,
+    R11, R11_H,
+    R12, R12_H,
+    R13, R13_H,
+    R14, R14_H,
+    R15, R15_H,
+    R16, R16_H,
+    R17, R17_H,
+    R18, R18_H,
+    R19, R19_H,
+    R20, R20_H,
+    R21, R21_H,
+    R22, R22_H,
+    R23, R23_H,
+    R24, R24_H,
+    R25, R25_H,
+    R26, R26_H,
+ /* R27, R27_H, */              // heapbase
+ /* R28, R28_H, */              // thread
+ /* R29, R29_H, */              // fp
+ /* R30, R30_H, */              // lr
+ /* R31, R31_H */               // sp
+);
+
+// Class for all float registers
+reg_class float_reg(
+    V0,
+    V1,
+    V2,
+    V3,
+    V4,
+    V5,
+    V6,
+    V7,
+    V8,
+    V9,
+    V10,
+    V11,
+    V12,
+    V13,
+    V14,
+    V15,
+    V16,
+    V17,
+    V18,
+    V19,
+    V20,
+    V21,
+    V22,
+    V23,
+    V24,
+    V25,
+    V26,
+    V27,
+    V28,
+    V29,
+    V30,
+    V31
+);
+
+// Double precision float registers have virtual `high halves' that
+// are needed by the allocator.
+// Class for all double registers
+reg_class double_reg(
+    V0, V0_H,
+    V1, V1_H,
+    V2, V2_H,
+    V3, V3_H,
+    V4, V4_H,
+    V5, V5_H,
+    V6, V6_H,
+    V7, V7_H,
+    V8, V8_H,
+    V9, V9_H,
+    V10, V10_H,
+    V11, V11_H,
+    V12, V12_H,
+    V13, V13_H,
+    V14, V14_H,
+    V15, V15_H,
+    V16, V16_H,
+    V17, V17_H,
+    V18, V18_H,
+    V19, V19_H,
+    V20, V20_H,
+    V21, V21_H,
+    V22, V22_H,
+    V23, V23_H,
+    V24, V24_H,
+    V25, V25_H,
+    V26, V26_H,
+    V27, V27_H,
+    V28, V28_H,
+    V29, V29_H,
+    V30, V30_H,
+    V31, V31_H
+);
+
+// Class for 128 bit register v0
+reg_class v0_reg(
+    V0, V0_H
+);
+
+// Class for 128 bit register v1
+reg_class v1_reg(
+    V1, V1_H
+);
+
+// Class for 128 bit register v2
+reg_class v2_reg(
+    V2, V2_H
+);
+
+// Class for 128 bit register v3
+reg_class v3_reg(
+    V3, V3_H
+);
+
+// Singleton class for condition codes
+reg_class int_flags(RFLAGS);
+
+%}
+
+//----------DEFINITION BLOCK---------------------------------------------------
+// Define name --> value mappings to inform the ADLC of an integer valued name
+// Current support includes integer values in the range [0, 0x7FFFFFFF]
+// Format:
+//        int_def  <name>         ( <int_value>, <expression>);
+// Generated Code in ad_<arch>.hpp
+//        #define  <name>   (<expression>)
+//        // value == <int_value>
+// Generated code in ad_<arch>.cpp adlc_verification()
+//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
+//
+
+// We follow the ppc-aix port in using a simple cost model which ranks
+// register operations as cheap, memory ops as more expensive and
+// branches as most expensive.  The first two have a low as well as a
+// normal cost.  A huge cost appears to be a way of saying "don't do
+// something".
+
+definitions %{
+  // The default cost (of a register move instruction).
+  int_def INSN_COST            (    100,     100);
+  // Branches and calls cost twice a register move.
+  int_def BRANCH_COST          (    200,     2 * INSN_COST);
+  int_def CALL_COST            (    200,     2 * INSN_COST);
+  // Volatile references are priced well above ordinary memory ops so
+  // the matcher strongly prefers the acquire/release forms.
+  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
+%}
+
+
+//----------SOURCE BLOCK-------------------------------------------------------
+// This is a block of C++ code which provides values, functions, and
+// definitions necessary in the rest of the architecture description
+
+source_hpp %{
+
+// Queried by the shared C2 code when computing branch shortening;
+// this port emits no call trampoline stubs, so both sizes are zero.
+class CallStubImpl {
+
+  //--------------------------------------------------------------
+  //---<  Used for optimization in Compile::shorten_branches  >---
+  //--------------------------------------------------------------
+
+ public:
+  // Size of call trampoline stub.
+  static uint size_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+
+  // number of relocations needed by a call trampoline stub
+  static uint reloc_call_trampoline() {
+    return 0; // no call trampolines on this platform
+  }
+};
+
+// Sizes and emitters for the per-method exception and deopt handler
+// stubs (implementations of the emit_* methods appear further down).
+class HandlerImpl {
+
+ public:
+
+  static int emit_exception_handler(CodeBuffer &cbuf);
+  static int emit_deopt_handler(CodeBuffer& cbuf);
+
+  static uint size_exception_handler() {
+    // a single far branch to the exception blob
+    return MacroAssembler::far_branch_size();
+  }
+
+  static uint size_deopt_handler() {
+    // count one adr and one far branch instruction
+    // NOTE(review): budget is 4 instructions; the far branch may
+    // expand to more than one instruction — the overflow assert in
+    // emit_deopt_handler is what actually enforces this.
+    return 4 * NativeInstruction::instruction_size;
+  }
+};
+
+  // Defined in the source block below; see the comment there.
+  bool preceded_by_ordered_load(const Node *barrier);
+
+  // Use barrier instructions rather than load acquire / store
+  // release.
+  const bool UseBarriersForVolatile = true;
+%}
+
+source %{
+
+  // AArch64 has load acquire and store release instructions which we
+  // use for ordered memory accesses, e.g. for volatiles.  The ideal
+  // graph generator also inserts memory barriers around volatile
+  // accesses, and we don't want to generate both barriers and acq/rel
+  // instructions.  So, when we emit a MemBarAcquire we look back in
+  // the ideal graph for an ordered load and only emit the barrier if
+  // we don't find one.
+
+// Answers whether the memory input of `barrier` is an ordered
+// (acquire) load, possibly reached through a narrow-ptr decode.
+// Used to suppress redundant barriers around volatile loads.
+bool preceded_by_ordered_load(const Node *barrier) {
+  // Fetch the node feeding the barrier's memory edge, if any.
+  Node *mem = barrier->lookup(TypeFunc::Parms);
+  if (mem == NULL) {
+    return false;
+  }
+
+  // Look through a DecodeN/DecodeNKlass to the underlying access.
+  if (mem->is_DecodeNarrowPtr()) {
+    mem = mem->in(1);
+  }
+
+  // Only an ordered (non-relaxed) load counts.
+  return mem->is_Load() && !mem->as_Load()->is_unordered();
+}
+
+#define __ _masm.
+
+// forward declarations for helper functions to convert register
+// indices to register objects
+
+// the ad file has to provide implementations of certain methods
+// expected by the generic code
+//
+// REQUIRED FUNCTIONALITY
+
+//=============================================================================
+
+// !!!!! Special hack to get all types of calls to specify the byte offset
+//       from the start of the call to the point where the return address
+//       will point.
+
+int MachCallStaticJavaNode::ret_addr_offset()
+{
+  // A static call is a single bl (4 bytes), except for a method
+  // handle invoke, which is mov(rfp, sp), bl, mov(sp, rfp) — there
+  // the return address sits one instruction further in (8 bytes).
+  return _method_handle_invoke ? 8 : 4;
+}
+
+// A dynamic call loads the 48-bit inline-cache value with a
+// movz/movk/movk sequence before the bl.
+int MachCallDynamicJavaNode::ret_addr_offset()
+{
+  return 16; // movz, movk, movk, bl
+}
+
+int MachCallRuntimeNode::ret_addr_offset() {
+  // for generated stubs the call will be
+  //   far_call(addr)
+  // for real runtime callouts it will be six instructions
+  // see aarch64_enc_java_to_runtime
+  //   adr(rscratch2, retaddr)
+  //   lea(rscratch1, RuntimeAddress(addr)
+  //   stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)))
+  //   blrt rscratch1
+  // A non-NULL blob means the target lives in the code cache and is
+  // reached with a plain far call.
+  CodeBlob *cb = CodeCache::find_blob(_entry_point);
+  if (cb) {
+    return MacroAssembler::far_branch_size();
+  } else {
+    return 6 * NativeInstruction::instruction_size;
+  }
+}
+
+// Indicate if the safepoint node needs the polling page as an input
+
+// the shared code plants the oop data at the start of the generated
+// code for the safepoint node and that needs to be at the load
+// instruction itself. so we cannot plant a mov of the safepoint poll
+// address followed by a load. setting this to true means the mov is
+// scheduled as a prior instruction. that's better for scheduling
+// anyway.
+
+bool SafePointNode::needs_polling_address_input()
+{
+  return true;
+}
+
+//=============================================================================
+
+#ifndef PRODUCT
+// Debug listing for a breakpoint node.
+void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  st->print("BREAKPOINT");
+}
+#endif
+
+// Emit a brk #0, which traps into the debugger.
+void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  MacroAssembler _masm(&cbuf);
+  __ brk(0);
+}
+
+uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
+  return MachNode::size(ra_);
+}
+
+//=============================================================================
+
+#ifndef PRODUCT
+  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
+    st->print("nop \t# %d bytes pad for loops and calls", _count);
+  }
+#endif
+
+  // Emit _count nops; used to pad loop headers and call sites.
+  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
+    MacroAssembler _masm(&cbuf);
+    for (int i = 0; i < _count; i++) {
+      __ nop();
+    }
+  }
+
+  // Fixed-width ISA: size is exactly _count instructions.
+  uint MachNopNode::size(PhaseRegAlloc*) const {
+    return _count * NativeInstruction::instruction_size;
+  }
+
+//=============================================================================
+// The constant table base is not materialized in a register on this
+// port — constants are reached via absolute addressing — so the base
+// node emits nothing, has zero size and kills no registers.
+const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
+
+int Compile::ConstantTable::calculate_table_base_offset() const {
+  return 0;  // absolute addressing, no offset
+}
+
+bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+  // Never reached: requires_postalloc_expand() returns false.
+  ShouldNotReachHere();
+}
+
+void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+  // Empty encoding
+}
+
+uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
+  return 0;
+}
+
+#ifndef PRODUCT
+void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
+  st->print("-- \t// MachConstantBaseNode (empty encoding)");
+}
+#endif
+
+#ifndef PRODUCT
+// Debug listing of the prolog; mirrors the frame-building strategy of
+// emit/build_frame: small frames use a pre-indexed stp, larger ones
+// materialize the adjustment in rscratch1 first.
+void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  Compile* C = ra_->C;
+
+  int framesize = C->frame_slots() << LogBytesPerInt;
+
+  if (C->need_stack_bang(framesize))
+    st->print("# stack bang size=%d\n\t", framesize);
+
+  if (framesize == 0) {
+    // Is this even possible?
+    st->print("stp  lr, rfp, [sp, #%d]!", -(2 * wordSize));
+  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
+    st->print("sub  sp, sp, #%d\n\t", framesize);
+    // NOTE(review): this branch prints "rfp, lr" while the other two
+    // print "lr, rfp" — confirm which order build_frame actually
+    // stores and make the listing consistent.
+    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
+  } else {
+    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
+    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
+    st->print("sub  sp, sp, rscratch1");
+  }
+}
+#endif
+
+// Emit the method prolog: invalidation nop, optional stack bang,
+// frame construction, simulator notification and constant-table
+// base-offset setup.
+void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  Compile* C = ra_->C;
+  MacroAssembler _masm(&cbuf);
+
+  // n.b. frame size includes space for return pc and rfp
+  const long framesize = C->frame_size_in_bytes();
+  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
+
+  // insert a nop at the start of the prolog so we can patch in a
+  // branch if we need to invalidate the method later
+  __ nop();
+
+  int bangsize = C->bang_size_in_bytes();
+  if (C->need_stack_bang(bangsize) && UseStackBanging)
+    __ generate_stack_overflow_check(bangsize);
+
+  __ build_frame(framesize);
+
+  if (NotifySimulator) {
+    __ notify(Assembler::method_entry);
+  }
+
+  if (VerifyStackAtCalls) {
+    Unimplemented();
+  }
+
+  C->set_frame_complete(cbuf.insts_size());
+
+  if (C->has_mach_constant_base_node()) {
+    // NOTE: We set the table base offset here because users might be
+    // emitted before MachConstantBaseNode.
+    Compile::ConstantTable& constant_table = C->constant_table();
+    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
+  }
+}
+
+uint MachPrologNode::size(PhaseRegAlloc* ra_) const
+{
+  return MachNode::size(ra_); // too many variables; just compute it
+                              // the hard way
+}
+
+int MachPrologNode::reloc() const
+{
+  return 0;
+}
+
+//=============================================================================
+
+#ifndef PRODUCT
+// Debug listing of the epilog: pop the frame (mirroring the prolog's
+// three size classes) and, for method compilations, touch the
+// safepoint polling page.
+void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  Compile* C = ra_->C;
+  int framesize = C->frame_slots() << LogBytesPerInt;
+
+  st->print("# pop frame %d\n\t",framesize);
+
+  if (framesize == 0) {
+    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
+  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
+    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
+    st->print("add  sp, sp, #%d\n\t", framesize);
+  } else {
+    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
+    st->print("add  sp, sp, rscratch1\n\t");
+    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
+  }
+
+  if (do_polling() && C->is_method_compilation()) {
+    st->print("# touch polling page\n\t");
+    st->print("mov  rscratch1, #0x%lx\n\t", p2i(os::get_polling_page()));
+    st->print("ldr zr, [rscratch1]");
+  }
+}
+#endif
+
+void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  Compile* C = ra_->C;
+  MacroAssembler _masm(&cbuf);
+  int framesize = C->frame_slots() << LogBytesPerInt;
+
+  __ remove_frame(framesize);
+
+  if (NotifySimulator) {
+    __ notify(Assembler::method_reentry);
+  }
+
+  // Return-type safepoint poll; the relocation lets the VM find it.
+  if (do_polling() && C->is_method_compilation()) {
+    __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type);
+  }
+}
+
+uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
+  // Variable size. Determine dynamically.
+  return MachNode::size(ra_);
+}
+
+int MachEpilogNode::reloc() const {
+  // Return number of relocatable values contained in this instruction.
+  return 1; // 1 for polling page.
+}
+
+const Pipeline * MachEpilogNode::pipeline() const {
+  return MachNode::pipeline_class();
+}
+
+// This method seems to be obsolete. It is declared in machnode.hpp
+// and defined in all *.ad files, but it is never called. Should we
+// get rid of it?
+int MachEpilogNode::safepoint_offset() const {
+  assert(do_polling(), "no return for this epilog node");
+  return 4;
+}
+
+//=============================================================================
+
+// Figure out which register class each belongs in: rc_int, rc_float or
+// rc_stack.
+enum RC { rc_bad, rc_int, rc_float, rc_stack };
+
+// Map an allocator register number onto its class.  The OptoReg
+// numbering used here is: [0, 60) integer register halves, then
+// [60, 124) float register halves, then the flags, then stack slots.
+static enum RC rc_class(OptoReg::Name reg) {
+
+  if (reg == OptoReg::Bad) {
+    return rc_bad;
+  }
+
+  // we have 30 int registers * 2 halves
+  // (rscratch1 and rscratch2 are omitted)
+
+  if (reg < 60) {
+    return rc_int;
+  }
+
+  // we have 32 float register * 2 halves
+  if (reg < 60 + 64) {
+    return rc_float;
+  }
+
+  // Between float regs & stack is the flags regs.
+  assert(OptoReg::is_stack(reg), "blow up if spilling flags");
+
+  return rc_stack;
+}
+
+// Shared worker for MachSpillCopyNode format/emit/size.  Moves a
+// value between two register-allocator locations (gpr, fpr or stack
+// slot), choosing the 64-bit form when the source and destination are
+// aligned-adjacent pairs and the 32-bit form otherwise.  If cbuf is
+// non-NULL the instructions are emitted; otherwise, if st is non-NULL,
+// an assembly listing is printed.  Returns the size in bytes of the
+// emitted code (4 for a single instruction, 8 for a mem-mem copy).
+uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
+  Compile* C = ra_->C;
+
+  // Get registers to move.
+  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
+  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
+  OptoReg::Name dst_hi = ra_->get_reg_second(this);
+  OptoReg::Name dst_lo = ra_->get_reg_first(this);
+
+  enum RC src_hi_rc = rc_class(src_hi);
+  enum RC src_lo_rc = rc_class(src_lo);
+  enum RC dst_hi_rc = rc_class(dst_hi);
+  enum RC dst_lo_rc = rc_class(dst_lo);
+
+  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
+
+  if (src_hi != OptoReg::Bad) {
+    assert((src_lo&1)==0 && src_lo+1==src_hi &&
+           (dst_lo&1)==0 && dst_lo+1==dst_hi,
+           "expected aligned-adjacent pairs");
+  }
+
+  if (src_lo == dst_lo && src_hi == dst_hi) {
+    return 0;            // Self copy, no move.
+  }
+
+  switch (src_lo_rc) {
+  case rc_int:
+    if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ mov(as_Register(Matcher::_regEncode[dst_lo]),
+                 as_Register(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("mov  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ movw(as_Register(Matcher::_regEncode[dst_lo]),
+                  as_Register(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("movw  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      }
+    } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+                   as_Register(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("fmovd  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+                   as_Register(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("fmovs  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      }
+    } else {                    // gpr --> stack spill
+      assert(dst_lo_rc == rc_stack, "spill to bad register class");
+      int dst_offset = ra_->reg2offset(dst_lo);
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ str(as_Register(Matcher::_regEncode[src_lo]),
+                 Address(sp, dst_offset));
+        } else if (st) {
+          st->print("str  %s, [sp, #%d]\t# spill",
+                    Matcher::regName[src_lo],
+                    dst_offset);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ strw(as_Register(Matcher::_regEncode[src_lo]),
+                 Address(sp, dst_offset));
+        } else if (st) {
+          st->print("strw  %s, [sp, #%d]\t# spill",
+                    Matcher::regName[src_lo],
+                    dst_offset);
+        }
+      }
+    }
+    return 4;
+  case rc_float:
+    if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
+                   as_FloatRegister(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("fmovd  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
+                   as_FloatRegister(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("fmovs  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      }
+    } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+                   as_FloatRegister(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("fmovd  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+                   as_FloatRegister(Matcher::_regEncode[src_lo]));
+        } else if (st) {
+          st->print("fmovs  %s, %s\t# shuffle",
+                    Matcher::regName[dst_lo],
+                    Matcher::regName[src_lo]);
+        }
+      }
+    } else {                    // fpr --> stack spill
+      assert(dst_lo_rc == rc_stack, "spill to bad register class");
+      int dst_offset = ra_->reg2offset(dst_lo);
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ strd(as_FloatRegister(Matcher::_regEncode[src_lo]),
+                 Address(sp, dst_offset));
+        } else if (st) {
+          st->print("strd  %s, [sp, #%d]\t# spill",
+                    Matcher::regName[src_lo],
+                    dst_offset);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ strs(as_FloatRegister(Matcher::_regEncode[src_lo]),
+                 Address(sp, dst_offset));
+        } else if (st) {
+          st->print("strs  %s, [sp, #%d]\t# spill",
+                    Matcher::regName[src_lo],
+                    dst_offset);
+        }
+      }
+    }
+    return 4;
+  case rc_stack:
+    int src_offset = ra_->reg2offset(src_lo);
+    if (dst_lo_rc == rc_int) {  // stack --> gpr load
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ ldr(as_Register(Matcher::_regEncode[dst_lo]),
+                 Address(sp, src_offset));
+        } else if (st) {
+          st->print("ldr  %s, [sp, %d]\t# restore",
+                    Matcher::regName[dst_lo],
+                    src_offset);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ ldrw(as_Register(Matcher::_regEncode[dst_lo]),
+                  Address(sp, src_offset));
+        } else if (st) {
+          // Fixed: listing said "ldr" but the emitted instruction is ldrw.
+          st->print("ldrw  %s, [sp, %d]\t# restore",
+                    Matcher::regName[dst_lo],
+                    src_offset);
+        }
+      }
+      return 4;
+    } else if (dst_lo_rc == rc_float) { // stack --> fpr load
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ ldrd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+                 Address(sp, src_offset));
+        } else if (st) {
+          st->print("ldrd  %s, [sp, %d]\t# restore",
+                    Matcher::regName[dst_lo],
+                    src_offset);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ ldrs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
+                  Address(sp, src_offset));
+        } else if (st) {
+          st->print("ldrs  %s, [sp, %d]\t# restore",
+                    Matcher::regName[dst_lo],
+                    src_offset);
+        }
+      }
+      return 4;
+    } else {                    // stack --> stack copy
+      assert(dst_lo_rc == rc_stack, "spill to bad register class");
+      int dst_offset = ra_->reg2offset(dst_lo);
+      if (((src_lo & 1) == 0 && src_lo + 1 == src_hi) &&
+          (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi) {
+          // 64 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ ldr(rscratch1, Address(sp, src_offset));
+          __ str(rscratch1, Address(sp, dst_offset));
+        } else if (st) {
+          st->print("ldr  rscratch1, [sp, %d]\t# mem-mem spill",
+                    src_offset);
+          st->print("\n\t");
+          st->print("str  rscratch1, [sp, %d]",
+                    dst_offset);
+        }
+      } else {
+        // 32 bit
+        if (cbuf) {
+          MacroAssembler _masm(cbuf);
+          __ ldrw(rscratch1, Address(sp, src_offset));
+          __ strw(rscratch1, Address(sp, dst_offset));
+        } else if (st) {
+          st->print("ldrw  rscratch1, [sp, %d]\t# mem-mem spill",
+                    src_offset);
+          st->print("\n\t");
+          st->print("strw  rscratch1, [sp, %d]",
+                    dst_offset);
+        }
+      }
+      // two instructions: load into rscratch1, store back out
+      return 8;
+    }
+  }
+
+  assert(false," bad rc_class for spill ");
+  Unimplemented();
+  return 0;
+
+}
+
+#ifndef PRODUCT
+// All three entry points delegate to the shared worker above.
+void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  if (!ra_)
+    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
+  else
+    implementation(NULL, ra_, false, st);
+}
+#endif
+
+void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  implementation(&cbuf, ra_, false, NULL);
+}
+
+uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
+  return implementation(NULL, ra_, true, NULL);
+}
+
+//=============================================================================
+
+#ifndef PRODUCT
+// Debug listing for a BoxLock: materialize the lock's stack-slot
+// address into its register (mirrors emit below).
+void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
+  int reg = ra_->get_reg_first(this);
+  // Fixed: AArch64's stack pointer is "sp" (not x86's rsp) and the
+  // original string contained an unbalanced ']'.
+  st->print("add %s, sp, #%d\t# box lock",
+            Matcher::regName[reg], offset);
+}
+#endif
+
+// Compute the address of the box lock's stack slot into its register.
+void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  MacroAssembler _masm(&cbuf);
+
+  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
+  int reg    = ra_->get_encode(this);
+
+  // Frame offsets are expected to fit the add/sub immediate range;
+  // anything else indicates a broken frame layout.
+  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
+    __ add(as_Register(reg), sp, offset);
+  } else {
+    ShouldNotReachHere();
+  }
+}
+
+uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
+  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
+  return 4;
+}
+
+//=============================================================================
+
+#ifndef PRODUCT
+// Debug listing for the unverified entry point: load the receiver's
+// klass, compare it against the inline cache and branch to the miss
+// stub on inequality (mirrors the cmp_klass in emit below).
+void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
+{
+  st->print_cr("# MachUEPNode");
+  if (UseCompressedClassPointers) {
+    // Fixed: restored the missing '[' in the address operand.
+    st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+    if (Universe::narrow_klass_shift() != 0) {
+      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
+    }
+  } else {
+    // Fixed: this branch loads the full-width klass, so it is not
+    // "compressed"; also restored the missing '['.
+    st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
+  }
+  st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
+  st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
+}
+#endif
+
+void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
+{
+  // This is the unverified entry point.
+  MacroAssembler _masm(&cbuf);
+
+  // Compare the receiver's klass (via rscratch1/rscratch2) against
+  // the inline cache; fall through to the verified entry on a hit.
+  __ cmp_klass(j_rarg0, rscratch2, rscratch1);
+  Label skip;
+  // TODO
+  // can we avoid this skip and still use a reloc?
+  __ br(Assembler::EQ, skip);
+  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+  __ bind(skip);
+}
+
+uint MachUEPNode::size(PhaseRegAlloc* ra_) const
+{
+  return MachNode::size(ra_);
+}
+
+// REQUIRED EMIT CODE
+
+//=============================================================================
+
+// Emit exception handler code.
+int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
+{
+  // mov rscratch1 #exception_blob_entry_point
+  // br rscratch1
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a handler.
+  MacroAssembler _masm(&cbuf);
+  address base =
+  __ start_a_stub(size_exception_handler());
+  if (base == NULL)  return 0;  // CodeBuffer::expand failed
+  int offset = __ offset();
+  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
+  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
+  __ end_a_stub();
+  return offset;
+}
+
+// Emit deopt handler code.
+int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
+{
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a handler.
+  MacroAssembler _masm(&cbuf);
+  address base =
+  __ start_a_stub(size_deopt_handler());
+  if (base == NULL)  return 0;  // CodeBuffer::expand failed
+  int offset = __ offset();
+
+  __ adr(lr, __ pc());
+  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
+
+  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+  __ end_a_stub();
+  return offset;
+}
+
+// REQUIRED MATCHER CODE
+
+//=============================================================================
+
+const bool Matcher::match_rule_supported(int opcode) {
+  // TODO
+  // identify extra cases that we might want to provide match rules for
+  // e.g. Op_StrEquals and other intrinsics
+  //
+  // By default, every opcode with a rule in this AD file is supported.
+  return has_match_rule(opcode);
+}
+
+// Not used on this port.
+int Matcher::regnum_to_fpu_offset(int regnum)
+{
+  Unimplemented();
+  return 0;
+}
+
+// Branch shortening is not implemented on this port yet.
+bool Matcher::is_short_branch_offset(int rule, int br_size, int offset)
+{
+  Unimplemented();
+  return false;
+}
+
+const bool Matcher::isSimpleConstant64(jlong value) {
+  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
+  // Probably always true, even if a temp register is required.
+  return true;
+}
+
+// true just means we have fast l2f conversion
+const bool Matcher::convL2FSupported(void) {
+  return true;
+}
+
+// Vector width in bytes.
+// Zero disables auto-vectorization until NEON support is wired up.
+const int Matcher::vector_width_in_bytes(BasicType bt) {
+  // TODO fixme
+  return 0;
+}
+
+// Limits on vector size (number of elements) loaded into vector.
+// With a width of 0 these both evaluate to 0 (vectors disabled).
+const int Matcher::max_vector_size(const BasicType bt) {
+  return vector_width_in_bytes(bt)/type2aelembytes(bt);
+}
+const int Matcher::min_vector_size(const BasicType bt) {
+  int max_size = max_vector_size(bt);
+  // Min size which can be loaded into vector is 4 bytes.
+  int size = (type2aelembytes(bt) == 1) ? 4 : 2;
+  return MIN2(size,max_size);
+}
+
+// Vector ideal reg.
+const int Matcher::vector_ideal_reg(int len) {
+  // TODO fixme
+  return Op_RegD;
+}
+
+// Only the low bits of the vector shift count are used
+// (comment inherited from x86's "xmm reg" wording).
+const int Matcher::vector_shift_count_ideal_reg(int size) {
+  // TODO fixme
+  return Op_RegL;
+}
+
+// AES support not yet implemented
+const bool Matcher::pass_original_key_for_aes() {
+  return false;
+}
+
+// Are misaligned vector loads/stores supported?  (Comment inherited
+// from x86; disabled here pending vector support.)
+const bool Matcher::misaligned_vectors_ok() {
+  // TODO fixme
+  // return !AlignVector; // can be changed by flag
+  return false;
+}
+
+// false => size gets scaled to BytesPerLong, ok.
+const bool Matcher::init_array_count_is_in_bytes = false;
+
+// Threshold size for cleararray.
+const int Matcher::init_array_short_size = 18 * BytesPerLong;
+
+// Use conditional move (CMOVL)
+const int Matcher::long_cmove_cost() {
+  // long cmoves are no more expensive than int cmoves
+  return 0;
+}
+
+const int Matcher::float_cmove_cost() {
+  // float cmoves are no more expensive than int cmoves
+  return 0;
+}
+
+// Does the CPU require late expand (see block.cpp for description of late expand)?
+const bool Matcher::require_postalloc_expand = false;
+
+// Should the Matcher clone shifts on addressing modes, expecting them
+// to be subsumed into complex addressing expressions or compute them
+// into registers?  True for Intel but false for most RISCs
+const bool Matcher::clone_shift_expressions = false;
+
+// Do we need to mask the count passed to shift instructions or does
+// the cpu only look at the lower 5/6 bits anyway?
+const bool Matcher::need_masked_shift_count = false;
+
+// This affects two different things:
+//  - how Decode nodes are matched
+//  - how ImplicitNullCheck opportunities are recognized
+// If true, the matcher will try to remove all Decodes and match them
+// (as operands) into nodes. NullChecks are not prepared to deal with
+// Decodes by final_graph_reshaping().
+// If false, final_graph_reshaping() forces the decode behind the Cmp
+// for a NullCheck. The matcher matches the Decode node into a register.
+// Implicit_null_check optimization moves the Decode along with the
+// memory operation back up before the NullCheck.
+bool Matcher::narrow_oop_use_complex_address() {
+  return Universe::narrow_oop_shift() == 0;
+}
+
+bool Matcher::narrow_klass_use_complex_address() {
+// TODO
+// decide whether we need to set this to true
+  return false;
+}
+
+// Is it better to copy float constants, or load them directly from
+// memory?  Intel can load a float constant from a direct address,
+// requiring no extra registers.  Most RISCs will have to materialize
+// an address into a register first, so they would do better to copy
+// the constant from stack.
+const bool Matcher::rematerialize_float_constants = false;
+
+// If CPU can load and store mis-aligned doubles directly then no
+// fixup is needed.  Else we split the double into 2 integer pieces
+// and move it piece-by-piece.  Only happens when passing doubles into
+// C code as the Java calling convention forces doubles to be aligned.
+const bool Matcher::misaligned_doubles_ok = true;
+
+// Not used on this port: no implicit null check fixup is performed.
+// (The old "No-op on amd64" comment was inherited from the amd64 AD
+// file; here the method is simply Unimplemented.)
+void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
+  Unimplemented();
+}
+
+// Advertise here if the CPU requires explicit rounding operations to
+// implement the UseStrictFP mode.
+const bool Matcher::strict_fp_requires_explicit_rounding = false;
+
+// Are floats converted to double when stored to stack during
+// deoptimization?
+bool Matcher::float_in_double() { return true; }
+
+// Do ints take an entire long register or just half?
+// The relevant question is how the int is callee-saved:
+// the whole long is written but de-opt'ing will have to extract
+// the relevant 32 bits.
+const bool Matcher::int_in_long = true;
+
+// Return whether or not this register is ever used as an argument.
+// This function is used on startup to build the trampoline stubs in
+// generateOptoStub.  Registers not mentioned will be killed by the VM
+// call in the trampoline, and arguments in those registers not be
+// available to the callee.
+bool Matcher::can_be_java_arg(int reg)
+{
+  // Java arguments on AArch64 travel in r0-r7 (integer/pointer) and
+  // v0-v7 (float/double); each 64-bit slot covers the low and high
+  // (_H) halves of the register.
+  return
+    reg ==  R0_num || reg == R0_H_num ||
+    reg ==  R1_num || reg == R1_H_num ||
+    reg ==  R2_num || reg == R2_H_num ||
+    reg ==  R3_num || reg == R3_H_num ||
+    reg ==  R4_num || reg == R4_H_num ||
+    reg ==  R5_num || reg == R5_H_num ||
+    reg ==  R6_num || reg == R6_H_num ||
+    reg ==  R7_num || reg == R7_H_num ||
+    reg ==  V0_num || reg == V0_H_num ||
+    reg ==  V1_num || reg == V1_H_num ||
+    reg ==  V2_num || reg == V2_H_num ||
+    reg ==  V3_num || reg == V3_H_num ||
+    reg ==  V4_num || reg == V4_H_num ||
+    reg ==  V5_num || reg == V5_H_num ||
+    reg ==  V6_num || reg == V6_H_num ||
+    reg ==  V7_num || reg == V7_H_num;
+}
+
+// An argument register may also be used as a spill target.
+bool Matcher::is_spillable_arg(int reg)
+{
+  return can_be_java_arg(reg);
+}
+
+// Should long division by a constant be expanded by the assembler
+// rather than via the generic graph transform?  Not on AArch64.
+bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
+  return false;
+}
+
+// Register for DIVI projection of divmodI.  AArch64 has no combined
+// div/mod instruction, so none of these projections should be asked for.
+RegMask Matcher::divI_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for MODI projection of divmodI.
+RegMask Matcher::modI_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for DIVL projection of divmodL.
+RegMask Matcher::divL_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for MODL projection of divmodL.
+RegMask Matcher::modL_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// No SP save/restore mask is needed for method-handle invokes here.
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return RegMask();
+}
+
+// helper for encoding java_to_runtime calls on sim
+//
+// this is needed to compute the extra arguments required when
+// planting a call to the simulator blrt instruction. the TypeFunc
+// can be queried to identify the counts for integral, and floating
+// arguments and the return type
+
+static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype)
+{
+  int gps = 0;
+  int fps = 0;
+  const TypeTuple *domain = tf->domain();
+  int max = domain->cnt();
+  // Walk the declared parameter types, counting FP and general args.
+  for (int i = TypeFunc::Parms; i < max; i++) {
+    const Type *t = domain->field_at(i);
+    switch(t->basic_type()) {
+    case T_FLOAT:
+    case T_DOUBLE:
+      fps++;
+      // NOTE(review): there is no break here, so FP arguments fall
+      // through and also bump gps -- gpcnt therefore counts *all*
+      // arguments, not just the integral ones.  Confirm the simulator
+      // blrt expects that; otherwise a break is missing.
+    default:
+      gps++;
+    }
+  }
+  gpcnt = gps;
+  fpcnt = fps;
+  // Map the Java return type onto the simulator's return-type codes.
+  BasicType rt = tf->return_type();
+  switch (rt) {
+  case T_VOID:
+    rtype = MacroAssembler::ret_type_void;
+    break;
+  default:
+    rtype = MacroAssembler::ret_type_integral;
+    break;
+  case T_FLOAT:
+    rtype = MacroAssembler::ret_type_float;
+    break;
+  case T_DOUBLE:
+    rtype = MacroAssembler::ret_type_double;
+    break;
+  }
+}
+
+// Emit a volatile (acquire/release) load or store via INSN.  Volatile
+// accesses only support a plain base register, so index/scale/offset
+// addressing modes are rejected.  NOTE: _masm is deliberately declared
+// *outside* the braces so that an enc_class using this macro can keep
+// emitting through the __ shorthand after the macro expands.
+#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
+  MacroAssembler _masm(&cbuf);                                          \
+  {                                                                     \
+    guarantee(INDEX == -1, "mode not permitted for volatile");          \
+    guarantee(DISP == 0, "mode not permitted for volatile");            \
+    guarantee(SCALE == 0, "mode not permitted for volatile");           \
+    __ INSN(REG, as_Register(BASE));                                    \
+  }
+
+// Member-function-pointer types for the integer and FP forms of a
+// MacroAssembler load/store, used by the loadStore() helpers below.
+typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
+typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
+
+  // Used for all non-volatile memory accesses.  The use of
+  // $mem->opcode() to discover whether this pattern uses sign-extended
+  // offsets is something of a kludge.
+  //
+  // insn:   the MacroAssembler load/store to emit
+  // reg:    integer source/destination register
+  // opcode: the ADL operand's opcode, used only to pick the extend mode
+  // base/index/size/disp: decomposed addressing-mode components;
+  //         index == -1 means "no index register"
+  static void loadStore(MacroAssembler masm, mem_insn insn,
+                         Register reg, int opcode,
+                         Register base, int index, int size, int disp)
+  {
+    Address::extend scale;
+
+    // Hooboy, this is fugly.  We need a way to communicate to the
+    // encoder that the index needs to be sign extended, so we have to
+    // enumerate all the cases.
+    switch (opcode) {
+    case INDINDEXSCALEDOFFSETI2L:
+    case INDINDEXSCALEDI2L:
+    case INDINDEXSCALEDOFFSETI2LN:
+    case INDINDEXSCALEDI2LN:
+      // int-to-long index conversions: sign-extend the 32-bit index
+      scale = Address::sxtw(size);
+      break;
+    default:
+      scale = Address::lsl(size);
+    }
+
+    if (index == -1) {
+      (masm.*insn)(reg, Address(base, disp));
+    } else {
+      if (disp == 0) {
+        (masm.*insn)(reg, Address(base, as_Register(index), scale));
+      } else {
+        // base+disp+index*scale has no single addressing mode; fold
+        // base+disp into rscratch1 first.
+        masm.lea(rscratch1, Address(base, disp));
+        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
+      }
+    }
+  }
+
+  // Floating-point twin of loadStore() above: identical addressing
+  // logic, but takes a FloatRegister and an FP load/store.
+  static void loadStore(MacroAssembler masm, mem_float_insn insn,
+                         FloatRegister reg, int opcode,
+                         Register base, int index, int size, int disp)
+  {
+    Address::extend scale;
+
+    switch (opcode) {
+    case INDINDEXSCALEDOFFSETI2L:
+    case INDINDEXSCALEDI2L:
+    case INDINDEXSCALEDOFFSETI2LN:
+    case INDINDEXSCALEDI2LN:
+      // int-to-long index conversions: sign-extend the 32-bit index
+      scale = Address::sxtw(size);
+      break;
+    default:
+      scale = Address::lsl(size);
+    }
+
+     if (index == -1) {
+      (masm.*insn)(reg, Address(base, disp));
+    } else {
+      if (disp == 0) {
+        (masm.*insn)(reg, Address(base, as_Register(index), scale));
+      } else {
+        // fold base+disp into rscratch1, then apply the scaled index
+        masm.lea(rscratch1, Address(base, disp));
+        (masm.*insn)(reg, Address(rscratch1, as_Register(index), scale));
+      }
+    }
+  }
+
+%}
+
+
+
+//----------ENCODING BLOCK-----------------------------------------------------
+// This block specifies the encoding classes used by the compiler to
+// output byte streams.  Encoding classes are parameterized macros
+// used by Machine Instruction Nodes in order to generate the bit
+// encoding of the instruction.  Operands specify their base encoding
+// interface with the interface keyword.  There are currently
+// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
+// COND_INTER.  REG_INTER causes an operand to generate a function
+// which returns its register number when queried.  CONST_INTER causes
+// an operand to generate a function which returns the value of the
+// constant when queried.  MEMORY_INTER causes an operand to generate
+// four functions which return the Base Register, the Index Register,
+// the Scale Value, and the Offset Value of the operand when queried.
+// COND_INTER causes an operand to generate six functions which return
+// the encoding code (ie - encoding bits for the instruction)
+// associated with each basic boolean condition for a conditional
+// instruction.
+//
+// Instructions specify two basic values for encoding.  Again, a
+// function is available to check if the constant displacement is an
+// oop. They use the ins_encode keyword to specify their encoding
+// classes (which must be a sequence of enc_class names, and their
+// parameters, specified in the encoding block), and they use the
+// opcode keyword to specify, in order, their primary, secondary, and
+// tertiary opcode.  Only the opcode sections which a particular
+// instruction needs for encoding need to be specified.
+encode %{
+  // Build emit functions for each basic byte or larger field in the
+  // intel encoding scheme (opcode, rm, sib, immediate), and call them
+  // from C++ code in the enc_class source block.  Emit functions will
+  // live in the main source block for now.  In future, we can
+  // generalize this by adding a syntax that specifies the sizes of
+  // fields in an order, so that the adlc can build the emit functions
+  // automagically
+
+  // catch all for unimplemented encodings
+  enc_class enc_unimplemented %{
+    MacroAssembler _masm(&cbuf);
+    __ unimplemented("C2 catch all");
+  %}
+
+  // BEGIN Non-volatile memory access
+
+  // Each encoding below simply routes through loadStore(), passing
+  // $mem->opcode() so the helper can pick sign- vs zero-extended
+  // index scaling.  Mnemonics: ldrsb/ldrsh/ldrsw sign-extend,
+  // ldrb/ldrh zero-extend; a trailing 'w' targets a 32-bit register.
+
+  enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  // iRegL overload of the byte load (same emission, long destination)
+  enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  // FP loads: ldrs = 32-bit single, ldrd = 64-bit double
+  enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{
+    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{
+    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  // Non-volatile stores.  The *0 variants store the zero register (zr)
+  // directly, avoiding the need to materialize a zero constant.
+
+  enc_class aarch64_enc_strb(iRegI src, memory mem) %{
+    Register src_reg = as_Register($src$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_strb0(memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_strh(iRegI src, memory mem) %{
+    Register src_reg = as_Register($src$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_strh0(memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_strw(iRegI src, memory mem) %{
+    Register src_reg = as_Register($src$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_strw0(memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_str(iRegL src, memory mem) %{
+    Register src_reg = as_Register($src$$reg);
+    // we sometimes get asked to store the stack pointer into the
+    // current thread -- we cannot do that directly on AArch64
+    // (sp is not encodable as the data register), so copy it through
+    // rscratch2 first
+    if (src_reg == r31_sp) {
+      MacroAssembler _masm(&cbuf);
+      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
+      __ mov(rscratch2, sp);
+      src_reg = rscratch2;
+    }
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_str0(memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_strs(vRegF src, memory mem) %{
+    FloatRegister src_reg = as_FloatRegister($src$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  enc_class aarch64_enc_strd(vRegD src, memory mem) %{
+    FloatRegister src_reg = as_FloatRegister($src$$reg);
+    loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
+               as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+  %}
+
+  // END Non-volatile memory access
+
+  // volatile loads and stores
+  //
+  // These use MOV_VOLATILE (ldar*/stlr* acquire/release forms), which
+  // only accepts a plain base register.  There are no sign-extending
+  // acquire loads, so the ldars* variants zero-extend with ldarb/ldarh
+  // and then sign-extend the result explicitly.
+
+  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
+    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+                 rscratch1, stlrb);
+  %}
+
+  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
+    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+                 rscratch1, stlrh);
+  %}
+
+  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
+    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+                 rscratch1, stlrw);
+  %}
+
+
+  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarb);
+    // sign-extend: no ldarsb exists
+    __ sxtbw(dst_reg, dst_reg);
+  %}
+
+  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarb);
+    __ sxtb(dst_reg, dst_reg);
+  %}
+
+  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
+    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarb);
+  %}
+
+  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
+    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarb);
+  %}
+
+  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarh);
+    __ sxthw(dst_reg, dst_reg);
+  %}
+
+  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
+    Register dst_reg = as_Register($dst$$reg);
+    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarh);
+    __ sxth(dst_reg, dst_reg);
+  %}
+
+  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
+    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarh);
+  %}
+
+  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
+    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarh);
+  %}
+
+  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
+    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarw);
+  %}
+
+  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
+    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarw);
+  %}
+
+  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
+    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldar);
+  %}
+
+  // FP acquire loads: ldar* targets integer registers only, so load
+  // into rscratch1 and fmov across to the FP register.
+  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
+    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldarw);
+    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
+  %}
+
+  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
+    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+             rscratch1, ldar);
+    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
+  %}
+
+  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
+    Register src_reg = as_Register($src$$reg);
+    // we sometimes get asked to store the stack pointer into the
+    // current thread -- we cannot do that directly on AArch64
+    if (src_reg == r31_sp) {
+        MacroAssembler _masm(&cbuf);
+      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
+      __ mov(rscratch2, sp);
+      src_reg = rscratch2;
+    }
+    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+                 rscratch1, stlr);
+  %}
+
+  // FP release stores: fmov the value into rscratch2 first, since
+  // stlr* only takes integer registers.  The inner block scopes the
+  // local _masm so MOV_VOLATILE can declare its own.
+  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
+    {
+      MacroAssembler _masm(&cbuf);
+      FloatRegister src_reg = as_FloatRegister($src$$reg);
+      __ fmovs(rscratch2, src_reg);
+    }
+    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+                 rscratch1, stlrw);
+  %}
+
+  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
+    {
+      MacroAssembler _masm(&cbuf);
+      FloatRegister src_reg = as_FloatRegister($src$$reg);
+      __ fmovd(rscratch2, src_reg);
+    }
+    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
+                 rscratch1, stlr);
+  %}
+
+  // synchronized read/update encodings
+
+  // Load-acquire-exclusive.  ldaxr only accepts a base register, so
+  // any composite addressing mode is folded into rscratch1 with lea
+  // before the exclusive load.
+  enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    Register base = as_Register($mem$$base);
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    if (index == -1) {
+       if (disp != 0) {
+        __ lea(rscratch1, Address(base, disp));
+        __ ldaxr(dst_reg, rscratch1);
+      } else {
+        // TODO
+        // should we ever get anything other than this case?
+        __ ldaxr(dst_reg, base);
+      }
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
+        __ ldaxr(dst_reg, rscratch1);
+      } else {
+        // two-step lea: base+disp, then add the scaled index
+        __ lea(rscratch1, Address(base, disp));
+        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
+        __ ldaxr(dst_reg, rscratch1);
+      }
+    }
+  %}
+
+  // Store-release-exclusive.  rscratch1 receives the stlxr status
+  // (0 = success); the final cmpw materializes that status in the
+  // flags (EQ on success) for a following conditional.
+  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    Register src_reg = as_Register($src$$reg);
+    Register base = as_Register($mem$$base);
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    if (index == -1) {
+       if (disp != 0) {
+        __ lea(rscratch2, Address(base, disp));
+        __ stlxr(rscratch1, src_reg, rscratch2);
+      } else {
+        // TODO
+        // should we ever get anything other than this case?
+        __ stlxr(rscratch1, src_reg, base);
+      }
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
+        __ stlxr(rscratch1, src_reg, rscratch2);
+      } else {
+        __ lea(rscratch2, Address(base, disp));
+        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
+        __ stlxr(rscratch1, src_reg, rscratch2);
+      }
+    }
+    __ cmpw(rscratch1, zr);
+  %}
+
+  // 64-bit compare-and-swap via an ldxr/stlxr retry loop.  On exit the
+  // flags hold EQ iff the swap succeeded: a failed compare branches to
+  // done with NE set, and on the success path the EQ from cmp survives
+  // (stlxr/cbnzw do not write the flags).  aarch64_enc_cset_eq below
+  // consumes this to produce the boolean result.
+  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
+    MacroAssembler _masm(&cbuf);
+    Register old_reg = as_Register($oldval$$reg);
+    Register new_reg = as_Register($newval$$reg);
+    Register base = as_Register($mem$$base);
+    Register addr_reg;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    // resolve the addressing mode into a single register (ldxr/stlxr
+    // take base-register-only addresses)
+    if (index == -1) {
+       if (disp != 0) {
+        __ lea(rscratch2, Address(base, disp));
+        addr_reg = rscratch2;
+      } else {
+        // TODO
+        // should we ever get anything other than this case?
+        addr_reg = base;
+      }
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
+        addr_reg = rscratch2;
+      } else {
+        __ lea(rscratch2, Address(base, disp));
+        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
+        addr_reg = rscratch2;
+      }
+    }
+    Label retry_load, done;
+    __ bind(retry_load);
+    __ ldxr(rscratch1, addr_reg);
+    __ cmp(rscratch1, old_reg);
+    __ br(Assembler::NE, done);
+    __ stlxr(rscratch1, new_reg, addr_reg);
+    __ cbnzw(rscratch1, retry_load);   // non-zero status => lost reservation, retry
+    __ bind(done);
+  %}
+
+  // 32-bit variant of aarch64_enc_cmpxchg above (ldxrw/stlxrw); same
+  // addressing resolution, same flag contract (EQ iff swapped).
+  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
+    MacroAssembler _masm(&cbuf);
+    Register old_reg = as_Register($oldval$$reg);
+    Register new_reg = as_Register($newval$$reg);
+    Register base = as_Register($mem$$base);
+    Register addr_reg;
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    if (index == -1) {
+       if (disp != 0) {
+        __ lea(rscratch2, Address(base, disp));
+        addr_reg = rscratch2;
+      } else {
+        // TODO
+        // should we ever get anything other than this case?
+        addr_reg = base;
+      }
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
+        addr_reg = rscratch2;
+      } else {
+        __ lea(rscratch2, Address(base, disp));
+        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
+        addr_reg = rscratch2;
+      }
+    }
+    Label retry_load, done;
+    __ bind(retry_load);
+    __ ldxrw(rscratch1, addr_reg);
+    __ cmpw(rscratch1, old_reg);
+    __ br(Assembler::NE, done);
+    __ stlxrw(rscratch1, new_reg, addr_reg);
+    __ cbnzw(rscratch1, retry_load);
+    __ bind(done);
+  %}
+
+  // auxiliary used for CompareAndSwapX to set result register:
+  // res = 1 if the flags say EQ (swap succeeded), else 0
+  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
+    MacroAssembler _masm(&cbuf);
+    Register res_reg = as_Register($res$$reg);
+    __ cset(res_reg, Assembler::EQ);
+  %}
+
+  // prefetch encodings
+
+  // Read prefetch: prfm PLDL1KEEP (load, L1, temporal)
+  enc_class aarch64_enc_prefetchr(memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    Register base = as_Register($mem$$base);
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    if (index == -1) {
+      __ prfm(Address(base, disp), PLDL1KEEP);
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ prfm(Address(base, index_reg, Address::lsl(scale)), PLDL1KEEP);
+      } else {
+        // prfm has no base+disp+index form; fold base+disp first
+        __ lea(rscratch1, Address(base, disp));
+        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PLDL1KEEP);
+      }
+    }
+  %}
+
+  // Write prefetch: prfm PSTL1KEEP (store, L1, temporal).
+  enc_class aarch64_enc_prefetchw(memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    Register base = as_Register($mem$$base);
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    if (index == -1) {
+      __ prfm(Address(base, disp), PSTL1KEEP);
+      // NOTE(review): this nop has no counterpart in the other
+      // branches or in prefetchr -- confirm it is needed (size
+      // padding?) and not a leftover.
+      __ nop();
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
+      } else {
+        // prfm has no base+disp+index form; fold base+disp first
+        __ lea(rscratch1, Address(base, disp));
+        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
+      }
+    }
+  %}
+
+  // Non-temporal prefetch: prfm PSTL1STRM (store, L1, streaming).
+  enc_class aarch64_enc_prefetchnta(memory mem) %{
+    MacroAssembler _masm(&cbuf);
+    Register base = as_Register($mem$$base);
+    int index = $mem$$index;
+    int scale = $mem$$scale;
+    int disp = $mem$$disp;
+    if (index == -1) {
+      __ prfm(Address(base, disp), PSTL1STRM);
+    } else {
+      Register index_reg = as_Register(index);
+      if (disp == 0) {
+        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1STRM);
+        // NOTE(review): asymmetric nop (see prefetchw) -- confirm intent.
+        __ nop();
+      } else {
+        // prfm has no base+disp+index form; fold base+disp first
+        __ lea(rscratch1, Address(base, disp));
+        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1STRM);
+      }
+    }
+  %}
+
+  // Zero a word-aligned region of cnt words starting at base, using an
+  // 8-way-unrolled str(zr) loop entered Duff's-device style: a computed
+  // branch skips (unroll - cnt%unroll) of the unrolled stores on the
+  // first pass.
+  enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
+    MacroAssembler _masm(&cbuf);
+    Register cnt_reg = as_Register($cnt$$reg);
+    Register base_reg = as_Register($base$$reg);
+    // base is word aligned
+    // cnt is count of words
+
+    Label loop;
+    Label entry;
+
+//  Algorithm:
+//
+//    scratch1 = cnt & 7;
+//    cnt -= scratch1;
+//    p += scratch1;
+//    switch (scratch1) {
+//      do {
+//        cnt -= 8;
+//          p[-8] = 0;
+//        case 7:
+//          p[-7] = 0;
+//        case 6:
+//          p[-6] = 0;
+//          // ...
+//        case 1:
+//          p[-1] = 0;
+//        case 0:
+//          p += 8;
+//      } while (cnt);
+//    }
+
+    const int unroll = 8; // Number of str(zr) instructions we'll unroll
+
+    __ andr(rscratch1, cnt_reg, unroll - 1);  // tmp1 = cnt % unroll
+    __ sub(cnt_reg, cnt_reg, rscratch1);      // cnt -= unroll
+    // base_reg always points to the end of the region we're about to zero
+    __ add(base_reg, base_reg, rscratch1, Assembler::LSL, exact_log2(wordSize));
+    // branch into the unrolled loop: back up one 4-byte instruction
+    // per word left over (each str below encodes to 4 bytes, hence
+    // the shift by 2)
+    __ adr(rscratch2, entry);
+    __ sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
+    __ br(rscratch2);
+    __ bind(loop);
+    __ sub(cnt_reg, cnt_reg, unroll);
+    for (int i = -unroll; i < 0; i++)
+      __ str(zr, Address(base_reg, i * wordSize));
+    __ bind(entry);
+    __ add(base_reg, base_reg, unroll * wordSize);
+    __ cbnz(cnt_reg, loop);
+  %}
+
+  // mov encodings
+
+  // 32-bit immediate move; zero is special-cased to a cheap mov from zr.
+  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
+    MacroAssembler _masm(&cbuf);
+    u_int32_t con = (u_int32_t)$src$$constant;
+    Register dst_reg = as_Register($dst$$reg);
+    if (con == 0) {
+      __ movw(dst_reg, zr);
+    } else {
+      __ movw(dst_reg, con);
+    }
+  %}
+
+  // 64-bit immediate move; same zero special case.
+  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    u_int64_t con = (u_int64_t)$src$$constant;
+    if (con == 0) {
+      __ mov(dst_reg, zr);
+    } else {
+      __ mov(dst_reg, con);
+    }
+  %}
+
+  // Pointer-constant move.  Oops and metadata are emitted with the
+  // proper relocation; unrelocated constants below the page size are
+  // moved as plain immediates, larger ones via adrp+add.
+  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    address con = (address)$src$$constant;
+    if (con == NULL || con == (address)1) {
+      // NULL and the sentinel value 1 have dedicated encodings below
+      ShouldNotReachHere();
+    } else {
+      relocInfo::relocType rtype = $src->constant_reloc();
+      if (rtype == relocInfo::oop_type) {
+        __ movoop(dst_reg, (jobject)con, /*immediate*/true);
+      } else if (rtype == relocInfo::metadata_type) {
+        __ mov_metadata(dst_reg, (Metadata*)con);
+      } else {
+        assert(rtype == relocInfo::none, "unexpected reloc type");
+        if (con < (address)(uintptr_t)os::vm_page_size()) {
+          __ mov(dst_reg, con);
+        } else {
+          unsigned long offset;
+          __ adrp(dst_reg, con, offset);
+          __ add(dst_reg, dst_reg, offset);
+        }
+      }
+    }
+  %}
+
+  // NULL pointer constant
+  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    __ mov(dst_reg, zr);
+  %}
+
+  // pointer constant 1 (used as a sentinel)
+  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    __ mov(dst_reg, (u_int64_t)1);
+  %}
+
+  // address of the polling page, page-aligned so adrp needs no add
+  enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{
+    MacroAssembler _masm(&cbuf);
+    address page = (address)$src$$constant;
+    Register dst_reg = as_Register($dst$$reg);
+    unsigned long off;
+    __ adrp(dst_reg, Address(page, relocInfo::poll_type), off);
+    assert(off == 0, "assumed offset == 0");
+  %}
+
+  // card-table byte-map base, likewise assumed page-aligned
+  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
+    MacroAssembler _masm(&cbuf);
+    address page = (address)$src$$constant;
+    Register dst_reg = as_Register($dst$$reg);
+    unsigned long off;
+    __ adrp(dst_reg, ExternalAddress(page), off);
+    assert(off == 0, "assumed offset == 0");
+  %}
+
+  // narrow (compressed) oop constant
+  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    address con = (address)$src$$constant;
+    if (con == NULL) {
+      // narrow NULL has its own encoding below
+      ShouldNotReachHere();
+    } else {
+      relocInfo::relocType rtype = $src->constant_reloc();
+      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
+      __ set_narrow_oop(dst_reg, (jobject)con);
+    }
+  %}
+
+  // narrow NULL
+  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    __ mov(dst_reg, zr);
+  %}
+
+  // narrow (compressed) klass constant
+  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    address con = (address)$src$$constant;
+    if (con == NULL) {
+      ShouldNotReachHere();
+    } else {
+      relocInfo::relocType rtype = $src->constant_reloc();
+      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
+      __ set_narrow_klass(dst_reg, (Klass *)con);
+    }
+  %}
+
+  // arithmetic encodings
+
+  // 32-bit add/sub with immediate.  A negative constant (or a subtract,
+  // via negation) is emitted as the opposite operation on the absolute
+  // value, since the hardware immediates are unsigned.
+  // NOTE(review): con = -con overflows if con == INT_MIN; presumably
+  // immIAddSub excludes that value -- confirm the operand definition.
+  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    Register src_reg = as_Register($src1$$reg);
+    int32_t con = (int32_t)$src2$$constant;
+    // add has primary == 0, subtract has primary == 1
+    if ($primary) { con = -con; }
+    if (con < 0) {
+      __ subw(dst_reg, src_reg, -con);
+    } else {
+      __ addw(dst_reg, src_reg, con);
+    }
+  %}
+
+  // 64-bit add/sub with immediate; same negation scheme as above.
+  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    Register src_reg = as_Register($src1$$reg);
+    int32_t con = (int32_t)$src2$$constant;
+    // add has primary == 0, subtract has primary == 1
+    if ($primary) { con = -con; }
+    if (con < 0) {
+      __ sub(dst_reg, src_reg, -con);
+    } else {
+      __ add(dst_reg, src_reg, con);
+    }
+  %}
+
+  // Division and remainder.  corrected_idiv[lq] handles the Java
+  // MIN_VALUE / -1 corner case; the boolean selects remainder (true)
+  // vs quotient (false).
+  // NOTE(review): the last three are declared with iRegI formals but
+  // aarch64_enc_div/mod emit the 64-bit idivq -- the formals look like
+  // a copy-paste from the 32-bit form; confirm against the matching
+  // long rules.
+
+  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    Register src1_reg = as_Register($src1$$reg);
+    Register src2_reg = as_Register($src2$$reg);
+    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
+  %}
+
+  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    Register src1_reg = as_Register($src1$$reg);
+    Register src2_reg = as_Register($src2$$reg);
+    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
+  %}
+
+  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    Register src1_reg = as_Register($src1$$reg);
+    Register src2_reg = as_Register($src2$$reg);
+    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
+  %}
+
+  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register dst_reg = as_Register($dst$$reg);
+    Register src1_reg = as_Register($src1$$reg);
+    Register src2_reg = as_Register($src2$$reg);
+    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
+  %}
+
+  // compare instruction encodings
+
+  // 32-bit register-register compare (sets condition flags only).
+  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg1 = as_Register($src1$$reg);
+    Register reg2 = as_Register($src2$$reg);
+    __ cmpw(reg1, reg2);
+  %}
+
+  // 32-bit compare against an add/sub-encodable immediate.  A negative
+  // constant is compared by adding its magnitude.  immIAddSub excludes
+  // values (such as INT_MIN) whose negation would overflow.
+  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg = as_Register($src1$$reg);
+    int32_t val = $src2$$constant;
+    if (val >= 0) {
+      __ subsw(zr, reg, val);
+    } else {
+      __ addsw(zr, reg, -val);
+    }
+  %}
+
+  // General 32-bit compare: the constant does not fit an immediate
+  // field, so materialize it in rscratch1 first.
+  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg1 = as_Register($src1$$reg);
+    u_int32_t val = (u_int32_t)$src2$$constant;
+    __ movw(rscratch1, val);
+    __ cmpw(reg1, rscratch1);
+  %}
+
+  // 64-bit register-register compare.
+  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg1 = as_Register($src1$$reg);
+    Register reg2 = as_Register($src2$$reg);
+    __ cmp(reg1, reg2);
+  %}
+
+  // 64-bit compare against a 12-bit immediate.  Only Long.MIN_VALUE
+  // satisfies val == -val with val < 0; it cannot be negated, so it is
+  // materialized via orr with the zero register instead.
+  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg = as_Register($src1$$reg);
+    int64_t val = $src2$$constant;
+    if (val >= 0) {
+      __ subs(zr, reg, val);
+    } else if (val != -val) {
+      __ adds(zr, reg, -val);
+    } else {
+    // aargh, Long.MIN_VALUE is a special case
+      __ orr(rscratch1, zr, (u_int64_t)val);
+      __ subs(zr, reg, rscratch1);
+    }
+  %}
+
+  // General 64-bit compare: materialize the constant in rscratch1.
+  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg1 = as_Register($src1$$reg);
+    u_int64_t val = (u_int64_t)$src2$$constant;
+    __ mov(rscratch1, val);
+    __ cmp(reg1, rscratch1);
+  %}
+
+  // Pointer compare (full 64-bit).
+  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg1 = as_Register($src1$$reg);
+    Register reg2 = as_Register($src2$$reg);
+    __ cmp(reg1, reg2);
+  %}
+
+  // Narrow-oop compare (compressed oops are 32-bit values).
+  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg1 = as_Register($src1$$reg);
+    Register reg2 = as_Register($src2$$reg);
+    __ cmpw(reg1, reg2);
+  %}
+
+  // Pointer null test: compare against the zero register.
+  enc_class aarch64_enc_testp(iRegP src) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg = as_Register($src$$reg);
+    __ cmp(reg, zr);
+  %}
+
+  // Narrow-oop null test.
+  enc_class aarch64_enc_testn(iRegN src) %{
+    MacroAssembler _masm(&cbuf);
+    Register reg = as_Register($src$$reg);
+    __ cmpw(reg, zr);
+  %}
+
+  // Unconditional PC-relative branch.
+  enc_class aarch64_enc_b(label lbl) %{
+    MacroAssembler _masm(&cbuf);
+    __ b(*($lbl$$label));
+  %}
+
+  // Conditional branch (signed condition codes).
+  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
+    MacroAssembler _masm(&cbuf);
+    __ br((Assembler::Condition)$cmp$$cmpcode, *($lbl$$label));
+  %}
+
+  // Conditional branch (unsigned condition codes).
+  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
+    MacroAssembler _masm(&cbuf);
+    __ br((Assembler::Condition)$cmp$$cmpcode, *($lbl$$label));
+  %}
+
+  // Slow-path subtype check (scan of the secondary supers array),
+  // delegated to MacroAssembler::check_klass_subtype_slow_path with
+  // set_cond_codes so callers can branch on the flags.  Control
+  // reaches 'miss' when sub is not a subtype of super.
+  // NOTE(review): instructs with primary == 1 additionally zero the
+  // result register on the hit (fall-through) path -- presumably for
+  // the variant whose users consume 0 as "success"; confirm against
+  // the matching instruct definitions.
+  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
+  %{
+     Register sub_reg = as_Register($sub$$reg);
+     Register super_reg = as_Register($super$$reg);
+     Register temp_reg = as_Register($temp$$reg);
+     Register result_reg = as_Register($result$$reg);
+
+     Label miss;
+     MacroAssembler _masm(&cbuf);
+     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
+                                     NULL, &miss,
+                                     /*set_cond_codes:*/ true);
+     if ($primary) {
+       __ mov(result_reg, zr);
+     }
+     __ bind(miss);
+  %}
+
+  // Direct Java call.  All variants go through trampoline_call so the
+  // target remains reachable even when it lies outside direct branch
+  // range.  _method == NULL distinguishes calls to runtime wrappers
+  // from real Java static / optimized-virtual call sites.
+  enc_class aarch64_enc_java_static_call(method meth) %{
+    MacroAssembler _masm(&cbuf);
+
+    address addr = (address)$meth$$method;
+    if (!_method) {
+      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
+      __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
+    } else if (_optimized_virtual) {
+      __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
+    } else {
+      __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
+    }
+
+    if (_method) {
+      // Emit stub for static call, used when the callee is deoptimized
+      // or not yet compiled and the call must re-enter the interpreter.
+      CompiledStaticCall::emit_to_interp_stub(cbuf);
+    }
+  %}
+
+  // Direct Java call through a method handle.  Identical call-site
+  // emission to aarch64_enc_java_static_call, but SP is saved in RFP
+  // around the call because a method-handle callee may modify it.
+  // (Cleanup: removed the unused locals 'relocInfo::relocType reloc'
+  // and 'start_offset', which were declared but never read.)
+  enc_class aarch64_enc_java_handle_call(method meth) %{
+    MacroAssembler _masm(&cbuf);
+
+    // RFP is preserved across all calls, even compiled calls.
+    // Use it to preserve SP.
+    __ mov(rfp, sp);
+
+    address addr = (address)$meth$$method;
+    if (!_method) {
+      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
+      __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf);
+    } else if (_optimized_virtual) {
+      __ trampoline_call(Address(addr, relocInfo::opt_virtual_call_type), &cbuf);
+    } else {
+      __ trampoline_call(Address(addr, relocInfo::static_call_type), &cbuf);
+    }
+
+    if (_method) {
+      // Emit stub for static call
+      CompiledStaticCall::emit_to_interp_stub(cbuf);
+    }
+
+    // now restore sp
+    __ mov(sp, rfp);
+  %}
+
+  // Virtual/interface call dispatched through an inline cache.
+  enc_class aarch64_enc_java_dynamic_call(method meth) %{
+    MacroAssembler _masm(&cbuf);
+    __ ic_call((address)$meth$$method);
+  %}
+
+  // Post-call check hook; the VerifyStackAtCalls check is not yet
+  // implemented on AArch64 (call_Unimplemented traps if enabled).
+  enc_class aarch64_enc_call_epilog() %{
+    MacroAssembler _masm(&cbuf);
+    if (VerifyStackAtCalls) {
+      // Check that stack depth is unchanged: find majik cookie on stack
+      __ call_Unimplemented();
+    }
+  %}
+
+  // Call from compiled Java code out to the runtime / a stub.
+  enc_class aarch64_enc_java_to_runtime(method meth) %{
+    MacroAssembler _masm(&cbuf);
+
+    // some calls to generated routines (arraycopy code) are scheduled
+    // by C2 as runtime calls. if so we can call them using a br (they
+    // will be in a reachable segment) otherwise we have to use a blrt
+    // which loads the absolute address into a register.
+    address entry = (address)$meth$$method;
+    CodeBlob *cb = CodeCache::find_blob(entry);
+    if (cb) {
+      // Target is inside the code cache: a trampoline call reaches it.
+      __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
+    } else {
+      // Out-of-code-cache target: indirect call via blrt, which needs
+      // the argument register/float counts and return type.
+      int gpcnt;
+      int fpcnt;
+      int rtype;
+      getCallInfo(tf(), gpcnt, fpcnt, rtype);
+      Label retaddr;
+      __ adr(rscratch2, retaddr);
+      __ lea(rscratch1, RuntimeAddress(entry));
+      // Leave a breadcrumb for JavaThread::pd_last_frame().
+      // Pushes a {zr, return-pc} pair below SP before the call.
+      __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
+      __ blrt(rscratch1, gpcnt, fpcnt, rtype);
+      __ bind(retaddr);
+      // Pop the breadcrumb pair.
+      __ add(sp, sp, 2 * wordSize);
+    }
+  %}
+
+  // Jump to the rethrow stub; far_jump works regardless of the stub's
+  // distance from the call site.
+  enc_class aarch64_enc_rethrow() %{
+    MacroAssembler _masm(&cbuf);
+    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
+  %}
+
+  // Method return: branch to the link register.
+  enc_class aarch64_enc_ret() %{
+    MacroAssembler _masm(&cbuf);
+    __ ret(lr);
+  %}
+
+  // Tail call: indirect jump to the target method entry.
+  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
+    MacroAssembler _masm(&cbuf);
+    Register target_reg = as_Register($jump_target$$reg);
+    __ br(target_reg);
+  %}
+
+  // Tail jump for exception forwarding.
+  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
+    MacroAssembler _masm(&cbuf);
+    Register target_reg = as_Register($jump_target$$reg);
+    // exception oop should be in r0
+    // ret addr has been popped into lr
+    // callee expects it in r3
+    __ mov(r3, lr);
+    __ br(target_reg);
+  %}
+
+  // Fast-path monitor enter.  On exit the condition flags encode the
+  // result: EQ == lock acquired, NE == fall back to the runtime.
+  enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
+    MacroAssembler _masm(&cbuf);
+    Register oop = as_Register($object$$reg);
+    Register box = as_Register($box$$reg);
+    Register disp_hdr = as_Register($tmp$$reg);
+    Register tmp = as_Register($tmp2$$reg);
+    Label cont;
+    Label object_has_monitor;
+    Label cas_failed;
+
+    assert_different_registers(oop, box, tmp, disp_hdr);
+
+    // Load markOop from object into displaced_header.
+    __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
+
+    // Always do locking in runtime.
+    if (EmitSync & 0x01) {
+      // oop is never null here, so this forces NE (take the slow path).
+      __ cmp(oop, zr);
+      return;
+    }
+
+    if (UseBiasedLocking) {
+      __ biased_locking_enter(disp_hdr, oop, box, tmp, true, cont);
+    }
+
+    // Handle existing monitor.
+    // Fix: this guard must match the '(EmitSync & 0x02) == 0' guard
+    // below that binds object_has_monitor (as fast_unlock also does);
+    // with the original inverted condition, setting the 0x02 bit would
+    // emit a tbnz to a label that is never bound.
+    if ((EmitSync & 0x02) == 0) {
+      // we can use AArch64's bit test and branch here but
+      // markoopDesc does not define a bit index just the bit value
+      // so assert in case the bit pos changes
+#     define __monitor_value_log2 1
+      assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
+      __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
+#     undef __monitor_value_log2
+    }
+
+    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
+    __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
+
+    // Load Compare Value application register.
+
+    // Initialize the box. (Must happen before we update the object mark!)
+    __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+    // Compare object markOop with mark and if equal exchange scratch1
+    // with object markOop.
+    // Note that this is simply a CAS: it does not generate any
+    // barriers.  These are separately generated by
+    // membar_acquire_lock().
+    {
+      Label retry_load;
+      __ bind(retry_load);
+      __ ldxr(tmp, oop);
+      __ cmp(tmp, disp_hdr);
+      __ br(Assembler::NE, cas_failed);
+      // use stlxr to ensure update is immediately visible
+      __ stlxr(tmp, box, oop);
+      // stlxr status == 0 means the store succeeded; flags are still
+      // EQ from the cmp above, i.e. success.
+      __ cbzw(tmp, cont);
+      __ b(retry_load);
+    }
+
+    // Formerly:
+    // __ cmpxchgptr(/*oldv=*/disp_hdr,
+    //               /*newv=*/box,
+    //               /*addr=*/oop,
+    //               /*tmp=*/tmp,
+    //               cont,
+    //               /*fail*/NULL);
+
+    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+    // If the compare-and-exchange succeeded, then we found an unlocked
+    // object, will have now locked it will continue at label cont
+
+    __ bind(cas_failed);
+    // We did not see an unlocked object so try the fast recursive case.
+
+    // Check if the owner is self by comparing the value in the
+    // markOop of object (disp_hdr) with the stack pointer.
+    __ mov(rscratch1, sp);
+    __ sub(disp_hdr, disp_hdr, rscratch1);
+    __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
+    // If condition is true we are cont and hence we can store 0 as the
+    // displaced header in the box, which indicates that it is a recursive lock.
+    // The 'ands' also sets the flags: EQ == recursive lock, success.
+    __ ands(tmp/*==0?*/, disp_hdr, tmp);
+    __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+    // Handle existing monitor.
+    if ((EmitSync & 0x02) == 0) {
+      __ b(cont);
+
+      __ bind(object_has_monitor);
+      // The object's monitor m is unlocked iff m->owner == NULL,
+      // otherwise m->owner may contain a thread or a stack address.
+      //
+      // Try to CAS m->owner from NULL to current thread.
+      __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
+      __ mov(disp_hdr, zr);
+
+      {
+        Label retry_load, fail;
+        __ bind(retry_load);
+        __ ldxr(rscratch1, tmp);
+        __ cmp(disp_hdr, rscratch1);
+        __ br(Assembler::NE, fail);
+        // use stlxr to ensure update is immediately visible
+        __ stlxr(rscratch1, rthread, tmp);
+        __ cbnzw(rscratch1, retry_load);
+        __ bind(fail);
+      }
+
+      // Label next;
+      // __ cmpxchgptr(/*oldv=*/disp_hdr,
+      //               /*newv=*/rthread,
+      //               /*addr=*/tmp,
+      //               /*tmp=*/rscratch1,
+      //               /*succeed*/next,
+      //               /*fail*/NULL);
+      // __ bind(next);
+
+      // store a non-null value into the box.
+      __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+      // PPC port checks the following invariants
+      // #ifdef ASSERT
+      // bne(flag, cont);
+      // We have acquired the monitor, check some invariants.
+      // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes());
+      // Invariant 1: _recursions should be 0.
+      // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
+      // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp,
+      //                        "monitor->_recursions should be 0", -1);
+      // Invariant 2: OwnerIsThread shouldn't be 0.
+      // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
+      //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp,
+      //                           "monitor->OwnerIsThread shouldn't be 0", -1);
+      // #endif
+    }
+
+    __ bind(cont);
+    // flag == EQ indicates success
+    // flag == NE indicates failure
+
+  %}
+
+  // TODO
+  // reimplement this with custom cmpxchgptr code
+  // which avoids some of the unnecessary branching
+  // Fast-path monitor exit.  On exit the condition flags encode the
+  // result: EQ == lock released, NE == fall back to the runtime.
+  enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{
+    MacroAssembler _masm(&cbuf);
+    Register oop = as_Register($object$$reg);
+    Register box = as_Register($box$$reg);
+    Register disp_hdr = as_Register($tmp$$reg);
+    Register tmp = as_Register($tmp2$$reg);
+    Label cont;
+    Label object_has_monitor;
+    Label cas_failed;
+
+    assert_different_registers(oop, box, tmp, disp_hdr);
+
+    // Always do locking in runtime.
+    if (EmitSync & 0x01) {
+      __ cmp(oop, zr); // Oop can't be 0 here => always false.
+      return;
+    }
+
+    if (UseBiasedLocking) {
+      __ biased_locking_exit(oop, tmp, cont);
+    }
+
+    // Find the lock address and load the displaced header from the stack.
+    __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
+    // If the displaced header is 0, we have a recursive unlock.
+    __ cmp(disp_hdr, zr);
+    __ br(Assembler::EQ, cont);
+
+
+    // Handle existing monitor.
+    if ((EmitSync & 0x02) == 0) {
+      __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
+      __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
+    }
+
+    // Check if it is still a light weight lock, this is true if we
+    // see the stack address of the basicLock in the markOop of the
+    // object.
+
+      {
+        Label retry_load;
+        __ bind(retry_load);
+        __ ldxr(tmp, oop);
+        __ cmp(box, tmp);
+        __ br(Assembler::NE, cas_failed);
+        // use stlxr to ensure update is immediately visible
+        __ stlxr(tmp, disp_hdr, oop);
+        // status == 0 means the store-exclusive succeeded (flags EQ).
+        __ cbzw(tmp, cont);
+        __ b(retry_load);
+      }
+
+    // __ cmpxchgptr(/*compare_value=*/box,
+    //               /*exchange_value=*/disp_hdr,
+    //               /*where=*/oop,
+    //               /*result=*/tmp,
+    //               cont,
+    //               /*cas_failed*/NULL);
+    assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+    __ bind(cas_failed);
+
+    // Handle existing monitor.
+    if ((EmitSync & 0x02) == 0) {
+      __ b(cont);
+
+      __ bind(object_has_monitor);
+      __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor
+      __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
+      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes()));
+      __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner.
+      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions
+      __ cmp(rscratch1, zr);
+      __ br(Assembler::NE, cont);
+
+      __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes()));
+      __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes()));
+      __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0.
+      // The cmp establishes the final flags: EQ (lists empty, we can
+      // release) when falling through, NE (failure) when cbnz is taken.
+      __ cmp(rscratch1, zr);
+      __ cbnz(rscratch1, cont);
+      // need a release store here
+      __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes()));
+      __ stlr(rscratch1, tmp); // rscratch1 is zero
+    }
+
+    __ bind(cont);
+    // flag == EQ indicates success
+    // flag == NE indicates failure
+  %}
+
+%}
+
+//----------FRAME--------------------------------------------------------------
+// Definition of frame structure and management information.
+//
+//  S T A C K   L A Y O U T    Allocators stack-slot number
+//                             |   (to get allocators register number
+//  G  Owned by    |        |  v    add OptoReg::stack0())
+//  r   CALLER     |        |
+//  o     |        +--------+      pad to even-align allocators stack-slot
+//  w     V        |  pad0  |        numbers; owned by CALLER
+//  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
+//  h     ^        |   in   |  5
+//        |        |  args  |  4   Holes in incoming args owned by SELF
+//  |     |        |        |  3
+//  |     |        +--------+
+//  V     |        | old out|      Empty on Intel, window on Sparc
+//        |    old |preserve|      Must be even aligned.
+//        |     SP-+--------+----> Matcher::_old_SP, even aligned
+//        |        |   in   |  3   area for Intel ret address
+//     Owned by    |preserve|      Empty on Sparc.
+//       SELF      +--------+
+//        |        |  pad2  |  2   pad to align old SP
+//        |        +--------+  1
+//        |        | locks  |  0
+//        |        +--------+----> OptoReg::stack0(), even aligned
+//        |        |  pad1  | 11   pad to align new SP
+//        |        +--------+
+//        |        |        | 10
+//        |        | spills |  9   spills
+//        V        |        |  8   (pad0 slot for callee)
+//      -----------+--------+----> Matcher::_out_arg_limit, unaligned
+//        ^        |  out   |  7
+//        |        |  args  |  6   Holes in outgoing args owned by CALLEE
+//     Owned by    +--------+
+//      CALLEE     | new out|  6   Empty on Intel, window on Sparc
+//        |    new |preserve|      Must be even-aligned.
+//        |     SP-+--------+----> Matcher::_new_SP, even aligned
+//        |        |        |
+//
+// Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
+//         known from SELF's arguments and the Java calling convention.
+//         Region 6-7 is determined per call site.
+// Note 2: If the calling convention leaves holes in the incoming argument
+//         area, those holes are owned by SELF.  Holes in the outgoing area
+//         are owned by the CALLEE.  Holes should not be necessary in the
+//         incoming area, as the Java calling convention is completely under
+//         the control of the AD file.  Doubles can be sorted and packed to
+//         avoid holes.  Holes in the outgoing arguments may be necessary for
+//         varargs C calling conventions.
+// Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
+//         even aligned with pad0 as needed.
+//         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
+//           (the latter is true on Intel but is it false on AArch64?)
+//         region 6-11 is even aligned; it may be padded out more so that
+//         the region from SP to FP meets the minimum stack alignment.
+// Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
+//         alignment.  Region 11, pad1, may be dynamically extended so that
+//         SP meets the minimum alignment.
+
+frame %{
+  // What direction does stack grow in (assumed to be same for C & Java)
+  stack_direction(TOWARDS_LOW);
+
+  // These three registers define part of the calling convention
+  // between compiled code and the interpreter.
+
+  // Inline Cache Register or methodOop for I2C.
+  inline_cache_reg(R12);
+
+  // Method Oop Register when calling interpreter.
+  interpreter_method_oop_reg(R12);
+
+  // Number of stack slots consumed by locking an object
+  sync_stack_slots(2);
+
+  // Compiled code's Frame Pointer
+  // NOTE(review): R31 in the allocator's register file -- presumably
+  // the stack pointer encoding on AArch64; confirm against the
+  // register definitions earlier in this file.
+  frame_pointer(R31);
+
+  // Interpreter stores its frame pointer in a register which is
+  // stored to the stack by I2CAdaptors.
+  // I2CAdaptors convert from interpreted java to compiled java.
+  interpreter_frame_pointer(R29);
+
+  // Stack alignment requirement
+  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
+
+  // Number of stack slots between incoming argument block and the start of
+  // a new frame.  The PROLOG must add this many slots to the stack.  The
+  // EPILOG must remove this many slots. aarch64 needs two slots for
+  // return address and fp.
+  // TODO think this is correct but check
+  in_preserve_stack_slots(4);
+
+  // Number of outgoing stack slots killed above the out_preserve_stack_slots
+  // for calls to C.  Supports the var-args backing area for register parms.
+  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
+
+  // The after-PROLOG location of the return address.  Location of
+  // return address specifies a type (REG or STACK) and a number
+  // representing the register number (i.e. - use a register name) or
+  // stack slot.
+  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
+  // Otherwise, it is above the locks and verification slot and alignment word
+  // TODO this may well be correct but need to check why that - 2 is there
+  // ppc port uses 0 but we definitely need to allow for fixed_slots
+  // which folds in the space used for monitors
+  return_addr(STACK - 2 +
+              round_to((Compile::current()->in_preserve_stack_slots() +
+                        Compile::current()->fixed_slots()),
+                       stack_alignment_in_slots()));
+
+  // Body of function which returns an integer array locating
+  // arguments either in registers or in stack slots.  Passed an array
+  // of ideal registers called "sig" and a "length" count.  Stack-slot
+  // offsets are based on outgoing arguments, i.e. a CALLER setting up
+  // arguments for a CALLEE.  Incoming stack arguments are
+  // automatically biased by the preserve_stack_slots field above.
+
+  calling_convention
+  %{
+    // No difference between ingoing/outgoing just pass false
+    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
+  %}
+
+  c_calling_convention
+  %{
+    // This is obviously always outgoing
+    (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length);
+  %}
+
+  // Location of compiled Java return values.  Same as C for now.
+  return_value
+  %{
+    // TODO do we allow ideal_reg == Op_RegN???
+    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
+           "only return normal values");
+
+    // Both tables are indexed by the ideal register opcode: ints,
+    // pointers and longs come back in R0, floats/doubles in V0.
+    static const int lo[Op_RegL + 1] = { // enum name
+      0,                                 // Op_Node
+      0,                                 // Op_Set
+      R0_num,                            // Op_RegN
+      R0_num,                            // Op_RegI
+      R0_num,                            // Op_RegP
+      V0_num,                            // Op_RegF
+      V0_num,                            // Op_RegD
+      R0_num                             // Op_RegL
+    };
+
+    // High half of each return register; Bad for 32-bit values.
+    static const int hi[Op_RegL + 1] = { // enum name
+      0,                                 // Op_Node
+      0,                                 // Op_Set
+      OptoReg::Bad,                       // Op_RegN
+      OptoReg::Bad,                      // Op_RegI
+      R0_H_num,                          // Op_RegP
+      OptoReg::Bad,                      // Op_RegF
+      V0_H_num,                          // Op_RegD
+      R0_H_num                           // Op_RegL
+    };
+
+    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
+  %}
+%}
+
+//----------ATTRIBUTES---------------------------------------------------------
+//----------Operand Attributes-------------------------------------------------
+// Default attribute values consumed by the ADLC; individual operands
+// and instructions may override them.
+op_attrib op_cost(1);        // Required cost attribute
+
+//----------Instruction Attributes---------------------------------------------
+ins_attrib ins_cost(INSN_COST); // Required cost attribute
+ins_attrib ins_size(32);        // Required size attribute (in bits)
+ins_attrib ins_short_branch(0); // Required flag: is this instruction
+                                // a non-matching short branch variant
+                                // of some long branch?
+ins_attrib ins_alignment(4);    // Required alignment attribute (must
+                                // be a power of 2) specifies the
+                                // alignment that some part of the
+                                // instruction (not necessarily the
+                                // start) requires.  If > 1, a
+                                // compute_padding() function must be
+                                // provided for the instruction
+
+//----------OPERANDS-----------------------------------------------------------
+// Operand definitions must precede instruction definitions for correct parsing
+// in the ADLC because operands constitute user defined types which are used in
+// instruction definitions.
+
+//----------Simple Operands----------------------------------------------------
+
+// Integer operands 32 bit
+// 32 bit immediate
+operand immI()
+%{
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32 bit zero
+operand immI0()
+%{
+  predicate(n->get_int() == 0);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32 bit unit increment
+operand immI_1()
+%{
+  predicate(n->get_int() == 1);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32 bit unit decrement
+operand immI_M1()
+%{
+  predicate(n->get_int() == -1);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_le_4()
+%{
+  predicate(n->get_int() <= 4);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_31()
+%{
+  predicate(n->get_int() == 31);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_8()
+%{
+  predicate(n->get_int() == 8);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_16()
+%{
+  predicate(n->get_int() == 16);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_24()
+%{
+  predicate(n->get_int() == 24);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_32()
+%{
+  predicate(n->get_int() == 32);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_48()
+%{
+  predicate(n->get_int() == 48);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_56()
+%{
+  predicate(n->get_int() == 56);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_64()
+%{
+  predicate(n->get_int() == 64);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_255()
+%{
+  predicate(n->get_int() == 255);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI_65535()
+%{
+  predicate(n->get_int() == 65535);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Constant 63, used as a 64-bit shift-count mask.
+// NOTE(review): despite the immL_ names, these two operands match ConI
+// and use get_int() -- presumably because the shift-count input of the
+// long-shift rules is an int constant; confirm against the instruct
+// definitions before "fixing" them to ConL/get_long().
+operand immL_63()
+%{
+  predicate(n->get_int() == 63);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Constant 255 (low-byte mask); same ConI/get_int() caveat as above.
+operand immL_255()
+%{
+  predicate(n->get_int() == 255);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immL_65535()
+%{
+  predicate(n->get_long() == 65535L);
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immL_4294967295()
+%{
+  predicate(n->get_long() == 4294967295L);
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long mask of the form 2^k - 1 (a contiguous run of low-order ones)
+// whose top two bits are clear.  NOTE(review): presumably consumed by
+// bitfield-extract (ubfx-style) match rules -- confirm against the
+// instruct definitions.
+operand immL_bitmask()
+%{
+  predicate(((n->get_long() & 0xc000000000000000l) == 0)
+            && is_power_of_2(n->get_long() + 1));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Int mask of the form 2^k - 1 with the top two bits clear; 32-bit
+// analogue of immL_bitmask above.
+operand immI_bitmask()
+%{
+  predicate(((n->get_int() & 0xc0000000) == 0)
+            && is_power_of_2(n->get_int() + 1));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Scale values for scaled offset addressing modes (up to long but not quad)
+operand immIScale()
+%{
+  predicate(0 <= n->get_int() && (n->get_int() <= 3));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 26 bit signed offset -- for pc-relative branches
+operand immI26()
+%{
+  predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25)));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 19 bit signed offset -- for pc-relative loads
+operand immI19()
+%{
+  predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18)));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 12 bit unsigned offset -- for base plus immediate loads
+operand immIU12()
+%{
+  predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12)));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immLU12()
+%{
+  predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12)));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Offset for scaled or unscaled immediate loads and stores
+operand immIOffset()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_int()));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immLoffset()
+%{
+  predicate(Address::offset_ok_for_immed(n->get_long()));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32 bit integer valid for add sub immediate
+operand immIAddSub()
+%{
+  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32 bit unsigned integer valid for logical immediate
+// TODO -- check this is right when e.g the mask is 0x80000000
+operand immILog()
+%{
+  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer operands 64 bit
+// 64 bit immediate
+operand immL()
+%{
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64 bit zero
+operand immL0()
+%{
+  predicate(n->get_long() == 0);
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64 bit unit increment
+operand immL_1()
+%{
+  predicate(n->get_long() == 1);
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64 bit unit decrement
+operand immL_M1()
+%{
+  predicate(n->get_long() == -1);
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 32 bit offset of pc in thread anchor
+
+operand immL_pc_off()
+%{
+  predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) +
+                             in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
+  match(ConL);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64 bit integer valid for add sub immediate
+operand immLAddSub()
+%{
+  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// 64 bit integer valid for logical immediate
+operand immLLog()
+%{
+  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: low 32-bit mask
+operand immL_32bits()
+%{
+  predicate(n->get_long() == 0xFFFFFFFFL);
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Pointer operands
+// Pointer Immediate
+operand immP()
+%{
+  match(ConP);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// NULL Pointer Immediate
+operand immP0()
+%{
+  predicate(n->get_ptr() == 0);
+  match(ConP);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Pointer Immediate One
+// this is used in object initialization (initial object header)
+operand immP_1()
+%{
+  predicate(n->get_ptr() == 1);
+  match(ConP);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Polling Page Pointer Immediate
+operand immPollPage()
+%{
+  predicate((address)n->get_ptr() == os::get_polling_page());
+  match(ConP);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Card Table Byte Map Base
+operand immByteMapBase()
+%{
+  // Get base of card map
+  predicate((jbyte*)n->get_ptr() ==
+        ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
+  match(ConP);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Pointer Immediate Minus One
+// this is used when we want to write the current PC to the thread anchor
+operand immP_M1()
+%{
+  predicate(n->get_ptr() == -1);
+  match(ConP);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Pointer Immediate Minus Two
+// this is used when we want to write the current PC to the thread anchor
+operand immP_M2()
+%{
+  predicate(n->get_ptr() == -2);
+  match(ConP);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Float and Double operands
+// Double Immediate
+operand immD()
+%{
+  match(ConD);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'double +0.0'.
+operand immD0()
+%{
+  predicate((n->getd() == 0) &&
+            (fpclassify(n->getd()) == FP_ZERO) && (signbit(n->getd()) == 0));
+  match(ConD);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Double immediate representable as a packed (FMOV) immediate.
+operand immDPacked()
+%{
+  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
+  match(ConD);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Float Immediate
+operand immF()
+%{
+  match(ConF);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'float +0.0'.
+operand immF0()
+%{
+  predicate((n->getf() == 0) &&
+            (fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0));
+  match(ConF);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Float immediate representable as a packed (FMOV) immediate.
+operand immFPacked()
+%{
+  predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
+  match(ConF);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Narrow pointer operands
+// Narrow Pointer Immediate
+operand immN()
+%{
+  match(ConN);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Narrow NULL Pointer Immediate
+operand immN0()
+%{
+  predicate(n->get_narrowcon() == 0);
+  match(ConN);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immNKlass()
+%{
+  match(ConNKlass);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer 32 bit Register Operands
+// Integer 32 bit Register (excludes SP)
+operand iRegI()
+%{
+  constraint(ALLOC_IN_RC(any_reg32));
+  match(RegI);
+  match(iRegINoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Integer 32 bit Register not Special
+operand iRegINoSp()
+%{
+  constraint(ALLOC_IN_RC(no_special_reg32));
+  match(RegI);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Integer 64 bit Register Operands
+// Integer 64 bit Register (includes SP)
+operand iRegL()
+%{
+  constraint(ALLOC_IN_RC(any_reg));
+  match(RegL);
+  match(iRegLNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Integer 64 bit Register not Special
+operand iRegLNoSp()
+%{
+  constraint(ALLOC_IN_RC(no_special_reg));
+  match(RegL);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer Register Operands
+// Pointer Register
+operand iRegP()
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(RegP);
+  match(iRegPNoSp);
+  match(iRegP_R0);
+  //match(iRegP_R2);
+  //match(iRegP_R4);
+  //match(iRegP_R5);
+  match(thread_RegP);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register not Special
+operand iRegPNoSp()
+%{
+  constraint(ALLOC_IN_RC(no_special_ptr_reg));
+  match(RegP);
+  // match(iRegP);
+  // match(iRegP_R0);
+  // match(iRegP_R2);
+  // match(iRegP_R4);
+  // match(iRegP_R5);
+  // match(thread_RegP);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register R0 only
+operand iRegP_R0()
+%{
+  constraint(ALLOC_IN_RC(r0_reg));
+  match(RegP);
+  // match(iRegP);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register R1 only
+operand iRegP_R1()
+%{
+  constraint(ALLOC_IN_RC(r1_reg));
+  match(RegP);
+  // match(iRegP);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register R2 only
+operand iRegP_R2()
+%{
+  constraint(ALLOC_IN_RC(r2_reg));
+  match(RegP);
+  // match(iRegP);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register R3 only
+operand iRegP_R3()
+%{
+  constraint(ALLOC_IN_RC(r3_reg));
+  match(RegP);
+  // match(iRegP);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register R4 only
+operand iRegP_R4()
+%{
+  constraint(ALLOC_IN_RC(r4_reg));
+  match(RegP);
+  // match(iRegP);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register R5 only
+operand iRegP_R5()
+%{
+  constraint(ALLOC_IN_RC(r5_reg));
+  match(RegP);
+  // match(iRegP);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register R10 only
+operand iRegP_R10()
+%{
+  constraint(ALLOC_IN_RC(r10_reg));
+  match(RegP);
+  // match(iRegP);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Long 64 bit Register R11 only
+operand iRegL_R11()
+%{
+  constraint(ALLOC_IN_RC(r11_reg));
+  match(RegL);
+  match(iRegLNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer 64 bit Register FP only
+operand iRegP_FP()
+%{
+  constraint(ALLOC_IN_RC(fp_reg));
+  match(RegP);
+  // match(iRegP);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Register R0 only
+operand iRegI_R0()
+%{
+  constraint(ALLOC_IN_RC(int_r0_reg));
+  match(RegI);
+  match(iRegINoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Register R2 only
+operand iRegI_R2()
+%{
+  constraint(ALLOC_IN_RC(int_r2_reg));
+  match(RegI);
+  match(iRegINoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Register R3 only
+operand iRegI_R3()
+%{
+  constraint(ALLOC_IN_RC(int_r3_reg));
+  match(RegI);
+  match(iRegINoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+
+// Register R4 only
+operand iRegI_R4()
+%{
+  constraint(ALLOC_IN_RC(int_r4_reg));
+  match(RegI);
+  match(iRegINoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+
+// Pointer Register Operands
+// Narrow Pointer Register
+operand iRegN()
+%{
+  constraint(ALLOC_IN_RC(any_reg32));
+  match(RegN);
+  match(iRegNNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Narrow Pointer Register not Special
+operand iRegNNoSp()
+%{
+  constraint(ALLOC_IN_RC(no_special_reg32));
+  match(RegN);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// heap base register -- used for encoding immN0
+
+operand iRegIHeapbase()
+%{
+  constraint(ALLOC_IN_RC(heapbase_reg));
+  match(RegI);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Float Register
+// Float register operands
+operand vRegF()
+%{
+  constraint(ALLOC_IN_RC(float_reg));
+  match(RegF);
+
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Double Register
+// Double register operands
+operand vRegD()
+%{
+  constraint(ALLOC_IN_RC(double_reg));
+  match(RegD);
+
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand vRegD_V0()
+%{
+  constraint(ALLOC_IN_RC(v0_reg));
+  match(RegD);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand vRegD_V1()
+%{
+  constraint(ALLOC_IN_RC(v1_reg));
+  match(RegD);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand vRegD_V2()
+%{
+  constraint(ALLOC_IN_RC(v2_reg));
+  match(RegD);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand vRegD_V3()
+%{
+  constraint(ALLOC_IN_RC(v3_reg));
+  match(RegD);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Flags register, used as output of signed compare instructions
+
+// note that on AArch64 we also use this register as the output for
+// floating point compare instructions (CmpF CmpD). this ensures
+// that ordered inequality tests use GT, GE, LT or LE none of which
+// pass through cases where the result is unordered i.e. one or both
+// inputs to the compare is a NaN. this means that the ideal code can
+// replace e.g. a GT with an LE and not end up capturing the NaN case
+// (where the comparison should always fail). EQ and NE tests are
+// always generated in ideal code so that unordered folds into the NE
+// case, matching the behaviour of AArch64 NE.
+//
+// This differs from x86 where the outputs of FP compares use a
+// special FP flags registers and where compares based on this
+// register are distinguished into ordered inequalities (cmpOpUCF) and
+// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
+// to explicitly handle the unordered case in branches. x86 also has
+// to include extra CMoveX rules to accept a cmpOpUCF input.
+
+operand rFlagsReg()
+%{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  op_cost(0);
+  format %{ "RFLAGS" %}
+  interface(REG_INTER);
+%}
+
+// Flags register, used as output of unsigned compare instructions
+operand rFlagsRegU()
+%{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+
+  op_cost(0);
+  format %{ "RFLAGSU" %}
+  interface(REG_INTER);
+%}
+
+// Special Registers
+
+// Method Register
+operand inline_cache_RegP(iRegP reg)
+%{
+  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
+  match(reg);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand interpreter_method_oop_RegP(iRegP reg)
+%{
+  constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg
+  match(reg);
+  match(iRegPNoSp);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Thread Register
+operand thread_RegP(iRegP reg)
+%{
+  constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
+  match(reg);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand lr_RegP(iRegP reg)
+%{
+  constraint(ALLOC_IN_RC(lr_reg)); // link_reg
+  match(reg);
+  op_cost(0);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+//----------Memory Operands----------------------------------------------------
+
+operand indirect(iRegP reg)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(reg);
+  op_cost(0);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+operand indIndexScaledOffsetI(iRegP reg, iRegL lreg, immIScale scale, immIU12 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP reg (LShiftL lreg scale)) off);
+  op_cost(INSN_COST);
+  format %{ "$reg, $lreg lsl($scale), $off" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+operand indIndexScaledOffsetL(iRegP reg, iRegL lreg, immIScale scale, immLU12 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP reg (LShiftL lreg scale)) off);
+  op_cost(INSN_COST);
+  format %{ "$reg, $lreg lsl($scale), $off" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+operand indIndexScaledOffsetI2L(iRegP reg, iRegI ireg, immIScale scale, immLU12 off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
+  op_cost(INSN_COST);
+  format %{ "$reg, $ireg sxtw($scale), $off I2L" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($ireg);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg (LShiftL (ConvI2L ireg) scale));
+  op_cost(0);
+  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($ireg);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg (LShiftL lreg scale));
+  op_cost(0);
+  format %{ "$reg, $lreg lsl($scale)" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+operand indIndex(iRegP reg, iRegL lreg)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg lreg);
+  op_cost(0);
+  format %{ "$reg, $lreg" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+operand indOffI(iRegP reg, immIOffset off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(INSN_COST);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+operand indOffL(iRegP reg, immLoffset off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+
+operand indirectN(iRegN reg)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(DecodeN reg);
+  op_cost(0);
+  format %{ "[$reg]\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+operand indIndexScaledOffsetIN(iRegN reg, iRegL lreg, immIScale scale, immIU12 off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
+  op_cost(0);
+  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+operand indIndexScaledOffsetLN(iRegN reg, iRegL lreg, immIScale scale, immLU12 off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
+  op_cost(INSN_COST);
+  format %{ "$reg, $lreg lsl($scale), $off\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+operand indIndexScaledOffsetI2LN(iRegN reg, iRegI ireg, immIScale scale, immLU12 off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)) off);
+  op_cost(INSN_COST);
+  format %{ "$reg, $ireg sxtw($scale), $off I2L\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($ireg);
+    scale($scale);
+    disp($off);
+  %}
+%}
+
+operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
+  op_cost(0);
+  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($ireg);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) (LShiftL lreg scale));
+  op_cost(0);
+  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale($scale);
+    disp(0x0);
+  %}
+%}
+
+operand indIndexN(iRegN reg, iRegL lreg)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) lreg);
+  op_cost(0);
+  format %{ "$reg, $lreg\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index($lreg);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+operand indOffIN(iRegN reg, immIOffset off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) off);
+  op_cost(0);
+  format %{ "[$reg, $off]\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+operand indOffLN(iRegN reg, immLoffset off)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP (DecodeN reg) off);
+  op_cost(0);
+  format %{ "[$reg, $off]\t# narrow" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+
+
+// AArch64 opto stubs need to write to the pc slot in the thread anchor
+operand thread_anchor_pc(thread_RegP reg, immL_pc_off off)
+%{
+  constraint(ALLOC_IN_RC(ptr_reg));
+  match(AddP reg off);
+  op_cost(0);
+  format %{ "[$reg, $off]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0xffffffff);
+    scale(0x0);
+    disp($off);
+  %}
+%}
+
+//----------Special Memory Operands--------------------------------------------
+// Stack Slot Operand - This operand is used for loading and storing temporary
+//                      values on the stack where a match requires a value to
+//                      flow through memory.
+operand stackSlotP(sRegP reg)
+%{
+  constraint(ALLOC_IN_RC(stack_slots));
+  op_cost(100);
+  // No match rule because this operand is only generated in matching
+  // match(RegP);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1e);  // RSP
+    index(0x0);  // No Index
+    scale(0x0);  // No Scale
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotI(sRegI reg)
+%{
+  constraint(ALLOC_IN_RC(stack_slots));
+  // No match rule because this operand is only generated in matching
+  // match(RegI);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1e);  // RSP
+    index(0x0);  // No Index
+    scale(0x0);  // No Scale
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotF(sRegF reg)
+%{
+  constraint(ALLOC_IN_RC(stack_slots));
+  // No match rule because this operand is only generated in matching
+  // match(RegF);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1e);  // RSP
+    index(0x0);  // No Index
+    scale(0x0);  // No Scale
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotD(sRegD reg)
+%{
+  constraint(ALLOC_IN_RC(stack_slots));
+  // No match rule because this operand is only generated in matching
+  // match(RegD);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1e);  // RSP
+    index(0x0);  // No Index
+    scale(0x0);  // No Scale
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotL(sRegL reg)
+%{
+  constraint(ALLOC_IN_RC(stack_slots));
+  // No match rule because this operand is only generated in matching
+  // match(RegL);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1e);  // RSP
+    index(0x0);  // No Index
+    scale(0x0);  // No Scale
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+// Operands for expressing Control Flow
+// NOTE: Label is a predefined operand which should not be redefined in
+//       the AD file. It is generically handled within the ADLC.
+
+//----------Conditional Branch Operands----------------------------------------
+// Comparison Op  - This is the operation of the comparison, and is limited to
+//                  the following set of codes:
+//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
+//
+// Other attributes of the comparison, such as unsignedness, are specified
+// by the comparison instruction that sets a condition code flags register.
+// That result is represented by a flags operand whose subtype is appropriate
+// to the unsignedness (etc.) of the comparison.
+//
+// Later, the instruction which matches both the Comparison Op (a Bool) and
+// the flags (produced by the Cmp) specifies the coding of the comparison op
+// by matching a specific subtype of Bool operand below, such as cmpOpU.
+
+// used for signed integral comparisons and fp comparisons
+
+operand cmpOp()
+%{
+  match(Bool);
+
+  format %{ "" %}
+  interface(COND_INTER) %{
+    equal(0x0, "eq");
+    not_equal(0x1, "ne");
+    less(0xb, "lt");
+    greater_equal(0xa, "ge");
+    less_equal(0xd, "le");
+    greater(0xc, "gt");
+    overflow(0x6, "vs");
+    no_overflow(0x7, "vc");
+  %}
+%}
+
+// used for unsigned integral comparisons
+
+operand cmpOpU()
+%{
+  match(Bool);
+
+  format %{ "" %}
+  interface(COND_INTER) %{
+    equal(0x0, "eq");
+    not_equal(0x1, "ne");
+    less(0x3, "lo");
+    greater_equal(0x2, "hs");
+    less_equal(0x9, "ls");
+    greater(0x8, "hi");
+    overflow(0x6, "vs");
+    no_overflow(0x7, "vc");
+  %}
+%}
+
+// Special operand allowing long args to int ops to be truncated for free
+
+operand iRegL2I(iRegL reg) %{
+
+  op_cost(0);
+
+  match(ConvL2I reg);
+
+  format %{ "l2i($reg)" %}
+
+  interface(REG_INTER)
+%}
+
+
+//----------OPERAND CLASSES----------------------------------------------------
+// Operand Classes are groups of operands that are used as to simplify
+// instruction definitions by not requiring the AD writer to specify
+// separate instructions for every form of operand when the
+// instruction accepts multiple operand types with the same basic
+// encoding and format. The classic case of this is memory operands.
+
+// memory is used to define read/write location for load/store
+// instruction defs. we can turn a memory op into an Address
+
+opclass memory(indirect, indIndexScaledOffsetI,  indIndexScaledOffsetL, indIndexScaledOffsetI2L, indIndexScaled, indIndexScaledI2L, indIndex, indOffI, indOffL,
+               indirectN, indIndexScaledOffsetIN,  indIndexScaledOffsetLN, indIndexScaledOffsetI2LN, indIndexScaledN, indIndexScaledI2LN, indIndexN, indOffIN, indOffLN);
+
+
+// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
+// operations. it allows the src to be either an iRegI or a (ConvL2I
+// iRegL). in the latter case the l2i normally planted for a ConvL2I
+// can be elided because the 32-bit instruction will just employ the
+// lower 32 bits anyway.
+//
+// n.b. this does not elide all L2I conversions. if the truncated
+// value is consumed by more than one operation then the ConvL2I
+// cannot be bundled into the consuming nodes so an l2i gets planted
+// (actually a movw $dst $src) and the downstream instructions consume
+// the result of the l2i as an iRegI input. That's a shame since the
+// movw is actually redundant but its not too costly.
+
+opclass iRegIorL2I(iRegI, iRegL2I);
+
+//----------PIPELINE-----------------------------------------------------------
+// Rules which define the behavior of the target architectures pipeline.
+// Integer ALU reg operation
+pipeline %{
+
+attributes %{
+  // ARM instructions are of fixed length
+  fixed_size_instructions;        // Fixed size instructions
+  max_instructions_per_bundle = 2;   // A53 = 2, A57 = 4
+  // ARM instructions come in 32-bit word units
+  instruction_unit_size = 4;         // An instruction is 4 bytes long
+  instruction_fetch_unit_size = 64;  // The processor fetches one line
+  instruction_fetch_units = 1;       // of 64 bytes
+
+  // List of nop instructions
+  nops( MachNop );
+%}
+
+// We don't use an actual pipeline model so don't care about resources
+// or description. we do use pipeline classes to introduce fixed
+// latencies
+
+//----------RESOURCES----------------------------------------------------------
+// Resources are the functional units available to the machine
+
+resources( INS0, INS1, INS01 = INS0 | INS1,
+           ALU0, ALU1, ALU = ALU0 | ALU1,
+           MAC,
+           DIV,
+           BRANCH,
+           LDST,
+           NEON_FP);
+
+//----------PIPELINE DESCRIPTION-----------------------------------------------
+// Pipeline Description specifies the stages in the machine's pipeline
+
+pipe_desc(ISS, EX1, EX2, WR);
+
+//----------PIPELINE CLASSES---------------------------------------------------
+// Pipeline Classes describe the stages in which input and output are
+// referenced by the hardware pipeline.
+
+//------- Integer ALU operations --------------------------
+
+// Integer ALU reg-reg operation
+// Operands needed in EX1, result generated in EX2
+// Eg.  ADD     x0, x1, x2
+pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+%{
+  single_instruction;
+  dst    : EX2(write);
+  src1   : EX1(read);
+  src2   : EX1(read);
+  INS01  : ISS; // Dual issue as instruction 0 or 1
+  ALU    : EX2;
+%}
+
+// Integer ALU reg-reg operation with constant shift
+// Shifted register must be available in LATE_ISS instead of EX1
+// Eg.  ADD     x0, x1, x2, LSL #2
+pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
+%{
+  single_instruction;
+  dst    : EX2(write);
+  src1   : EX1(read);
+  src2   : ISS(read);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+// Integer ALU reg operation with constant shift
+// Eg.  LSL     x0, x1, #shift
+pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
+%{
+  single_instruction;
+  dst    : EX2(write);
+  src1   : ISS(read);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+// Integer ALU reg-reg operation with variable shift
+// Both operands must be available in LATE_ISS instead of EX1
+// Result is available in EX1 instead of EX2
+// Eg.  LSLV    x0, x1, x2
+pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
+%{
+  single_instruction;
+  dst    : EX1(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  INS01  : ISS;
+  ALU    : EX1;
+%}
+
+// Integer ALU reg-reg operation with extract
+// As for _vshift above, but result generated in EX2
+// Eg.  EXTR    x0, x1, x2, #N
+pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
+%{
+  single_instruction;
+  dst    : EX2(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  INS1   : ISS; // Can only dual issue as Instruction 1
+  ALU    : EX1;
+%}
+
+// Integer ALU reg operation
+// Eg.  NEG     x0, x1
+pipe_class ialu_reg(iRegI dst, iRegI src)
+%{
+  single_instruction;
+  dst    : EX2(write);
+  src    : EX1(read);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+// Integer ALU reg immediate operation
+// Eg.  ADD     x0, x1, #N
+pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
+%{
+  single_instruction;
+  dst    : EX2(write);
+  src1   : EX1(read);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+// Integer ALU immediate operation (no source operands)
+// Eg.  MOV     x0, #N
+pipe_class ialu_imm(iRegI dst)
+%{
+  single_instruction;
+  dst    : EX1(write);
+  INS01  : ISS;
+  ALU    : EX1;
+%}
+
+//------- Compare operation -------------------------------
+
+// Compare reg-reg
+// Eg.  CMP     x0, x1
+pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
+%{
+  single_instruction;
+//  fixed_latency(16);
+  cr     : EX2(write);
+  op1    : EX1(read);
+  op2    : EX1(read);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+// Compare reg-immediate
+// Eg.  CMP     x0, #N
+pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
+%{
+  single_instruction;
+//  fixed_latency(16);
+  cr     : EX2(write);
+  op1    : EX1(read);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+//------- Conditional instructions ------------------------
+
+// Conditional no operands
+// Eg.  CSINC   x0, zr, zr, <cond>
+pipe_class icond_none(iRegI dst, rFlagsReg cr)
+%{
+  single_instruction;
+  cr     : EX1(read);
+  dst    : EX2(write);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+// Conditional 2 operand
+// EG.  CSEL    X0, X1, X2, <cond>
+pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
+%{
+  single_instruction;
+  cr     : EX1(read);
+  src1   : EX1(read);
+  src2   : EX1(read);
+  dst    : EX2(write);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+// Conditional 1 operand
+// EG.  CSEL    X0, X1, zr, <cond>
+pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
+%{
+  single_instruction;
+  cr     : EX1(read);
+  src    : EX1(read);
+  dst    : EX2(write);
+  INS01  : ISS;
+  ALU    : EX2;
+%}
+
+//------- Multiply pipeline operations --------------------
+
+// Multiply reg-reg
+// Eg.  MUL     w0, w1, w2
+pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+%{
+  single_instruction;
+  dst    : WR(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  INS01  : ISS;
+  MAC    : WR;
+%}
+
+// Multiply accumulate
+// Eg.  MADD    w0, w1, w2, w3
+pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
+%{
+  single_instruction;
+  dst    : WR(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  src3   : ISS(read);
+  INS01  : ISS;
+  MAC    : WR;
+%}
+
+// Eg.  MUL     x0, x1, x2
+pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+%{
+  single_instruction;
+  fixed_latency(3); // Maximum latency for 64 bit mul
+  dst    : WR(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  INS01  : ISS;
+  MAC    : WR;
+%}
+
+// Multiply accumulate
+// Eg.  MADD    x0, x1, x2, x3
+pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
+%{
+  single_instruction;
+  fixed_latency(3); // Maximum latency for 64 bit mul
+  dst    : WR(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  src3   : ISS(read);
+  INS01  : ISS;
+  MAC    : WR;
+%}
+
+//------- Divide pipeline operations --------------------
+
+// Eg.  SDIV    w0, w1, w2
+pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+%{
+  single_instruction;
+  fixed_latency(8); // Maximum latency for 32 bit divide
+  dst    : WR(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  INS0   : ISS; // Can only dual issue as instruction 0
+  DIV    : WR;
+%}
+
+// Eg.  SDIV    x0, x1, x2
+pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
+%{
+  single_instruction;
+  fixed_latency(16); // Maximum latency for 64 bit divide
+  dst    : WR(write);
+  src1   : ISS(read);
+  src2   : ISS(read);
+  INS0   : ISS; // Can only dual issue as instruction 0
+  DIV    : WR;
+%}
+
+//------- Load pipeline operations ------------------------
+
+// All loads occupy the load/store (LDST) unit.
+
+// Load - prefetch
+// Eg.  PFRM    <mem>
+pipe_class iload_prefetch(memory mem)
+%{
+  single_instruction;
+  mem    : ISS(read);
+  INS01  : ISS;
+  LDST   : WR;
+%}
+
+// Load - reg, mem
+// Eg.  LDR     x0, <mem>
+pipe_class iload_reg_mem(iRegI dst, memory mem)
+%{
+  single_instruction;
+  dst    : WR(write);
+  mem    : ISS(read);
+  INS01  : ISS;
+  LDST   : WR;
+%}
+
+// Load - reg, reg (register-offset addressing)
+// Eg.  LDR     x0, [sp, x1]
+pipe_class iload_reg_reg(iRegI dst, iRegI src)
+%{
+  single_instruction;
+  dst    : WR(write);
+  src    : ISS(read);
+  INS01  : ISS;
+  LDST   : WR;
+%}
+
+//------- Store pipeline operations -----------------------
+
+// Store data is not needed until EX2, later than the address operands.
+
+// Store - zr, mem
+// Eg.  STR     zr, <mem>
+pipe_class istore_mem(memory mem)
+%{
+  single_instruction;
+  mem    : ISS(read);
+  INS01  : ISS;
+  LDST   : WR;
+%}
+
+// Store - reg, mem
+// Eg.  STR     x0, <mem>
+pipe_class istore_reg_mem(iRegI src, memory mem)
+%{
+  single_instruction;
+  mem    : ISS(read);
+  src    : EX2(read);
+  INS01  : ISS;
+  LDST   : WR;
+%}
+
+// Store - reg, reg (register-offset addressing; both operands are read)
+// Eg. STR      x0, [sp, x1]
+pipe_class istore_reg_reg(iRegI dst, iRegI src)
+%{
+  single_instruction;
+  dst    : ISS(read);
+  src    : EX2(read);
+  INS01  : ISS;
+  LDST   : WR;
+%}
+
+//------- Branch pipeline operations ----------------------
+
+// Branch
+pipe_class pipe_branch()
+%{
+  single_instruction;
+  INS01  : ISS;
+  BRANCH : EX1;
+%}
+
+// Conditional branch (reads the flags register)
+pipe_class pipe_branch_cond(rFlagsReg cr)
+%{
+  single_instruction;
+  cr     : EX1(read);
+  INS01  : ISS;
+  BRANCH : EX1;
+%}
+
+// Compare & Branch
+// EG.  CBZ/CBNZ
+pipe_class pipe_cmp_branch(iRegI op1)
+%{
+  single_instruction;
+  op1    : EX1(read);
+  INS01  : ISS;
+  BRANCH : EX1;
+%}
+
+//------- Synchronisation operations ----------------------
+
+// Any operation requiring serialization.
+// EG.  DMB/Atomic Ops/Load Acquire/Str Release
+pipe_class pipe_serial()
+%{
+  single_instruction;
+  force_serialization;
+  fixed_latency(16);
+  INS01  : ISS(2); // Cannot dual issue with any other instruction
+  LDST   : WR;
+%}
+
+// Generic big/slow expanded idiom - also serialized
+pipe_class pipe_slow()
+%{
+  instruction_count(10);
+  multiple_bundles;
+  force_serialization;
+  fixed_latency(16);
+  INS01  : ISS(2); // Cannot dual issue with any other instruction
+  LDST   : WR;
+%}
+
+// Empty pipeline class
+pipe_class pipe_class_empty()
+%{
+  single_instruction;
+  fixed_latency(0);
+%}
+
+// Default pipeline class.
+pipe_class pipe_class_default()
+%{
+  single_instruction;
+  fixed_latency(2);
+%}
+
+// Pipeline class for compares.
+pipe_class pipe_class_compare()
+%{
+  single_instruction;
+  fixed_latency(16);
+%}
+
+// Pipeline class for memory operations.
+pipe_class pipe_class_memory()
+%{
+  single_instruction;
+  fixed_latency(16);
+%}
+
+// Pipeline class for call.
+pipe_class pipe_class_call()
+%{
+  single_instruction;
+  fixed_latency(100);
+%}
+
+// Define the class for the Nop node.
+define %{
+   MachNop = pipe_class_empty;
+%}
+
+%}
+//----------INSTRUCTIONS-------------------------------------------------------
+//
+// match      -- States which machine-independent subtree may be replaced
+//               by this instruction.
+// ins_cost   -- The estimated cost of this instruction is used by instruction
+//               selection to identify a minimum cost tree of machine
+//               instructions that matches a tree of machine-independent
+//               instructions.
+// format     -- A string providing the disassembly for this instruction.
+//               The value of an instruction's operand may be inserted
+//               by referring to it with a '$' prefix.
+// opcode     -- Three instruction opcodes may be provided.  These are referred
+//               to within an encode class as $primary, $secondary, and $tertiary
+//               respectively.  The primary opcode is commonly used to
+//               indicate the type of machine instruction, while secondary
+//               and tertiary are often used for prefix options or addressing
+//               modes.
+// ins_encode -- A list of encode classes with parameters. The encode class
+//               name must have been defined in an 'enc_class' specification
+//               in the encode section of the architecture description.
+
+// ============================================================================
+// Memory (Load/Store) Instructions
+
+// Load Instructions
+
+// These rules match only plain (unordered) accesses, unless barrier-based
+// volatile handling is in use; ordered accesses are matched by the
+// *_volatile rules further down.
+
+// Load Byte (8 bit signed)
+instruct loadB(iRegINoSp dst, memory mem)
+%{
+  match(Set dst (LoadB mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrsbw  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldrsbw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Byte (8 bit signed) into long
+instruct loadB2L(iRegLNoSp dst, memory mem)
+%{
+  match(Set dst (ConvI2L (LoadB mem)));
+  predicate(UseBarriersForVolatile || n->in(1)->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrsb  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldrsb(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Byte (8 bit unsigned)
+instruct loadUB(iRegINoSp dst, memory mem)
+%{
+  match(Set dst (LoadUB mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrbw  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldrb(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Byte (8 bit unsigned) into long
+instruct loadUB2L(iRegLNoSp dst, memory mem)
+%{
+  match(Set dst (ConvI2L (LoadUB mem)));
+  predicate(UseBarriersForVolatile || n->in(1)->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrb  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldrb(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Short (16 bit signed)
+instruct loadS(iRegINoSp dst, memory mem)
+%{
+  match(Set dst (LoadS mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrshw  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldrshw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Short (16 bit signed) into long
+// (the ConvI2L is folded into the sign-extending load)
+instruct loadS2L(iRegLNoSp dst, memory mem)
+%{
+  match(Set dst (ConvI2L (LoadS mem)));
+  predicate(UseBarriersForVolatile || n->in(1)->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrsh  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldrsh(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Char (16 bit unsigned)
+instruct loadUS(iRegINoSp dst, memory mem)
+%{
+  match(Set dst (LoadUS mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrh  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldrh(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Short/Char (16 bit unsigned) into long
+// (zero-extending ldrh clears the upper bits, so no explicit conversion)
+instruct loadUS2L(iRegLNoSp dst, memory mem)
+%{
+  match(Set dst (ConvI2L (LoadUS mem)));
+  predicate(UseBarriersForVolatile || n->in(1)->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrh  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldrh(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Integer (32 bit signed)
+instruct loadI(iRegINoSp dst, memory mem)
+%{
+  match(Set dst (LoadI mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrw  $dst, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_ldrw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Integer (32 bit signed) into long
+// (sign-extending ldrsw folds the ConvI2L)
+instruct loadI2L(iRegLNoSp dst, memory mem)
+%{
+  match(Set dst (ConvI2L (LoadI mem)));
+  predicate(UseBarriersForVolatile || n->in(1)->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrsw  $dst, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_ldrsw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Integer (32 bit unsigned) into long
+// Matches (AndL (ConvI2L (LoadI)) 0xFFFFFFFF): the zero-extending ldrw
+// already clears the upper 32 bits, so both the mask and the conversion
+// are folded away.
+instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask)
+%{
+  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
+  predicate(UseBarriersForVolatile || n->in(1)->in(1)->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrw  $dst, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_ldrw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Long (64 bit signed)
+instruct loadL(iRegLNoSp dst, memory mem)
+%{
+  match(Set dst (LoadL mem));
+  // Only plain (unordered) loads; ordered loads match loadL_volatile (ldar).
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  // Fixed disassembly comment: this is a 64-bit (long) load, not an int load.
+  format %{ "ldr  $dst, $mem\t# long" %}
+
+  ins_encode(aarch64_enc_ldr(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Range
+// (array length loads are never volatile, hence no ordering predicate)
+instruct loadRange(iRegINoSp dst, memory mem)
+%{
+  match(Set dst (LoadRange mem));
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrw  $dst, $mem\t# range" %}
+
+  ins_encode(aarch64_enc_ldrw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Pointer
+instruct loadP(iRegPNoSp dst, memory mem)
+%{
+  match(Set dst (LoadP mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldr  $dst, $mem\t# ptr" %}
+
+  ins_encode(aarch64_enc_ldr(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Compressed Pointer
+instruct loadN(iRegNNoSp dst, memory mem)
+%{
+  match(Set dst (LoadN mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}
+
+  ins_encode(aarch64_enc_ldrw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Klass Pointer
+instruct loadKlass(iRegPNoSp dst, memory mem)
+%{
+  match(Set dst (LoadKlass mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldr  $dst, $mem\t# class" %}
+
+  ins_encode(aarch64_enc_ldr(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Narrow Klass Pointer
+instruct loadNKlass(iRegNNoSp dst, memory mem)
+%{
+  match(Set dst (LoadNKlass mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}
+
+  ins_encode(aarch64_enc_ldrw(dst, mem));
+
+  ins_pipe(iload_reg_mem);
+%}
+
+// Load Float
+instruct loadF(vRegF dst, memory mem)
+%{
+  match(Set dst (LoadF mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrs  $dst, $mem\t# float" %}
+
+  ins_encode( aarch64_enc_ldrs(dst, mem) );
+
+  // NOTE(review): FP loads use pipe_class_memory while integer loads use
+  // iload_reg_mem — confirm this asymmetry is intentional.
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Double
+instruct loadD(vRegD dst, memory mem)
+%{
+  match(Set dst (LoadD mem));
+  predicate(UseBarriersForVolatile || n->as_Load()->is_unordered());
+
+  ins_cost(4 * INSN_COST);
+  format %{ "ldrd  $dst, $mem\t# double" %}
+
+  ins_encode( aarch64_enc_ldrd(dst, mem) );
+
+  ins_pipe(pipe_class_memory);
+%}
+
+
+// Load Int Constant
+instruct loadConI(iRegINoSp dst, immI src)
+%{
+  match(Set dst src);
+
+  ins_cost(INSN_COST);
+  format %{ "mov $dst, $src\t# int" %}
+
+  ins_encode( aarch64_enc_movw_imm(dst, src) );
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Long Constant
+instruct loadConL(iRegLNoSp dst, immL src)
+%{
+  match(Set dst src);
+
+  ins_cost(INSN_COST);
+  format %{ "mov $dst, $src\t# long" %}
+
+  ins_encode( aarch64_enc_mov_imm(dst, src) );
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Pointer Constant
+
+// Cost reflects that a general pointer may need a multi-instruction
+// movz/movk sequence or a relocated literal.
+instruct loadConP(iRegPNoSp dst, immP con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST * 4);
+  format %{
+    "mov  $dst, $con\t# ptr\n\t"
+  %}
+
+  ins_encode(aarch64_enc_mov_p(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Null Pointer Constant
+
+instruct loadConP0(iRegPNoSp dst, immP0 con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST);
+  format %{ "mov  $dst, $con\t# NULL ptr" %}
+
+  ins_encode(aarch64_enc_mov_p0(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Pointer Constant One
+// (used e.g. as a marker value; distinct rule so it stays a single mov)
+
+instruct loadConP1(iRegPNoSp dst, immP_1 con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST);
+  // Fixed copy-paste from loadConP0: this rule materializes the pointer
+  // constant 1, not NULL.
+  format %{ "mov  $dst, $con\t# ptr 1" %}
+
+  ins_encode(aarch64_enc_mov_p1(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Poll Page Constant
+// The safepoint polling page is within adr range, so a pc-relative adr
+// suffices instead of a full constant-materialization sequence.
+
+instruct loadConPollPage(iRegPNoSp dst, immPollPage con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST);
+  format %{ "adr  $dst, $con\t# Poll Page Ptr" %}
+
+  ins_encode(aarch64_enc_mov_poll_page(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Byte Map Base Constant
+// (card table base for the write barrier)
+
+instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST);
+  format %{ "adr  $dst, $con\t# Byte Map Base" %}
+
+  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Narrow Pointer Constant
+
+instruct loadConN(iRegNNoSp dst, immN con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST * 4);
+  format %{ "mov  $dst, $con\t# compressed ptr" %}
+
+  ins_encode(aarch64_enc_mov_n(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Narrow Null Pointer Constant
+
+instruct loadConN0(iRegNNoSp dst, immN0 con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST);
+  format %{ "mov  $dst, $con\t# compressed NULL ptr" %}
+
+  ins_encode(aarch64_enc_mov_n0(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Narrow Klass Constant
+
+instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
+%{
+  match(Set dst con);
+
+  ins_cost(INSN_COST);
+  format %{ "mov  $dst, $con\t# compressed klass ptr" %}
+
+  ins_encode(aarch64_enc_mov_nk(dst, con));
+
+  ins_pipe(ialu_imm);
+%}
+
+// Load Packed Float Constant
+// "Packed" constants are those encodable in fmov's 8-bit immediate form,
+// so no constant-table load is needed.
+
+instruct loadConF_packed(vRegF dst, immFPacked con) %{
+  match(Set dst con);
+  ins_cost(INSN_COST * 4);
+  format %{ "fmovs  $dst, $con"%}
+  ins_encode %{
+    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// Load Float Constant
+// General case: load from the constant table.
+
+instruct loadConF(vRegF dst, immF con) %{
+  match(Set dst con);
+
+  ins_cost(INSN_COST * 4);
+
+  format %{
+    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
+  %}
+
+  ins_encode %{
+    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// Load Packed Double Constant
+
+instruct loadConD_packed(vRegD dst, immDPacked con) %{
+  match(Set dst con);
+  ins_cost(INSN_COST);
+  format %{ "fmovd  $dst, $con"%}
+  ins_encode %{
+    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// Load Double Constant
+// General case: load from the constant table.
+
+instruct loadConD(vRegD dst, immD con) %{
+  match(Set dst con);
+
+  ins_cost(INSN_COST * 5);
+  format %{
+    // Fixed copy-paste from loadConF: this is a double constant.
+    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
+  %}
+
+  ins_encode %{
+    __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// Store Instructions
+
+// Store CMS card-mark Immediate
+// NOTE(review): unlike the other store rules this has no ordering
+// predicate — presumably card marks are never ordered; confirm.
+instruct storeimmCM0(immI0 zero, memory mem)
+%{
+  match(Set mem (StoreCM mem zero));
+
+  ins_cost(INSN_COST);
+  format %{ "strb zr, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_strb0(mem));
+
+  ins_pipe(istore_mem);
+%}
+
+// Store Byte
+instruct storeB(iRegI src, memory mem)
+%{
+  match(Set mem (StoreB mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strb  $src, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_strb(src, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+
+// Store Byte zero (uses the hardwired zero register)
+instruct storeimmB0(immI0 zero, memory mem)
+%{
+  match(Set mem (StoreB mem zero));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strb zr, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_strb0(mem));
+
+  ins_pipe(istore_mem);
+%}
+
+// Store Char/Short
+instruct storeC(iRegI src, memory mem)
+%{
+  match(Set mem (StoreC mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strh  $src, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_strh(src, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+// Store Char/Short zero (uses the hardwired zero register)
+instruct storeimmC0(immI0 zero, memory mem)
+%{
+  match(Set mem (StoreC mem zero));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strh  zr, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_strh0(mem));
+
+  ins_pipe(istore_mem);
+%}
+
+// Store Integer
+
+instruct storeI(iRegIorL2I src, memory mem)
+%{
+  match(Set mem(StoreI mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strw  $src, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_strw(src, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+// Store Integer zero (uses the hardwired zero register)
+instruct storeimmI0(immI0 zero, memory mem)
+%{
+  match(Set mem(StoreI mem zero));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strw  zr, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_strw0(mem));
+
+  ins_pipe(istore_mem);
+%}
+
+// Store Long (64 bit signed)
+instruct storeL(iRegL src, memory mem)
+%{
+  match(Set mem (StoreL mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  // Fixed disassembly comment: 64-bit (long) store, not an int store.
+  format %{ "str  $src, $mem\t# long" %}
+
+  ins_encode(aarch64_enc_str(src, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+// Store Long zero (uses the hardwired zero register)
+instruct storeimmL0(immL0 zero, memory mem)
+%{
+  match(Set mem (StoreL mem zero));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  // Fixed disassembly comment: 64-bit (long) store, not an int store.
+  format %{ "str  zr, $mem\t# long" %}
+
+  ins_encode(aarch64_enc_str0(mem));
+
+  ins_pipe(istore_mem);
+%}
+
+// Store Pointer
+instruct storeP(iRegP src, memory mem)
+%{
+  match(Set mem (StoreP mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "str  $src, $mem\t# ptr" %}
+
+  ins_encode(aarch64_enc_str(src, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+// Store NULL Pointer (uses the hardwired zero register)
+instruct storeimmP0(immP0 zero, memory mem)
+%{
+  match(Set mem (StoreP mem zero));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "str zr, $mem\t# ptr" %}
+
+  ins_encode(aarch64_enc_str0(mem));
+
+  ins_pipe(istore_mem);
+%}
+
+// Store Compressed Pointer
+instruct storeN(iRegN src, memory mem)
+%{
+  match(Set mem (StoreN mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strw  $src, $mem\t# compressed ptr" %}
+
+  ins_encode(aarch64_enc_strw(src, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+// Store compressed NULL: when both narrow-oop and narrow-klass bases are
+// NULL the heapbase register holds zero, so it can be stored directly.
+instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem)
+%{
+  match(Set mem (StoreN mem zero));
+  predicate(Universe::narrow_oop_base() == NULL &&
+            Universe::narrow_klass_base() == NULL &&
+            (UseBarriersForVolatile || n->as_Store()->is_unordered()));
+
+  ins_cost(INSN_COST);
+  format %{ "strw  rheapbase, $mem\t# compressed ptr (rheapbase==0)" %}
+
+  ins_encode(aarch64_enc_strw(heapbase, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+// Store Float
+instruct storeF(vRegF src, memory mem)
+%{
+  match(Set mem (StoreF mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strs  $src, $mem\t# float" %}
+
+  ins_encode( aarch64_enc_strs(src, mem) );
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// TODO
+// implement storeImmF0 and storeFImmPacked
+
+// Store Double
+instruct storeD(vRegD src, memory mem)
+%{
+  match(Set mem (StoreD mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strd  $src, $mem\t# double" %}
+
+  ins_encode( aarch64_enc_strd(src, mem) );
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Compressed Klass Pointer
+instruct storeNKlass(iRegN src, memory mem)
+%{
+  // Reordered match before predicate for consistency with every other
+  // store rule in this file (no semantic change; ADLC accepts either order).
+  match(Set mem (StoreNKlass mem src));
+  predicate(UseBarriersForVolatile || n->as_Store()->is_unordered());
+
+  ins_cost(INSN_COST);
+  format %{ "strw  $src, $mem\t# compressed klass ptr" %}
+
+  ins_encode(aarch64_enc_strw(src, mem));
+
+  ins_pipe(istore_reg_mem);
+%}
+
+// TODO
+// implement storeImmD0 and storeDImmPacked
+
+// prefetch instructions
+// Must be safe to execute with invalid address (cannot fault).
+// NOTE(review): the instruct names (prefetchr/prefetchw/prefetchnta) follow
+// the x86 naming; the Ideal-op mapping is PrefetchRead->PLDL1KEEP,
+// PrefetchAllocation->PSTL1KEEP, PrefetchWrite->PSTL1STRM — confirm the
+// prefetchw/prefetchnta pairing is intentional.
+
+instruct prefetchr( memory mem ) %{
+  match(PrefetchRead mem);
+
+  ins_cost(INSN_COST);
+  format %{ "prfm $mem, PLDL1KEEP\t# Prefetch into level 1 cache read keep" %}
+
+  ins_encode( aarch64_enc_prefetchr(mem) );
+
+  ins_pipe(iload_prefetch);
+%}
+
+instruct prefetchw( memory mem ) %{
+  match(PrefetchAllocation mem);
+
+  ins_cost(INSN_COST);
+  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
+
+  ins_encode( aarch64_enc_prefetchw(mem) );
+
+  ins_pipe(iload_prefetch);
+%}
+
+instruct prefetchnta( memory mem ) %{
+  match(PrefetchWrite mem);
+
+  ins_cost(INSN_COST);
+  format %{ "prfm $mem, PSTL1STRM\t# Prefetch into level 1 cache write streaming" %}
+
+  ins_encode( aarch64_enc_prefetchnta(mem) );
+
+  ins_pipe(iload_prefetch);
+%}
+
+//  ---------------- volatile loads and stores ----------------
+
+// These rules use load-acquire/store-release instructions and carry no
+// ordering predicate: the plain-access rules above exclude ordered nodes,
+// so ordered accesses fall through to these forms.
+
+// Load Byte (8 bit signed)
+instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadB mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarsb  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldarsb(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Byte (8 bit signed) into long
+instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (ConvI2L (LoadB mem)));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarsb  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldarsb(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Byte (8 bit unsigned)
+instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadUB mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarb  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldarb(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Byte (8 bit unsigned) into long
+instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (ConvI2L (LoadUB mem)));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarb  $dst, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_ldarb(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Short (16 bit signed)
+instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadS mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarshw  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldarshw(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Char (16 bit unsigned)
+instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadUS mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarhw  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldarhw(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Short/Char (16 bit unsigned) into long
+instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (ConvI2L (LoadUS mem)));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarh  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldarh(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Short (16 bit signed) into long
+// NOTE(review): format shows "ldarh" while the encoding is
+// aarch64_enc_ldarsh — AArch64 has no sign-extending load-acquire, so the
+// encoding presumably emits ldarh plus a sign-extend; confirm against the
+// enc_class definition.
+instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (ConvI2L (LoadS mem)));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarh  $dst, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_ldarsh(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Integer (32 bit signed)
+instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadI mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarw  $dst, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_ldarw(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Integer (32 bit unsigned) into long
+// (ldarw zero-extends, folding the mask and the conversion)
+instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
+%{
+  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarw  $dst, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_ldarw(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Long (64 bit signed)
+instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadL mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  // Fixed disassembly comment: 64-bit (long) load-acquire, not int.
+  format %{ "ldar  $dst, $mem\t# long" %}
+
+  ins_encode(aarch64_enc_ldar(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Pointer
+instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadP mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldar  $dst, $mem\t# ptr" %}
+
+  ins_encode(aarch64_enc_ldar(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Compressed Pointer
+instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadN mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}
+
+  ins_encode(aarch64_enc_ldarw(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Float
+// (acquire semantics via the fldars encoding; "ldars" in the format is the
+// pseudo-mnemonic used by this file for FP load-acquire)
+instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadF mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldars  $dst, $mem\t# float" %}
+
+  ins_encode( aarch64_enc_fldars(dst, mem) );
+
+  ins_pipe(pipe_serial);
+%}
+
+// Load Double
+instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
+%{
+  match(Set dst (LoadD mem));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "ldard  $dst, $mem\t# double" %}
+
+  ins_encode( aarch64_enc_fldard(dst, mem) );
+
+  ins_pipe(pipe_serial);
+%}
+
+// Store Byte
+// NOTE(review): volatile stores use pipe_class_memory while volatile loads
+// use pipe_serial — confirm this asymmetry is intentional.
+instruct storeB_volatile(iRegI src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem (StoreB mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "stlrb  $src, $mem\t# byte" %}
+
+  ins_encode(aarch64_enc_stlrb(src, mem));
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Char/Short
+instruct storeC_volatile(iRegI src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem (StoreC mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "stlrh  $src, $mem\t# short" %}
+
+  ins_encode(aarch64_enc_stlrh(src, mem));
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Integer
+
+instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem(StoreI mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "stlrw  $src, $mem\t# int" %}
+
+  ins_encode(aarch64_enc_stlrw(src, mem));
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Long (64 bit signed)
+instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem (StoreL mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  // Fixed disassembly comment: 64-bit (long) store-release, not int.
+  format %{ "stlr  $src, $mem\t# long" %}
+
+  ins_encode(aarch64_enc_stlr(src, mem));
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Pointer
+instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem (StoreP mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "stlr  $src, $mem\t# ptr" %}
+
+  ins_encode(aarch64_enc_stlr(src, mem));
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Compressed Pointer
+instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem (StoreN mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "stlrw  $src, $mem\t# compressed ptr" %}
+
+  ins_encode(aarch64_enc_stlrw(src, mem));
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Float
+// ("stlrs" in the format is the pseudo-mnemonic used by this file for FP
+// store-release, emitted by the fstlrs encoding)
+instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem (StoreF mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "stlrs  $src, $mem\t# float" %}
+
+  ins_encode( aarch64_enc_fstlrs(src, mem) );
+
+  ins_pipe(pipe_class_memory);
+%}
+
+// TODO
+// implement storeImmF0 and storeFImmPacked
+
+// Store Double
+instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
+%{
+  match(Set mem (StoreD mem src));
+
+  ins_cost(VOLATILE_REF_COST);
+  format %{ "stlrd  $src, $mem\t# double" %}
+
+  ins_encode( aarch64_enc_fstlrd(src, mem) );
+
+  ins_pipe(pipe_class_memory);
+%}
+
+//  ---------------- end of volatile loads and stores ----------------
+
+// ============================================================================
+// BSWAP Instructions
+
+instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
+  match(Set dst (ReverseBytesI src));
+
+  ins_cost(INSN_COST);
+  format %{ "revw  $dst, $src" %}
+
+  ins_encode %{
+    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
+  match(Set dst (ReverseBytesL src));
+
+  ins_cost(INSN_COST);
+  format %{ "rev  $dst, $src" %}
+
+  ins_encode %{
+    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
+  match(Set dst (ReverseBytesUS src));
+
+  ins_cost(INSN_COST);
+  format %{ "rev16w  $dst, $src" %}
+
+  ins_encode %{
+    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// Signed variant: rev16w swaps the bytes, then sbfmw sign-extends the
+// low 16 bits into the full 32-bit result.
+instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
+  match(Set dst (ReverseBytesS src));
+
+  ins_cost(INSN_COST);
+  format %{ "rev16w  $dst, $src\n\t"
+            "sbfmw $dst, $dst, #0, #15" %}
+
+  ins_encode %{
+    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
+    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// ============================================================================
+// Zero Count Instructions
+
+instruct countLeadingZerosI(iRegI dst, iRegI src) %{
+  match(Set dst (CountLeadingZerosI src));
+
+  ins_cost(INSN_COST);
+  format %{ "clzw  $dst, $src" %}
+  ins_encode %{
+    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+instruct countLeadingZerosL(iRegI dst, iRegL src) %{
+  match(Set dst (CountLeadingZerosL src));
+
+  ins_cost(INSN_COST);
+  format %{ "clz   $dst, $src" %}
+  ins_encode %{
+    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// AArch64 has no trailing-zero-count instruction, so reverse the bits
+// (rbit) and count leading zeros instead.
+instruct countTrailingZerosI(iRegI dst, iRegI src) %{
+  match(Set dst (CountTrailingZerosI src));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "rbitw  $dst, $src\n\t"
+            "clzw   $dst, $dst" %}
+  ins_encode %{
+    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
+    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+instruct countTrailingZerosL(iRegI dst, iRegL src) %{
+  match(Set dst (CountTrailingZerosL src));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "rbit   $dst, $src\n\t"
+            "clz    $dst, $dst" %}
+  ins_encode %{
+    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
+    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// ============================================================================
+// MemBar Instruction
+
+instruct load_fence() %{
+  match(LoadFence);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "load_fence" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+// Elide the acquire barrier when the preceding load was already emitted
+// with acquire semantics (ldar) — only valid when not using barrier-based
+// volatile handling.
+instruct unnecessary_membar_acquire() %{
+  predicate(! UseBarriersForVolatile && preceded_by_ordered_load(n));
+  match(MemBarAcquire);
+  ins_cost(0);
+
+  format %{ "membar_acquire (elided)" %}
+
+  ins_encode %{
+    __ block_comment("membar_acquire (elided)");
+  %}
+
+  ins_pipe(pipe_class_empty);
+%}
+
+instruct membar_acquire() %{
+  match(MemBarAcquire);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "membar_acquire" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
+  %}
+
+  ins_pipe(pipe_serial);
+%}
+
+
+instruct membar_acquire_lock() %{
+  match(MemBarAcquireLock);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "membar_acquire_lock" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
+  %}
+
+  ins_pipe(pipe_serial);
+%}
+
+instruct store_fence() %{
+  match(StoreFence);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "store_fence" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadStore|Assembler::StoreStore);
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct membar_release() %{
+  match(MemBarRelease);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "membar_release" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadStore|Assembler::StoreStore);
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct membar_storestore() %{
+  match(MemBarStoreStore);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "MEMBAR-store-store" %}
+
+  ins_encode %{
+    __ membar(Assembler::StoreStore);
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct membar_release_lock() %{
+  match(MemBarReleaseLock);
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "membar_release_lock" %}
+
+  ins_encode %{
+    __ membar(Assembler::LoadStore|Assembler::StoreStore);
+  %}
+
+  ins_pipe(pipe_serial);
+%}
+
+instruct membar_volatile() %{
+  match(MemBarVolatile);
+  ins_cost(VOLATILE_REF_COST*100);
+
+  format %{ "membar_volatile" %}
+
+  ins_encode %{
+    __ membar(Assembler::StoreLoad);
+  %}
+
+  ins_pipe(pipe_serial);
+%}
+
+// ============================================================================
+// Cast/Convert Instructions
+
+// Cast long to pointer: a plain register move, elided when source and
+// destination registers coincide.
+instruct castX2P(iRegPNoSp dst, iRegL src) %{
+  match(Set dst (CastX2P src));
+
+  ins_cost(INSN_COST);
+  format %{ "mov $dst, $src\t# long -> ptr" %}
+
+  ins_encode %{
+    if ($dst$$reg != $src$$reg) {
+      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
+    }
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// Cast pointer to long; mirror of castX2P.
+instruct castP2X(iRegLNoSp dst, iRegP src) %{
+  match(Set dst (CastP2X src));
+
+  ins_cost(INSN_COST);
+  format %{ "mov $dst, $src\t# ptr -> long" %}
+
+  ins_encode %{
+    if ($dst$$reg != $src$$reg) {
+      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
+    }
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// Convert oop into int for vectors alignment masking
+instruct convP2I(iRegINoSp dst, iRegP src) %{
+  match(Set dst (ConvL2I (CastP2X src)));
+
+  ins_cost(INSN_COST);
+  // a 32-bit (w-register) move keeps only the low 32 bits of the pointer
+  format %{ "movw $dst, $src\t# ptr -> int" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$Register);
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// Convert compressed oop into int for vectors alignment masking
+// in case of 32bit oops (heap < 4Gb).
+instruct convN2I(iRegINoSp dst, iRegN src)
+%{
+  predicate(Universe::narrow_oop_shift() == 0);
+  match(Set dst (ConvL2I (CastP2X (DecodeN src))));
+
+  ins_cost(INSN_COST);
+  // With a zero narrow-oop shift the compressed form already equals the
+  // low 32 bits of the pointer, so a 32-bit register move suffices.
+  // (format fixed: was "mov dst, ..." -- missing the $ substitution and
+  // naming mov while the encoding emits movw)
+  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
+  ins_encode %{
+    __ movw($dst$$Register, $src$$Register);
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+
+// Convert oop pointer into compressed form
+instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
+  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
+  match(Set dst (EncodeP src));
+  effect(KILL cr);
+  ins_cost(INSN_COST * 3);
+  format %{ "encode_heap_oop $dst, $src" %}
+  ins_encode %{
+    Register s = $src$$Register;
+    Register d = $dst$$Register;
+    __ encode_heap_oop(d, s);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// Encode an oop statically known to be non-null (no null-check path).
+// NOTE(review): cr is declared but carries no KILL effect here, unlike
+// encodeHeapOop above -- confirm this asymmetry is intended.
+instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
+  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
+  match(Set dst (EncodeP src));
+  ins_cost(INSN_COST * 3);
+  format %{ "encode_heap_oop_not_null $dst, $src" %}
+  ins_encode %{
+    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// Decode a compressed oop that may be null.
+instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
+  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
+            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
+  match(Set dst (DecodeN src));
+  ins_cost(INSN_COST * 3);
+  format %{ "decode_heap_oop $dst, $src" %}
+  ins_encode %{
+    Register s = $src$$Register;
+    Register d = $dst$$Register;
+    __ decode_heap_oop(d, s);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// Decode a compressed oop known to be non-null (or a constant).
+instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
+  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
+            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
+  match(Set dst (DecodeN src));
+  ins_cost(INSN_COST * 3);
+  format %{ "decode_heap_oop_not_null $dst, $src" %}
+  ins_encode %{
+    Register s = $src$$Register;
+    Register d = $dst$$Register;
+    __ decode_heap_oop_not_null(d, s);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// n.b. AArch64 implementations of encode_klass_not_null and
+// decode_klass_not_null do not modify the flags register so, unlike
+// Intel, we don't kill CR as a side effect here
+
+// Compress a klass pointer (klass pointers are never null).
+instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
+  match(Set dst (EncodePKlass src));
+
+  ins_cost(INSN_COST * 3);
+  format %{ "encode_klass_not_null $dst,$src" %}
+
+  ins_encode %{
+    Register src_reg = as_Register($src$$reg);
+    Register dst_reg = as_Register($dst$$reg);
+    __ encode_klass_not_null(dst_reg, src_reg);
+  %}
+
+   ins_pipe(ialu_reg);
+%}
+
+// Expand a compressed klass pointer; the macro assembler supplies a
+// separate in-place variant for the dst == src case.
+instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
+  match(Set dst (DecodeNKlass src));
+
+  ins_cost(INSN_COST * 3);
+  format %{ "decode_klass_not_null $dst,$src" %}
+
+  ins_encode %{
+    Register src_reg = as_Register($src$$reg);
+    Register dst_reg = as_Register($dst$$reg);
+    if (dst_reg != src_reg) {
+      __ decode_klass_not_null(dst_reg, src_reg);
+    } else {
+      __ decode_klass_not_null(dst_reg);
+    }
+  %}
+
+   ins_pipe(ialu_reg);
+%}
+
+// checkCastPP, castPP and castII are compile-time type assertions only:
+// size(0), empty encoding -- no machine code is emitted.
+instruct checkCastPP(iRegPNoSp dst)
+%{
+  match(Set dst (CheckCastPP dst));
+
+  size(0);
+  format %{ "# checkcastPP of $dst" %}
+  ins_encode(/* empty encoding */);
+  ins_pipe(pipe_class_empty);
+%}
+
+instruct castPP(iRegPNoSp dst)
+%{
+  match(Set dst (CastPP dst));
+
+  size(0);
+  format %{ "# castPP of $dst" %}
+  ins_encode(/* empty encoding */);
+  ins_pipe(pipe_class_empty);
+%}
+
+instruct castII(iRegI dst)
+%{
+  match(Set dst (CastII dst));
+
+  size(0);
+  format %{ "# castII of $dst" %}
+  ins_encode(/* empty encoding */);
+  ins_cost(0);
+  ins_pipe(pipe_class_empty);
+%}
+
+// ============================================================================
+// Atomic operation instructions
+//
+// Intel and SPARC both implement Ideal Node LoadPLocked and
+// Store{PIL}Conditional instructions using a normal load for the
+// LoadPLocked and a CAS for the Store{PIL}Conditional.
+//
+// The ideal code appears only to use LoadPLocked/StorePLocked as a
+// pair to lock object allocations from Eden space when not using
+// TLABs.
+//
+// There does not appear to be a Load{IL}Locked Ideal Node and the
+// Ideal code appears to use Store{IL}Conditional as an alias for CAS
+// and to use StoreIConditional only for 32-bit and StoreLConditional
+// only for 64-bit.
+//
+// We implement LoadPLocked and StorePLocked instructions using,
+// respectively the AArch64 hw load-exclusive and store-conditional
+// instructions. Whereas we must implement each of
+// Store{IL}Conditional using a CAS which employs a pair of
+// instructions comprising a load-exclusive followed by a
+// store-conditional.
+
+
+// Locked-load (linked load) of the current heap-top
+// used when updating the eden heap top
+// implemented using ldaxr on AArch64
+
+instruct loadPLocked(iRegPNoSp dst, indirect mem)
+%{
+  match(Set dst (LoadPLocked mem));
+
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %}
+
+  ins_encode(aarch64_enc_ldaxr(dst, mem));
+
+  ins_pipe(pipe_serial);
+%}
+
+// Conditional-store of the updated heap-top.
+// Used during allocation of the shared heap.
+// Sets flag (EQ) on success.
+// implemented using stlxr on AArch64.
+
+// n.b. per the format, EQ signalling success is established by the
+// encoding comparing the stlxr status with zero (see aarch64_enc_stlxr).
+instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr)
+%{
+  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));
+
+  ins_cost(VOLATILE_REF_COST);
+
+ // TODO
+ // do we need to do a store-conditional release or can we just use a
+ // plain store-conditional?
+
+  format %{
+    "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release"
+    "cmpw rscratch1, zr\t# EQ on successful write"
+  %}
+
+  ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr));
+
+  ins_pipe(pipe_serial);
+%}
+
+// this has to be implemented as a CAS
+instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr)
+%{
+  match(Set cr (StoreLConditional mem (Binary oldval newval)));
+
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{
+    "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
+    "cmpw rscratch1, zr\t# EQ on successful write"
+  %}
+
+  ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval));
+
+  ins_pipe(pipe_slow);
+%}
+
+// this has to be implemented as a CAS
+instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr)
+%{
+  match(Set cr (StoreIConditional mem (Binary oldval newval)));
+
+  ins_cost(VOLATILE_REF_COST);
+
+  format %{
+    "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval"
+    "cmpw rscratch1, zr\t# EQ on successful write"
+  %}
+
+  ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval));
+
+  ins_pipe(pipe_slow);
+%}
+
+// XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
+// can't match them
+
+// CAS int: $res is set to 1 on success, 0 on failure, materialized from
+// the EQ flag by aarch64_enc_cset_eq after the cmpxchg encoding.
+instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
+
+  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
+
+  effect(KILL cr);
+
+ format %{
+    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
+            aarch64_enc_cset_eq(res));
+
+  ins_pipe(pipe_slow);
+%}
+
+// CAS long; 64-bit cmpxchg, result materialized as for compareAndSwapI.
+instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
+
+  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
+
+  effect(KILL cr);
+
+ format %{
+    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
+            aarch64_enc_cset_eq(res));
+
+  ins_pipe(pipe_slow);
+%}
+
+// CAS pointer; uses the 64-bit cmpxchg encoding.
+instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+
+  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+
+  effect(KILL cr);
+
+ format %{
+    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
+            aarch64_enc_cset_eq(res));
+
+  ins_pipe(pipe_slow);
+%}
+
+// CAS narrow oop; compressed oops are 32 bits, so the word-sized
+// cmpxchgw encoding is used.
+instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
+
+  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
+
+  effect(KILL cr);
+
+ format %{
+    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
+    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
+ %}
+
+ ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
+            aarch64_enc_cset_eq(res));
+
+  ins_pipe(pipe_slow);
+%}
+
+
+// Atomic exchange rules: return the previous memory value in $prev.
+// The address is the base register of the indirect operand only.
+
+instruct get_and_setI(indirect mem, iRegINoSp newv, iRegI prev) %{
+  match(Set prev (GetAndSetI mem newv));
+  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct get_and_setL(indirect mem, iRegLNoSp newv, iRegL prev) %{
+  match(Set prev (GetAndSetL mem newv));
+  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+// narrow oop flavour: 32-bit exchange (atomic_xchgw)
+instruct get_and_setN(indirect mem, iRegNNoSp newv, iRegI prev) %{
+  match(Set prev (GetAndSetN mem newv));
+  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct get_and_setP(indirect mem, iRegPNoSp newv, iRegP prev) %{
+  match(Set prev (GetAndSetP mem newv));
+  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
+  ins_encode %{
+    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+
+// Atomic fetch-and-add rules.  Four variants per width: register or
+// immediate increment, each with a "_no_res" form (selected when the
+// ideal node's result is unused) that discards the old value into noreg
+// and so is costed slightly cheaper.
+
+instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
+  match(Set newval (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addL $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addL [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+// immediate increment (add/sub-encodable constant)
+instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
+  match(Set newval (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addL $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddL mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addL [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+// 32-bit flavours use atomic_addw
+instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
+  match(Set newval (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addI $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addI [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
+  match(Set newval (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 10);
+  format %{ "get_and_addI $newval, [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
+  predicate(n->as_LoadStore()->result_not_used());
+  match(Set dummy (GetAndAddI mem incr));
+  ins_cost(INSN_COST * 9);
+  format %{ "get_and_addI [$mem], $incr" %}
+  ins_encode %{
+    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
+  %}
+  ins_pipe(pipe_serial);
+%}
+
+// Manifest a CmpL result in an integer register.
+// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
+instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
+%{
+  match(Set dst (CmpL3 src1 src2));
+  effect(KILL flags);
+
+  ins_cost(INSN_COST * 6);
+  format %{
+      "cmp $src1, $src2"
+      "csetw $dst, ne"
+      "cnegw $dst, lt"
+  %}
+  // format %{ "CmpL3 $dst, $src1, $src2" %}
+  ins_encode %{
+    // csetw gives 0 (equal) or 1 (unequal); cnegw then negates the 1
+    // to -1 when the compare was "less than", yielding -1/0/1.
+    __ cmp($src1$$Register, $src2$$Register);
+    __ csetw($dst$$Register, Assembler::NE);
+    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// Manifest a CmpL result against an add/sub-encodable immediate:
+// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0), as in cmpL3_reg_reg.
+instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
+%{
+  match(Set dst (CmpL3 src1 src2));
+  effect(KILL flags);
+
+  ins_cost(INSN_COST * 6);
+  format %{
+      "cmp $src1, $src2"
+      "csetw $dst, ne"
+      "cnegw $dst, lt"
+  %}
+  ins_encode %{
+    int32_t con = (int32_t)$src2$$constant;
+    // A negative constant cannot be encoded in subs, so compare by
+    // adding its negation; flags are all we need (result into zr).
+    // NOTE(review): assumes immLAddSub excludes values whose negation
+    // overflows int32_t -- confirm against the operand definition.
+    if (con < 0) {
+      __ adds(zr, $src1$$Register, -con);
+    } else {
+      __ subs(zr, $src1$$Register, con);
+    }
+    __ csetw($dst$$Register, Assembler::NE);
+    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// ============================================================================
+// Conditional Move Instructions
+
+// n.b. we have identical rules for both a signed compare op (cmpOp)
+// and an unsigned compare op (cmpOpU). it would be nice if we could
+// define an op class which merged both inputs and use it to type the
+// argument to a single rule. unfortunately this fails because the
+// opclass does not live up to the COND_INTER interface of its
+// component operands. When the generic code tries to negate the
+// operand it ends up running the generic MachOper::negate method
+// which throws a ShouldNotHappen. So, we have to provide two flavours
+// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
+
+// n.b. csel's operand order: $src2 is selected when the condition
+// holds, otherwise $src1.
+instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegI src1, iRegI src2) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src2$$reg),
+             as_Register($src1$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegI src1, iRegI src2) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src2$$reg),
+             as_Register($src1$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+// special cases where one arg is zero
+
+// n.b. this is selected in preference to the rule above because it
+// avoids loading constant 0 into a source register
+
+// TODO
+// we ought only to be able to cull one of these variants as the ideal
+// transforms ought always to order the zero consistently (to left/right?)
+
+instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegI src) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src$$reg),
+             zr,
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegI src) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src$$reg),
+             zr,
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+// CMoveI with zero as the second (selected-on-condition) input: the
+// zero register zr replaces a loaded constant.
+instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegI src, immI0 zero) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             zr,
+             as_Register($src$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegI src, immI0 zero) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             zr,
+             as_Register($src$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+// special case for creating a boolean 0 or 1
+
+// n.b. this is selected in preference to the rule above because it
+// avoids loading constants 0 and 1 into a source register
+
+// csincw zr, zr, cond yields 0 when cond holds and 0+1 otherwise,
+// which is exactly cset with the negated condition.
+instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}
+
+  ins_encode %{
+    // equivalently
+    // cset(as_Register($dst$$reg),
+    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
+    __ csincw(as_Register($dst$$reg),
+             zr,
+             zr,
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_none);
+%}
+
+instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
+  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}
+
+  ins_encode %{
+    // equivalently
+    // cset(as_Register($dst$$reg),
+    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
+    __ csincw(as_Register($dst$$reg),
+             zr,
+             zr,
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_none);
+%}
+
+// CMoveL rules: 64-bit (csel) analogues of the CMoveI rules above,
+// again in signed (cmpOp) and unsigned (cmpOpU) flavours.
+instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
+  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src2$$reg),
+            as_Register($src1$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
+  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src2$$reg),
+            as_Register($src1$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+// special cases where one arg is zero
+
+instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
+  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            zr,
+            as_Register($src$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
+  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            zr,
+            as_Register($src$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            zr,
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
+  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            zr,
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+// CMoveP rules: pointer-typed analogues of the CMoveL rules (64-bit
+// csel), signed and unsigned flavours.
+instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
+  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src2$$reg),
+            as_Register($src1$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
+  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src2$$reg),
+            as_Register($src1$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+// special cases where one arg is zero
+
+instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
+  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            zr,
+            as_Register($src$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
+  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            zr,
+            as_Register($src$$reg),
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            zr,
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
+  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}
+
+  ins_encode %{
+    __ csel(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            zr,
+            (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+// CMoveN: compressed oops are 32 bits, so the w-register cselw is used.
+instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
+  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src2$$reg),
+             as_Register($src1$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+// Unsigned-compare flavour of cmovN_reg_reg.
+// (format comment fixed: said "signed" for this cmpOpU rule, unlike
+// every other unsigned variant in this section)
+instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
+  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src2$$reg),
+             as_Register($src1$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg_reg);
+%}
+
+// special cases where one arg is zero
+
+instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
+  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             zr,
+             as_Register($src$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
+  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             zr,
+             as_Register($src$$reg),
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
+  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src$$reg),
+             zr,
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
+  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}
+
+  ins_encode %{
+    __ cselw(as_Register($dst$$reg),
+             as_Register($src$$reg),
+             zr,
+             (Assembler::Condition)$cmp$$cmpcode);
+  %}
+
+  ins_pipe(icond_reg);
+%}
+
+// CMoveF: FP conditional select; as with csel, $src2 is selected when
+// the condition holds, otherwise $src1.
+instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
+%{
+  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 3);
+
+  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
+  ins_encode %{
+    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
+    __ fcsels(as_FloatRegister($dst$$reg),
+              as_FloatRegister($src2$$reg),
+              as_FloatRegister($src1$$reg),
+              cond);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
+%{
+  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 3);
+
+  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
+  ins_encode %{
+    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
+    __ fcsels(as_FloatRegister($dst$$reg),
+              as_FloatRegister($src2$$reg),
+              as_FloatRegister($src1$$reg),
+              cond);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// CMoveD: double-precision conditional select (fcseld); $src2 is
+// selected when the condition holds, otherwise $src1.
+// (format comment fixed: said "cmove float" in this double rule)
+instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
+%{
+  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 3);
+
+  format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double\n\t" %}
+  ins_encode %{
+    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
+    __ fcseld(as_FloatRegister($dst$$reg),
+              as_FloatRegister($src2$$reg),
+              as_FloatRegister($src1$$reg),
+              cond);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// Unsigned-compare flavour of cmovD_reg.
+// (format comment fixed: said "cmove float" in this double rule)
+instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
+%{
+  match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
+
+  ins_cost(INSN_COST * 3);
+
+  format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double\n\t" %}
+  ins_encode %{
+    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
+    __ fcseld(as_FloatRegister($dst$$reg),
+              as_FloatRegister($src2$$reg),
+              as_FloatRegister($src1$$reg),
+              cond);
+  %}
+
+  ins_pipe(pipe_class_default);
+%}
+
+// ============================================================================
+// Arithmetic Instructions
+//
+
+// Integer Addition
+
+// TODO
+// these currently employ operations which do not set CR and hence are
+// not flagged as killing CR but we would like to isolate the cases
+// where we want to set flags from those where we don't. need to work
+// out how to do that.
+
+instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (AddI src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "addw  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ addw(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addI_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2) %{
+  match(Set dst (AddI src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "addw $dst, $src1, $src2" %}
+
+  // use opcode to indicate that this is an add not a sub
+  opcode(0x0);
+
+  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
+  match(Set dst (AddI (ConvL2I src1) src2));
+
+  ins_cost(INSN_COST);
+  format %{ "addw $dst, $src1, $src2" %}
+
+  // use opcode to indicate that this is an add not a sub
+  opcode(0x0);
+
+  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Pointer Addition
+instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{
+  match(Set dst (AddP src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "add $dst, $src1, $src2\t# ptr" %}
+
+  ins_encode %{
+    __ add(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{
+  match(Set dst (AddP src1 (ConvI2L src2)));
+
+  ins_cost(INSN_COST);
+  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
+
+  ins_encode %{
+    __ add(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           as_Register($src2$$reg), ext::sxtw);
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{
+  match(Set dst (AddP src1 (LShiftL src2 scale)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}
+
+  ins_encode %{
+    __ lea(as_Register($dst$$reg),
+           Address(as_Register($src1$$reg), as_Register($src2$$reg),
+                   Address::lsl($scale$$constant)));
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{
+  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}
+
+  ins_encode %{
+    __ lea(as_Register($dst$$reg),
+           Address(as_Register($src1$$reg), as_Register($src2$$reg),
+                   Address::sxtw($scale$$constant)));
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+// Fused sign-extend-int-to-long + left shift, using a single SBFIZ.
+// sbfiz(dst, src, lsb, width) inserts 'width' low bits of src at bit
+// position 'lsb' and sign-extends.
+instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
+  match(Set dst (LShiftL (ConvI2L src) scale));
+
+  ins_cost(INSN_COST);
+  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}
+
+  ins_encode %{
+    // lsb = scale mod 64; width = 64 - lsb via (-scale) & 63, but capped at
+    // 32 because only the low 32 bits of the int source are meaningful.
+    __ sbfiz(as_Register($dst$$reg),
+          as_Register($src$$reg),
+          $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63));
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Pointer Immediate Addition
+// n.b. this needs to be more expensive than using an indirect memory
+// operand
+instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{
+  match(Set dst (AddP src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "add $dst, $src1, $src2\t# ptr" %}
+
+  // use opcode to indicate that this is an add not a sub
+  opcode(0x0);
+
+  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Long Addition
+instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
+
+  match(Set dst (AddL src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ add(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Long Immediate Addition. No constant pool entries required.
+instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
+  match(Set dst (AddL src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "add $dst, $src1, $src2" %}
+
+  // use opcode to indicate that this is an add not a sub
+  opcode(0x0);
+
+  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Integer Subtraction
+instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (SubI src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "subw  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ subw(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Immediate Subtraction
+instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
+  match(Set dst (SubI src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "subw $dst, $src1, $src2" %}
+
+  // use opcode to indicate that this is a sub not an add
+  opcode(0x1);
+
+  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Long Subtraction
+instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
+
+  match(Set dst (SubL src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "sub  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ sub(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+// Long Immediate Subtraction. No constant pool entries required.
+instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
+  match(Set dst (SubL src1 src2));
+
+  ins_cost(INSN_COST);
+  // fixed format string: mnemonic and operands were fused ("sub$dst")
+  format %{ "sub $dst, $src1, $src2" %}
+
+  // use opcode to indicate that this is a sub not an add
+  opcode(0x1);
+
+  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
+
+  ins_pipe(ialu_reg_imm);
+%}
+
+// Integer Negation (special case for sub)
+
+instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
+  match(Set dst (SubI zero src));
+
+  ins_cost(INSN_COST);
+  format %{ "negw $dst, $src\t# int" %}
+
+  ins_encode %{
+    __ negw(as_Register($dst$$reg),
+            as_Register($src$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// Long Negation
+
+instruct negL_reg(iRegLNoSp dst, iRegIorL2I src, immL0 zero, rFlagsReg cr) %{
+  match(Set dst (SubL zero src));
+
+  ins_cost(INSN_COST);
+  format %{ "neg $dst, $src\t# long" %}
+
+  ins_encode %{
+    __ neg(as_Register($dst$$reg),
+           as_Register($src$$reg));
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+// Integer Multiply
+
+instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (MulI src1 src2));
+
+  ins_cost(INSN_COST * 3);
+  format %{ "mulw  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ mulw(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(imul_reg_reg);
+%}
+
+instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));
+
+  ins_cost(INSN_COST * 3);
+  format %{ "smull  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ smull(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg));
+  %}
+
+  ins_pipe(imul_reg_reg);
+%}
+
+// Long Multiply
+
+instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
+  match(Set dst (MulL src1 src2));
+
+  ins_cost(INSN_COST * 5);
+  format %{ "mul  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ mul(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           as_Register($src2$$reg));
+  %}
+
+  ins_pipe(lmul_reg_reg);
+%}
+
+// High 64 bits of a 64x64 -> 128-bit signed multiply (MulHiL).
+instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
+%{
+  match(Set dst (MulHiL src1 src2));
+
+  ins_cost(INSN_COST * 7);
+  // fixed format string: removed stray ", " before the tab
+  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}
+
+  ins_encode %{
+    __ smulh(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg));
+  %}
+
+  ins_pipe(lmul_reg_reg);
+%}
+
+// Combined Integer Multiply & Add/Sub
+
+// Integer multiply-add: dst = src3 + src1 * src2.
+instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
+  match(Set dst (AddI src3 (MulI src1 src2)));
+
+  ins_cost(INSN_COST * 3);
+  // fixed format string: the encoder emits the 32-bit form (maddw), not madd
+  format %{ "maddw  $dst, $src1, $src2, $src3" %}
+
+  ins_encode %{
+    __ maddw(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg),
+             as_Register($src3$$reg));
+  %}
+
+  ins_pipe(imac_reg_reg);
+%}
+
+// Integer multiply-subtract: dst = src3 - src1 * src2.
+instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
+  match(Set dst (SubI src3 (MulI src1 src2)));
+
+  ins_cost(INSN_COST * 3);
+  // fixed format string: the encoder emits the 32-bit form (msubw), not msub
+  format %{ "msubw  $dst, $src1, $src2, $src3" %}
+
+  ins_encode %{
+    __ msubw(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg),
+             as_Register($src3$$reg));
+  %}
+
+  ins_pipe(imac_reg_reg);
+%}
+
+// Combined Long Multiply & Add/Sub
+
+instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
+  match(Set dst (AddL src3 (MulL src1 src2)));
+
+  ins_cost(INSN_COST * 5);
+  format %{ "madd  $dst, $src1, $src2, $src3" %}
+
+  ins_encode %{
+    __ madd(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg),
+            as_Register($src3$$reg));
+  %}
+
+  ins_pipe(lmac_reg_reg);
+%}
+
+instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
+  match(Set dst (SubL src3 (MulL src1 src2)));
+
+  ins_cost(INSN_COST * 5);
+  format %{ "msub  $dst, $src1, $src2, $src3" %}
+
+  ins_encode %{
+    __ msub(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg),
+            as_Register($src3$$reg));
+  %}
+
+  ins_pipe(lmac_reg_reg);
+%}
+
+// Integer Divide
+
+instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (DivI src1 src2));
+
+  ins_cost(INSN_COST * 19);
+  format %{ "sdivw  $dst, $src1, $src2" %}
+
+  ins_encode(aarch64_enc_divw(dst, src1, src2));
+  ins_pipe(idiv_reg_reg);
+%}
+
+// (src1 >> 31) >>> 31 yields 0 or 1 according to the sign bit; a single
+// logical shift right by 31 computes the same thing. div1/div2 (both
+// immI_31) exist only to constrain the match to shift counts of 31.
+instruct signExtract(iRegINoSp dst, iRegI src1, immI_31 div1, immI_31 div2) %{
+  match(Set dst (URShiftI (RShiftI src1 div1) div2));
+  ins_cost(INSN_COST);
+  format %{ "lsrw $dst, $src1, $div1" %}
+  ins_encode %{
+    __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Rounding step for signed divide by 2: dst = src + ((src >> 31) >>> 31),
+// i.e. add 1 when src is negative, folded into a single shifted add.
+instruct div2Round(iRegINoSp dst, iRegI src, immI_31 div1, immI_31 div2) %{
+  match(Set dst (AddI src (URShiftI (RShiftI src div1) div2)));
+  ins_cost(INSN_COST);
+  // fixed format string: show the shifted second source operand the
+  // encoding actually uses
+  format %{ "addw $dst, $src, $src, LSR $div1" %}
+
+  ins_encode %{
+    __ addw(as_Register($dst$$reg),
+              as_Register($src$$reg),
+              as_Register($src$$reg),
+              Assembler::LSR, 31);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// Long Divide
+
+instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
+  match(Set dst (DivL src1 src2));
+
+  ins_cost(INSN_COST * 35);
+  format %{ "sdiv   $dst, $src1, $src2" %}
+
+  ins_encode(aarch64_enc_div(dst, src1, src2));
+  ins_pipe(ldiv_reg_reg);
+%}
+
+// (src1 >> 63) >>> 63 yields 0 or 1 according to the sign bit; a single
+// logical shift right by 63 computes the same thing. div1/div2 (both
+// immL_63) exist only to constrain the match to shift counts of 63.
+instruct signExtractL(iRegLNoSp dst, iRegL src1, immL_63 div1, immL_63 div2) %{
+  match(Set dst (URShiftL (RShiftL src1 div1) div2));
+  ins_cost(INSN_COST);
+  format %{ "lsr $dst, $src1, $div1" %}
+  ins_encode %{
+    __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Rounding step for signed long divide by 2: dst = src + ((src >> 63) >>> 63),
+// i.e. add 1 when src is negative, folded into a single shifted add.
+instruct div2RoundL(iRegLNoSp dst, iRegL src, immL_63 div1, immL_63 div2) %{
+  match(Set dst (AddL src (URShiftL (RShiftL src div1) div2)));
+  ins_cost(INSN_COST);
+  // fixed format string: the LSR-shifted second source operand was missing
+  format %{ "add $dst, $src, $src, LSR $div1" %}
+
+  ins_encode %{
+    __ add(as_Register($dst$$reg),
+              as_Register($src$$reg),
+              as_Register($src$$reg),
+              Assembler::LSR, 63);
+  %}
+  ins_pipe(ialu_reg);
+%}
+
+// Integer Remainder
+
+// Integer remainder via divide + multiply-subtract:
+//   rscratch1 = src1 / src2; dst = src1 - rscratch1 * src2
+instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (ModI src1 src2));
+
+  ins_cost(INSN_COST * 22);
+  // fixed format string: removed stray '(' after msubw
+  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
+            "msubw  $dst, rscratch1, $src2, $src1" %}
+
+  ins_encode(aarch64_enc_modw(dst, src1, src2));
+  ins_pipe(idiv_reg_reg);
+%}
+
+// Long Remainder
+
+// Long remainder via divide + multiply-subtract:
+//   rscratch1 = src1 / src2; dst = src1 - rscratch1 * src2
+instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
+  match(Set dst (ModL src1 src2));
+
+  ins_cost(INSN_COST * 38);
+  // fixed format string: removed stray '(' after msub; added missing tab
+  // after the newline for consistency with modI
+  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
+            "msub   $dst, rscratch1, $src2, $src1" %}
+
+  ins_encode(aarch64_enc_mod(dst, src1, src2));
+  ins_pipe(ldiv_reg_reg);
+%}
+
+// Integer Shifts
+
+// Shift Left Register
+instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (LShiftI src1 src2));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "lslvw  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ lslvw(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Shift Left Immediate
+instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
+  match(Set dst (LShiftI src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}
+
+  ins_encode %{
+    __ lslw(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            $src2$$constant & 0x1f);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Shift Right Logical Register
+instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (URShiftI src1 src2));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "lsrvw  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ lsrvw(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Shift Right Logical Immediate
+instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
+  match(Set dst (URShiftI src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
+
+  ins_encode %{
+    __ lsrw(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            $src2$$constant & 0x1f);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Shift Right Arithmetic Register
+instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
+  match(Set dst (RShiftI src1 src2));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "asrvw  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ asrvw(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Shift Right Arithmetic Immediate
+instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
+  match(Set dst (RShiftI src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
+
+  ins_encode %{
+    __ asrw(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            $src2$$constant & 0x1f);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Combined Int Mask and Right Shift (using UBFM)
+// TODO
+
+// Long Shifts
+
+// Shift Left Register
+instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
+  match(Set dst (LShiftL src1 src2));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "lslv  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ lslv(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Shift Left Immediate
+instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
+  match(Set dst (LShiftL src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
+
+  ins_encode %{
+    __ lsl(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            $src2$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Shift Right Logical Register
+instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
+  match(Set dst (URShiftL src1 src2));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "lsrv  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ lsrv(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Shift Right Logical Immediate
+instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
+  match(Set dst (URShiftL src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}
+
+  ins_encode %{
+    __ lsr(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           $src2$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// A special-case pattern for card table stores.
+instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
+  match(Set dst (URShiftL (CastP2X src1) src2));
+
+  ins_cost(INSN_COST);
+  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}
+
+  ins_encode %{
+    __ lsr(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           $src2$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Shift Right Arithmetic Register
+instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
+  match(Set dst (RShiftL src1 src2));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "asrv  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ asrv(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Shift Right Arithmetic Immediate
+instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
+  match(Set dst (RShiftL src1 src2));
+
+  ins_cost(INSN_COST);
+  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}
+
+  ins_encode %{
+    __ asr(as_Register($dst$$reg),
+           as_Register($src1$$reg),
+           $src2$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// BEGIN This section of the file is automatically generated. Do not edit --------------
+
+instruct regL_not_reg(iRegLNoSp dst,
+                         iRegL src1, immL_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (XorL src1 m1));
+  ins_cost(INSN_COST);
+  format %{ "eon  $dst, $src1, zr" %}
+
+  ins_encode %{
+    __ eon(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              zr,
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+instruct regI_not_reg(iRegINoSp dst,
+                         iRegI src1, immI_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (XorI src1 m1));
+  ins_cost(INSN_COST);
+  format %{ "eonw  $dst, $src1, zr" %}
+
+  ins_encode %{
+    __ eonw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              zr,
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg);
+%}
+
+instruct AndI_reg_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2, immI_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (AndI src1 (XorI src2 m1)));
+  ins_cost(INSN_COST);
+  format %{ "bic  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ bic(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AndL_reg_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2, immL_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (AndL src1 (XorL src2 m1)));
+  ins_cost(INSN_COST);
+  format %{ "bic  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ bic(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct OrI_reg_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2, immI_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (OrI src1 (XorI src2 m1)));
+  ins_cost(INSN_COST);
+  format %{ "orn  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ orn(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct OrL_reg_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2, immL_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (OrL src1 (XorL src2 m1)));
+  ins_cost(INSN_COST);
+  format %{ "orn  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ orn(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct XorI_reg_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2, immI_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (XorI m1 (XorI src2 src1)));
+  ins_cost(INSN_COST);
+  format %{ "eon  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ eon(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct XorL_reg_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2, immL_M1 m1,
+                         rFlagsReg cr) %{
+  match(Set dst (XorL m1 (XorL src2 src1)));
+  ins_cost(INSN_COST);
+  format %{ "eon  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ eon(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL, 0);
+  %}
+
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ bicw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ bic(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ bicw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ bic(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ bicw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ bic(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ eonw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ eon(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ eonw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ eon(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ eonw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ eon(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ ornw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ orn(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+// Or with bitwise-NOT of a shifted register: x | ~(y >> s) / x | ~(y << s)
+// folds into a single ORN/ORNW with a shifted second operand (the Xor with
+// a -1 immediate in the match rule is how the ideal graph expresses NOT).
+// Fix: the 32-bit (w) forms masked the immediate shift with 0x3f; the
+// 32-bit shifted-register encoding only allows shift amounts 0..31, so
+// mask with 0x1f instead.
+instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ ornw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ orn(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);  // 64-bit form: shift is 0..63
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, immI_M1 src4, rFlagsReg cr) %{
+  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ ornw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, immL_M1 src4, rFlagsReg cr) %{
+  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ orn(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+// And with a shifted-register operand: x & (y >>/>>>/<< s) folds into one
+// AND/ANDW with a shifted second operand.
+// Fix: the 32-bit (w) forms masked the immediate shift with 0x3f; the
+// 32-bit shifted-register encoding only allows 0..31, so mask with 0x1f.
+instruct AndI_reg_URShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AndI src1 (URShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ andw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndL_reg_URShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AndL src1 (URShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ andr(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndI_reg_RShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AndI src1 (RShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ andw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndL_reg_RShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AndL src1 (RShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ andr(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndI_reg_LShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AndI src1 (LShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ andw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AndL_reg_LShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AndL src1 (LShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ andr(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+// Xor with a shifted-register operand: x ^ (y >>/>>>/<< s) folds into one
+// EOR/EORW with a shifted second operand.
+// Fix: the 32-bit (w) forms masked the immediate shift with 0x3f; the
+// 32-bit shifted-register encoding only allows 0..31, so mask with 0x1f.
+instruct XorI_reg_URShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (XorI src1 (URShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ eorw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorL_reg_URShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (XorL src1 (URShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ eor(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorI_reg_RShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (XorI src1 (RShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ eorw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorL_reg_RShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (XorL src1 (RShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ eor(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorI_reg_LShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (XorI src1 (LShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ eorw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct XorL_reg_LShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (XorL src1 (LShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ eor(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+// Or with a shifted-register operand: x | (y >>/>>>/<< s) folds into one
+// ORR/ORRW with a shifted second operand.
+// Fix: the 32-bit (w) forms masked the immediate shift with 0x3f; the
+// 32-bit shifted-register encoding only allows 0..31, so mask with 0x1f.
+instruct OrI_reg_URShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (OrI src1 (URShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ orrw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrL_reg_URShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (OrL src1 (URShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ orr(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrI_reg_RShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (OrI src1 (RShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ orrw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrL_reg_RShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (OrL src1 (RShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ orr(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrI_reg_LShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (OrI src1 (LShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ orrw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct OrL_reg_LShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (OrL src1 (LShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ orr(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+// Add with a shifted-register operand: x + (y >>/>>>/<< s) folds into one
+// ADD/ADDW with a shifted second operand.
+// Fix: the 32-bit (w) forms masked the immediate shift with 0x3f; the
+// 32-bit shifted-register encoding only allows 0..31, so mask with 0x1f.
+instruct AddI_reg_URShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AddI src1 (URShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ addw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddL_reg_URShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AddL src1 (URShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ add(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddI_reg_RShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AddI src1 (RShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ addw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddL_reg_RShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AddL src1 (RShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ add(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddI_reg_LShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AddI src1 (LShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ addw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct AddL_reg_LShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (AddL src1 (LShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "add  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ add(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+// Sub with a shifted-register operand: x - (y >>/>>>/<< s) folds into one
+// SUB/SUBW with a shifted second operand.
+// Fix: the 32-bit (w) forms masked the immediate shift with 0x3f; the
+// 32-bit shifted-register encoding only allows 0..31, so mask with 0x1f.
+instruct SubI_reg_URShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (SubI src1 (URShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ subw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubL_reg_URShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (SubL src1 (URShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}
+
+  ins_encode %{
+    __ sub(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubI_reg_RShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (SubI src1 (RShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ subw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubL_reg_RShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (SubL src1 (RShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}
+
+  ins_encode %{
+    __ sub(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::ASR,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubI_reg_LShift_reg(iRegINoSp dst,
+                         iRegI src1, iRegI src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (SubI src1 (LShiftI src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ subw(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x1f);  // 32-bit form: shift is 0..31
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+instruct SubL_reg_LShift_reg(iRegLNoSp dst,
+                         iRegL src1, iRegL src2,
+                         immI src3, rFlagsReg cr) %{
+  match(Set dst (SubL src1 (LShiftL src2 src3)));
+
+  ins_cost(1.9 * INSN_COST);
+  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}
+
+  ins_encode %{
+    __ sub(as_Register($dst$$reg),
+              as_Register($src1$$reg),
+              as_Register($src2$$reg),
+              Assembler::LSL,
+              $src3$$constant & 0x3f);
+  %}
+
+  ins_pipe(ialu_reg_reg_shift);
+%}
+
+
+
+// Shift Left followed by Shift Right.
+// This idiom is used by the compiler for the i2b bytecode etc.
+// (x << l) >> r maps onto one SBFM with immr = (r - l) mod 64 and
+// imms = 63 - l; the predicate keeps both constants in the range SBFM
+// can encode.
+instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
+%{
+  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what sbfm can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 63
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
+
+  ins_cost(INSN_COST * 2);
+  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
+  ins_encode %{
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
+    int s = 63 - lshift;
+    int r = (rshift - lshift) & 63;
+    __ sbfm(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            r, s);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Shift Left followed by Shift Right.
+// This idiom is used by the compiler for the i2b bytecode etc.
+// 32-bit variant of sbfmL: same mapping with the 31/mod-32 bounds.
+instruct sbfmwI(iRegINoSp dst, iRegI src, immI lshift_count, immI rshift_count)
+%{
+  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what sbfmw can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 31
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
+
+  ins_cost(INSN_COST * 2);
+  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
+  ins_encode %{
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
+    int s = 31 - lshift;
+    int r = (rshift - lshift) & 31;
+    __ sbfmw(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            r, s);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Shift Left followed by Shift Right.
+// This idiom is used by the compiler for the i2b bytecode etc.
+// Unsigned (URShift) counterpart of sbfmL, mapped onto UBFM.
+instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
+%{
+  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what ubfm can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 63
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 63);
+
+  ins_cost(INSN_COST * 2);
+  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
+  ins_encode %{
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
+    int s = 63 - lshift;
+    int r = (rshift - lshift) & 63;
+    __ ubfm(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            r, s);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Shift Left followed by Shift Right.
+// This idiom is used by the compiler for the i2b bytecode etc.
+// 32-bit variant of ubfmL.
+instruct ubfmwI(iRegINoSp dst, iRegI src, immI lshift_count, immI rshift_count)
+%{
+  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
+  // Make sure we are not going to exceed what ubfmw can do.
+  predicate((unsigned int)n->in(2)->get_int() <= 31
+            && (unsigned int)n->in(1)->in(2)->get_int() <= 31);
+
+  ins_cost(INSN_COST * 2);
+  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
+  ins_encode %{
+    int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant;
+    int s = 31 - lshift;
+    int r = (rshift - lshift) & 31;
+    __ ubfmw(as_Register($dst$$reg),
+            as_Register($src$$reg),
+            r, s);
+  %}
+
+  ins_pipe(ialu_reg_shift);
+%}
+// Bitfield extract with shift & mask
+// (x >>> rshift) & mask maps onto one UBFX when mask+1 is a power of two;
+// the immI_bitmask/immL_bitmask operand types guarantee that, so
+// exact_log2(mask+1) is always a valid field width here.
+
+instruct ubfxwI(iRegINoSp dst, iRegI src, immI rshift, immI_bitmask mask)
+%{
+  match(Set dst (AndI (URShiftI src rshift) mask));
+
+  ins_cost(INSN_COST);
+  // NOTE(review): format omits $rshift; debug-listing text only.
+  format %{ "ubfxw $dst, $src, $mask" %}
+  ins_encode %{
+    int rshift = $rshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfxw(as_Register($dst$$reg),
+            as_Register($src$$reg), rshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+// 64-bit counterpart of ubfxwI.
+instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
+%{
+  match(Set dst (AndL (URShiftL src rshift) mask));
+
+  ins_cost(INSN_COST);
+  format %{ "ubfx $dst, $src, $mask" %}
+  ins_encode %{
+    int rshift = $rshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfx(as_Register($dst$$reg),
+            as_Register($src$$reg), rshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
+// We can use ubfx when extending an And with a mask when we know mask
+// is positive.  We know that because immI_bitmask guarantees it.
+instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
+%{
+  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
+
+  ins_cost(INSN_COST * 2);
+  format %{ "ubfx $dst, $src, $mask" %}
+  ins_encode %{
+    int rshift = $rshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfx(as_Register($dst$$reg),
+            as_Register($src$$reg), rshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
+// Rotations
+// (x << l) | (y >>> r) with l + r == 64 (resp. 32) is an EXTR; the
+// predicate enforces that the two shift constants sum to 0 mod the width.
+// The Add forms match the same idiom written with '+' instead of '|'
+// (equivalent because the shifted operands have no overlapping bits).
+
+instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
+%{
+  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
+
+  ins_cost(INSN_COST);
+  format %{ "extr $dst, $src1, $src2, #$rshift" %}
+
+  ins_encode %{
+    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
+            $rshift$$constant & 63);
+  %}
+  ins_pipe(ialu_reg_reg_extr);
+%}
+
+instruct extrOrI(iRegINoSp dst, iRegI src1, iRegI src2, immI lshift, immI rshift, rFlagsReg cr)
+%{
+  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
+
+  ins_cost(INSN_COST);
+  format %{ "extr $dst, $src1, $src2, #$rshift" %}
+
+  ins_encode %{
+    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
+            $rshift$$constant & 31);
+  %}
+  ins_pipe(ialu_reg_reg_extr);
+%}
+
+instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63));
+
+  ins_cost(INSN_COST);
+  format %{ "extr $dst, $src1, $src2, #$rshift" %}
+
+  ins_encode %{
+    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
+            $rshift$$constant & 63);
+  %}
+  ins_pipe(ialu_reg_reg_extr);
+%}
+
+instruct extrAddI(iRegINoSp dst, iRegI src1, iRegI src2, immI lshift, immI rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31));
+
+  ins_cost(INSN_COST);
+  format %{ "extr $dst, $src1, $src2, #$rshift" %}
+
+  ins_encode %{
+    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
+            $rshift$$constant & 31);
+  %}
+  ins_pipe(ialu_reg_reg_extr);
+%}
+
+
+// rol expander
+// AArch64 has no rotate-left instruction; rol x, s == ror x, -s, so we
+// negate the variable shift count into rscratch1 and use RORV (which
+// masks the count to the operand width in hardware).
+
+instruct rolL_rReg(iRegL dst, iRegL src, iRegI shift, rFlagsReg cr)
+%{
+  effect(DEF dst, USE src, USE shift);
+
+  format %{ "rol    $dst, $src, $shift" %}
+  ins_cost(INSN_COST * 3);
+  ins_encode %{
+    __ subw(rscratch1, zr, as_Register($shift$$reg));
+    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
+            rscratch1);
+    %}
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// rol expander
+// 32-bit variant of rolL_rReg (RORVW).
+
+instruct rolI_rReg(iRegI dst, iRegI src, iRegI shift, rFlagsReg cr)
+%{
+  effect(DEF dst, USE src, USE shift);
+
+  format %{ "rol    $dst, $src, $shift" %}
+  ins_cost(INSN_COST * 3);
+  ins_encode %{
+    __ subw(rscratch1, zr, as_Register($shift$$reg));
+    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
+            rscratch1);
+    %}
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Match the two ideal-graph spellings of a variable 64-bit rotate-left,
+// (x << s) | (x >>> (64 - s)) and (x << s) | (x >>> (0 - s)), and expand
+// both to the rolL_rReg expander above.
+instruct rolL_rReg_Var_C_64(iRegL dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
+%{
+  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
+
+  expand %{
+    rolL_rReg(dst, src, shift, cr);
+  %}
+%}
+
+instruct rolL_rReg_Var_C0(iRegL dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
+%{
+  match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
+
+  expand %{
+    rolL_rReg(dst, src, shift, cr);
+  %}
+%}
+
+// Integer (32-bit) variable rotate-left patterns.
+// Fix: the originals declared dst/src as iRegL and expanded to the 64-bit
+// rolL_rReg even though the match rule is OrI (an int-typed expression), so
+// these rules could never match correctly / would emit the wrong-width
+// rotate.  Use the 32-bit register class and the rolI_rReg expander.
+instruct rolI_rReg_Var_C_32(iRegI dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
+%{
+  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
+
+  expand %{
+    rolI_rReg(dst, src, shift, cr);
+  %}
+%}
+
+instruct rolI_rReg_Var_C0(iRegI dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
+%{
+  match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
+
+  expand %{
+    rolI_rReg(dst, src, shift, cr);
+  %}
+%}
+
+// ror expander
+// Rotate-right maps directly onto RORV (hardware masks the variable
+// shift count to the operand width).
+
+instruct rorL_rReg(iRegL dst, iRegL src, iRegI shift, rFlagsReg cr)
+%{
+  effect(DEF dst, USE src, USE shift);
+
+  format %{ "ror    $dst, $src, $shift" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ rorv(as_Register($dst$$reg), as_Register($src$$reg),
+            as_Register($shift$$reg));
+    %}
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// ror expander
+// 32-bit variant of rorL_rReg (RORVW).
+
+instruct rorI_rReg(iRegI dst, iRegI src, iRegI shift, rFlagsReg cr)
+%{
+  effect(DEF dst, USE src, USE shift);
+
+  format %{ "ror    $dst, $src, $shift" %}
+  ins_cost(INSN_COST);
+  ins_encode %{
+    __ rorvw(as_Register($dst$$reg), as_Register($src$$reg),
+            as_Register($shift$$reg));
+    %}
+  ins_pipe(ialu_reg_reg_vshift);
+%}
+
+// Match the two ideal-graph spellings of a variable 64-bit rotate-right
+// and expand both to the rorL_rReg expander above.
+instruct rorL_rReg_Var_C_64(iRegL dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
+%{
+  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
+
+  expand %{
+    rorL_rReg(dst, src, shift, cr);
+  %}
+%}
+
+instruct rorL_rReg_Var_C0(iRegL dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
+%{
+  match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
+
+  expand %{
+    rorL_rReg(dst, src, shift, cr);
+  %}
+%}
+
+// Integer (32-bit) variable rotate-right patterns.
+// Fix: the originals declared dst/src as iRegL and expanded to the 64-bit
+// rorL_rReg even though the match rule is OrI (an int-typed expression), so
+// these rules could never match correctly / would emit the wrong-width
+// rotate.  Use the 32-bit register class and the rorI_rReg expander.
+instruct rorI_rReg_Var_C_32(iRegI dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
+%{
+  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));
+
+  expand %{
+    rorI_rReg(dst, src, shift, cr);
+  %}
+%}
+
+instruct rorI_rReg_Var_C0(iRegI dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
+%{
+  match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));
+
+  expand %{
+    rorI_rReg(dst, src, shift, cr);
+  %}
+%}
+
+// Add/subtract (extended)
+// long +/- (long)int folds the ConvI2L into the add/sub as an SXTW
+// extended-register operand.
+
+instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (ConvI2L src2)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, sxtw $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtw);
+   %}
+  ins_pipe(ialu_reg_reg);
+%};
+
+instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
+%{
+  match(Set dst (SubL src1 (ConvI2L src2)));
+  ins_cost(INSN_COST);
+  format %{ "sub  $dst, $src1, sxtw $src2" %}
+
+   ins_encode %{
+     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtw);
+   %}
+  ins_pipe(ialu_reg_reg);
+%};
+
+
+// Add folding a sign/zero extension written as (x << k) >>(>) k into an
+// add with an sxtb/sxth/sxtw/uxtb extended-register operand.  The paired
+// lshift/rshift immediate operand types pin k to the extension width
+// (16 for sxth, 24 for sxtb on int; 32/48/56 on long).
+instruct AddExtI_sxth(iRegINoSp dst, iRegI src1, iRegI src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, sxth $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxth);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtI_sxtb(iRegINoSp dst, iRegI src1, iRegI src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, sxtb $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtb);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtI_uxtb(iRegINoSp dst, iRegI src1, iRegI src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, uxtb $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, sxth $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxth);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, sxtw $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtw);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, sxtb $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::sxtb);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, uxtb $src2" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+
+// Add/subtract folding a zero-extension written as a mask (x & 0xff,
+// 0xffff, 0xffffffff) into an add/sub with a uxtb/uxth/uxtw
+// extended-register operand; the immX_<mask> operand types pin the mask
+// to the extension width.
+instruct AddExtI_uxtb_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_255 mask, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (AndI src2 mask)));
+  ins_cost(INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, uxtb" %}
+
+   ins_encode %{
+     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtI_uxth_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_65535 mask, rFlagsReg cr)
+%{
+  match(Set dst (AddI src1 (AndI src2 mask)));
+  ins_cost(INSN_COST);
+  format %{ "addw  $dst, $src1, $src2, uxth" %}
+
+   ins_encode %{
+     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxth);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (AndL src2 mask)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, $src2, uxtb" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (AndL src2 mask)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, $src2, uxth" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxth);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
+%{
+  match(Set dst (AddL src1 (AndL src2 mask)));
+  ins_cost(INSN_COST);
+  format %{ "add  $dst, $src1, $src2, uxtw" %}
+
+   ins_encode %{
+     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtw);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct SubExtI_uxtb_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_255 mask, rFlagsReg cr)
+%{
+  match(Set dst (SubI src1 (AndI src2 mask)));
+  ins_cost(INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, uxtb" %}
+
+   ins_encode %{
+     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxtb);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct SubExtI_uxth_and(iRegINoSp dst, iRegI src1, iRegI src2, immI_65535 mask, rFlagsReg cr)
+%{
+  match(Set dst (SubI src1 (AndI src2 mask)));
+  ins_cost(INSN_COST);
+  format %{ "subw  $dst, $src1, $src2, uxth" %}
+
+   ins_encode %{
+     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
+            as_Register($src2$$reg), ext::uxth);
+   %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
+%{