changeset 6167:cdcb11ccfb3b

Merge
author asaha
date Wed, 02 Apr 2014 09:59:18 -0700
parents e6131477d52e d5f0404d965f
children 4d0a601553cc
files .hgtags make/hotspot_version
diffstat 499 files changed, 81986 insertions(+), 2070 deletions(-)
--- a/.hgtags	Mon Mar 31 14:07:26 2014 -0700
+++ b/.hgtags	Wed Apr 02 09:59:18 2014 -0700
@@ -451,3 +451,5 @@
 c3d92e04873788275eeebec6bcd2948cdbd143a7 jdk8u20-b06
 39eae002499704438142e78f5e0e24d46d0b266f hs25.20-b07
 f0ea4d3df1299b6c958e1a72f892c695fca055ad jdk8u20-b07
+2627c7be4279478b880d7f643a252d185e4915ec hs25.20-b08
+e9ffa408f7af28205a7114ca78bce29846f5a8df jdk8u20-b08
--- a/agent/src/os/bsd/MacosxDebuggerLocal.m	Mon Mar 31 14:07:26 2014 -0700
+++ b/agent/src/os/bsd/MacosxDebuggerLocal.m	Wed Apr 02 09:59:18 2014 -0700
@@ -95,7 +95,9 @@
 #define CHECK_EXCEPTION_CLEAR_(value) if ((*env)->ExceptionOccurred(env)) { (*env)->ExceptionClear(env); return value; } 
 
 static void throw_new_debugger_exception(JNIEnv* env, const char* errMsg) {
-  (*env)->ThrowNew(env, (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException"), errMsg);
+  jclass exceptionClass = (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException");
+  CHECK_EXCEPTION;
+  (*env)->ThrowNew(env, exceptionClass, errMsg);
 }
 
 static struct ps_prochandle* get_proc_handle(JNIEnv* env, jobject this_obj) {
@@ -129,6 +131,7 @@
 JNIEXPORT void JNICALL 
 Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_init0(JNIEnv *env, jclass cls) {
   symbolicatorID = (*env)->GetFieldID(env, cls, "symbolicator", "J");
+  CHECK_EXCEPTION;
   taskID = (*env)->GetFieldID(env, cls, "task", "J");
   CHECK_EXCEPTION;
 
@@ -236,13 +239,16 @@
   (JNIEnv *env, jobject this_obj, jlong addr) {
   uintptr_t offset;
   const char* sym = NULL;
+  jstring sym_string;
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
   if (ph != NULL && ph->core != NULL) {
     sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
     if (sym == NULL) return 0;
+    sym_string = (*env)->NewStringUTF(env, sym);
+    CHECK_EXCEPTION_(0);
     return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
-                          (*env)->NewStringUTF(env, sym), (jlong)offset);
+                                                sym_string, (jlong)offset);
   }
   return 0;
 }
@@ -749,11 +755,14 @@
      const char* name;
      jobject loadObject;
      jobject loadObjectList;
+     jstring nameString;
 
      base = get_lib_base(ph, i);
      name = get_lib_name(ph, i);
+     nameString = (*env)->NewStringUTF(env, name);
+     CHECK_EXCEPTION;
      loadObject = (*env)->CallObjectMethod(env, this_obj, createLoadObject_ID,
-                                   (*env)->NewStringUTF(env, name), (jlong)0, (jlong)base);
+                                            nameString, (jlong)0, (jlong)base);
      CHECK_EXCEPTION;
      loadObjectList = (*env)->GetObjectField(env, this_obj, loadObjectList_ID);
      CHECK_EXCEPTION;
--- a/agent/src/os/linux/libproc.h	Mon Mar 31 14:07:26 2014 -0700
+++ b/agent/src/os/linux/libproc.h	Wed Apr 02 09:59:18 2014 -0700
@@ -80,7 +80,7 @@
 *************************************************************************************/
 
 
-#if defined(sparc)  || defined(sparcv9)
+#if defined(sparc) || defined(sparcv9) || defined(ppc64)
 #define user_regs_struct  pt_regs
 #endif
 
--- a/make/Makefile	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/Makefile	Wed Apr 02 09:59:18 2014 -0700
@@ -87,6 +87,7 @@
 # Typical C1/C2 targets made available with this Makefile
 C1_VM_TARGETS=product1 fastdebug1 optimized1 debug1
 C2_VM_TARGETS=product  fastdebug  optimized  debug
+CORE_VM_TARGETS=productcore fastdebugcore optimizedcore debugcore
 ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero debugzero
 SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark debugshark
 MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 debugminimal1
@@ -136,6 +137,12 @@
 all_debugshark:     debugshark docs export_debug
 all_optimizedshark: optimizedshark docs export_optimized
 
+allcore:           all_productcore all_fastdebugcore
+all_productcore:   productcore docs export_product
+all_fastdebugcore: fastdebugcore docs export_fastdebug
+all_debugcore:     debugcore docs export_debug
+all_optimizedcore: optimizedcore docs export_optimized
+
 # Do everything
 world:         all create_jdk
 
@@ -154,6 +161,7 @@
 # Output directories
 C1_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1
 C2_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2
+CORE_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_core
 MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1
 ZERO_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_zero
 SHARK_DIR   =$(OUTPUTDIR)/$(VM_PLATFORM)_shark
@@ -167,6 +175,10 @@
 	$(CD) $(GAMMADIR)/make; \
 	$(MAKE) BUILD_DIR=$(C2_DIR) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT)
 
+$(CORE_VM_TARGETS):
+	$(CD) $(GAMMADIR)/make; \
+	$(MAKE) BUILD_DIR=$(CORE_DIR) BUILD_FLAVOR=$(@:%core=%) VM_TARGET=$@ generic_buildcore $(ALT_OUT)
+
 $(ZERO_VM_TARGETS):
 	$(CD) $(GAMMADIR)/make; \
 	$(MAKE) BUILD_DIR=$(ZERO_DIR) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ generic_buildzero $(ALT_OUT)
@@ -228,6 +240,20 @@
 		      $(MAKE_ARGS) $(VM_TARGET)
 endif
 
+generic_buildcore: $(HOTSPOT_SCRIPT)
+ifeq ($(HS_ARCH),ppc)
+  ifeq ($(ARCH_DATA_MODEL),64)
+	$(MKDIR) -p $(OUTPUTDIR)
+	$(CD) $(OUTPUTDIR); \
+		$(MAKE) -f $(ABS_OS_MAKEFILE) \
+			$(MAKE_ARGS) $(VM_TARGET)
+  else
+	@$(ECHO) "No ($(VM_TARGET)) for ppc ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)"
+  endif
+else
+	@$(ECHO) "No ($(VM_TARGET)) for $(HS_ARCH)"
+endif
+
 generic_buildzero: $(HOTSPOT_SCRIPT)
 	$(MKDIR) -p $(OUTPUTDIR)
 	$(CD) $(OUTPUTDIR); \
@@ -287,6 +313,7 @@
 DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
 C1_BUILD_DIR      =$(C1_DIR)/$(BUILD_FLAVOR)
 C2_BUILD_DIR      =$(C2_DIR)/$(BUILD_FLAVOR)
+CORE_BUILD_DIR    =$(CORE_DIR)/$(BUILD_FLAVOR)
 MINIMAL1_BUILD_DIR=$(MINIMAL1_DIR)/$(BUILD_FLAVOR)
 ZERO_BUILD_DIR    =$(ZERO_DIR)/$(BUILD_FLAVOR)
 SHARK_BUILD_DIR   =$(SHARK_DIR)/$(BUILD_FLAVOR)
@@ -464,6 +491,28 @@
 	$(install-dir)
 endif
 
+# Core
+ifeq ($(JVM_VARIANT_CORE), true)
+# Common
+$(EXPORT_LIB_DIR)/%.jar:			$(CORE_BUILD_DIR)/../generated/%.jar
+	$(install-file)
+$(EXPORT_INCLUDE_DIR)/%:			$(CORE_BUILD_DIR)/../generated/jvmtifiles/%
+	$(install-file)
+# Unix
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX):	$(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(CORE_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(CORE_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):	$(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.debuginfo:		$(CORE_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.diz:			$(CORE_BUILD_DIR)/%.diz
+	$(install-file)
+endif
+
 # Shark
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
 # Common
@@ -531,6 +580,7 @@
 clean_build:
 	$(RM) -r $(C1_DIR)
 	$(RM) -r $(C2_DIR)
+	$(RM) -r $(CORE_DIR)
 	$(RM) -r $(ZERO_DIR)
 	$(RM) -r $(SHARK_DIR)
 	$(RM) -r $(MINIMAL1_DIR)
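The CORE additions above are exercised like the existing C1/C2 targets; the sketch below is illustrative only (not part of the change) and assumes a ppc64 build, since generic_buildcore bails out unless HS_ARCH=ppc and ARCH_DATA_MODEL=64, and the CORE export rules only fire when JVM_VARIANT_CORE=true.

    # Hypothetical invocations from the hotspot make directory:
    make productcore ARCH_DATA_MODEL=64   # builds into $(OUTPUTDIR)/$(VM_PLATFORM)_core/product
    make allcore                          # all_productcore + all_fastdebugcore (docs + export steps)
    make clean_build                      # now also removes $(CORE_DIR)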
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/Makefile	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,381 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile creates a build tree and lights off a build.
+# You can go back into the build tree and perform rebuilds or
+# incremental builds as desired. Be sure to reestablish
+# environment variable settings for LD_LIBRARY_PATH and JAVA_HOME.
+
+# The make process now relies on java and javac. These can be
+# specified either implicitly on the PATH, by setting the
+# (JDK-inherited) ALT_BOOTDIR environment variable to full path to a
+# JDK in which bin/java and bin/javac are present and working (e.g.,
+# /usr/local/java/jdk1.3/solaris), or via the (JDK-inherited)
+# default BOOTDIR path value. Note that one of ALT_BOOTDIR
+# or BOOTDIR has to be set. We do *not* search for javac, javah, rmic, etc.
+# on the PATH.
+#
+# One can set ALT_BOOTDIR or BOOTDIR to point to a jdk that runs on
+# an architecture that differs from the target architecture, as long
+# as the bootstrap jdk runs under the same flavor of OS as the target
+# (i.e., if the target is linux, point to a jdk that runs on a linux
+# box).  In order to use such a bootstrap jdk, set the make variable
+# REMOTE to the desired remote command mechanism, e.g.,
+#
+#    make REMOTE="rsh -l me myotherlinuxbox"
+
+# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
+# JDI binding on SA produces two binaries:
+#  1. sa-jdi.jar       - This is built before building libjvm.so
+#                        Please refer to ./makefiles/sa.make
+#  2. libsa.so         - Native library for SA - This is built after
+#                        libjsig.so (signal interposition library)
+#                        Please refer to ./makefiles/vm.make
+# If $(GAMMADIR)/agent dir is not present, SA components are not built.
+
+# No tests on Aix.
+TEST_IN_BUILD=false
+
+ifeq ($(GAMMADIR),)
+include ../../make/defs.make
+else
+include $(GAMMADIR)/make/defs.make
+endif
+include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
+
+ifndef CC_INTERP
+  ifndef FORCE_TIERED
+    FORCE_TIERED=1
+  endif
+endif
+# C1 is not ported on ppc64(le), so we cannot build a tiered VM:
+ifneq (,$(filter $(ARCH),ppc64 ppc64le))
+  FORCE_TIERED=0
+endif
+
+ifdef LP64
+  ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
+    _JUNK_ := $(shell echo >&2 \
+       $(OSNAME) $(ARCH) "*** ERROR: this platform does not support 64-bit compilers!")
+	@exit 1
+  endif
+endif
+
+# we need to set up LP64 correctly to satisfy sanity checks in adlc
+ifneq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
+  MFLAGS += " LP64=1 "
+endif
+
+# pass USE_SUNCC further, through MFLAGS
+ifdef USE_SUNCC
+  MFLAGS += " USE_SUNCC=1 "
+endif
+
+# The following renders pathnames in generated Makefiles valid on
+# machines other than the machine containing the build tree.
+#
+# For example, let's say my build tree lives on /files12 on
+# exact.east.sun.com.  This logic will cause GAMMADIR to begin with
+# /net/exact/files12/...
+#
+# We only do this on SunOS variants, for a couple of reasons:
+#  * It is extremely rare that source trees exist on other systems
+#  * It has been claimed that the Linux automounter is flakey, so
+#    changing GAMMADIR in a way that exercises the automounter could
+#    prove to be a source of unreliability in the build process.
+# Obviously, this Makefile is only relevant on SunOS boxes to begin
+# with, but the SunOS conditionalization will make it easier to
+# combine Makefiles in the future (assuming we ever do that).
+
+ifeq ($(OSNAME),solaris)
+
+  #   prepend current directory to relative pathnames.
+  NEW_GAMMADIR :=                                    \
+    $(shell echo $(GAMMADIR) |                       \
+      sed -e "s=^\([^/].*\)=$(shell pwd)/\1="        \
+     )
+  unexport NEW_GAMMADIR
+
+  # If NEW_GAMMADIR doesn't already start with "/net/":
+  ifeq ($(strip $(filter /net/%,$(NEW_GAMMADIR))),)
+    #   prepend /net/$(HOST)
+    #   remove /net/$(HOST) if name already began with /home/
+    #   remove /net/$(HOST) if name already began with /java/
+    #   remove /net/$(HOST) if name already began with /lab/
+    NEW_GAMMADIR :=                                     \
+         $(shell echo $(NEW_GAMMADIR) |                 \
+                 sed -e "s=^\(.*\)=/net/$(HOST)\1="     \
+                     -e "s=^/net/$(HOST)/home/=/home/=" \
+                     -e "s=^/net/$(HOST)/java/=/java/=" \
+                     -e "s=^/net/$(HOST)/lab/=/lab/="   \
+          )
+    # Don't use the new value for GAMMADIR unless a file with the new
+    # name actually exists.
+    ifneq ($(wildcard $(NEW_GAMMADIR)),)
+      GAMMADIR := $(NEW_GAMMADIR)
+    endif
+  endif
+
+endif
+
+# BUILDARCH is set to "zero" for Zero builds.  VARIANTARCH
+# is used to give the build directories meaningful names.
+VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH))
+
+# There is a (semi-) regular correspondence between make targets and actions:
+#
+#       Target          Tree Type       Build Dir
+#
+#       debug           compiler2       <os>_<arch>_compiler2/debug
+#       fastdebug       compiler2       <os>_<arch>_compiler2/fastdebug
+#       optimized       compiler2       <os>_<arch>_compiler2/optimized
+#       product         compiler2       <os>_<arch>_compiler2/product
+#
+#       debug1          compiler1       <os>_<arch>_compiler1/debug
+#       fastdebug1      compiler1       <os>_<arch>_compiler1/fastdebug
+#       optimized1      compiler1       <os>_<arch>_compiler1/optimized
+#       product1        compiler1       <os>_<arch>_compiler1/product
+#
+#       debugcore       core            <os>_<arch>_core/debug
+#       fastdebugcore   core            <os>_<arch>_core/fastdebug
+#       optimizedcore   core            <os>_<arch>_core/optimized
+#       productcore     core            <os>_<arch>_core/product
+#
+#       debugzero       zero            <os>_<arch>_zero/debug
+#       fastdebugzero   zero            <os>_<arch>_zero/fastdebug
+#       optimizedzero   zero            <os>_<arch>_zero/optimized
+#       productzero     zero            <os>_<arch>_zero/product
+#
+#       debugshark      shark           <os>_<arch>_shark/debug
+#       fastdebugshark  shark           <os>_<arch>_shark/fastdebug
+#       optimizedshark  shark           <os>_<arch>_shark/optimized
+#       productshark    shark           <os>_<arch>_shark/product
+#
+#       fastdebugminimal1 minimal1      <os>_<arch>_minimal1/fastdebug
+#       productminimal1   minimal1      <os>_<arch>_minimal1/product
+#
+# What you get with each target:
+#
+# debug*     - debug compile with asserts enabled
+# fastdebug* - optimized compile, but with asserts enabled
+# optimized* - optimized compile, no asserts
+# product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
+
+# This target list needs to be coordinated with the usage message
+# in the build.sh script:
+TARGETS           = debug fastdebug optimized product
+
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  SUBDIR_DOCS     = $(OSNAME)_$(VARIANTARCH)_docs
+else
+  SUBDIR_DOCS     = $(OSNAME)_$(BUILDARCH)_docs
+endif
+SUBDIRS_C1        = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS))
+SUBDIRS_C2        = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler2/,$(TARGETS))
+SUBDIRS_TIERED    = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS))
+SUBDIRS_CORE      = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS))
+SUBDIRS_ZERO      = $(addprefix $(OSNAME)_$(VARIANTARCH)_zero/,$(TARGETS))
+SUBDIRS_SHARK     = $(addprefix $(OSNAME)_$(VARIANTARCH)_shark/,$(TARGETS))
+SUBDIRS_MINIMAL1  = $(addprefix $(OSNAME)_$(BUILDARCH)_minimal1/,$(TARGETS))
+
+TARGETS_C2        = $(TARGETS)
+TARGETS_C1        = $(addsuffix 1,$(TARGETS))
+TARGETS_TIERED    = $(addsuffix tiered,$(TARGETS))
+TARGETS_CORE      = $(addsuffix core,$(TARGETS))
+TARGETS_ZERO      = $(addsuffix zero,$(TARGETS))
+TARGETS_SHARK     = $(addsuffix shark,$(TARGETS))
+TARGETS_MINIMAL1 =  $(addsuffix minimal1,$(TARGETS))
+
+BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
+BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
+BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+BUILDTREE_VARS   += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)
+
+BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
+
+#-------------------------------------------------------------------------------
+
+# Could make everything by default, but that would take a while.
+all:
+	@echo "Try '$(MAKE) <target> ...'  where <target> is one or more of"
+	@echo "  $(TARGETS_C2)"
+	@echo "  $(TARGETS_C1)"
+	@echo "  $(TARGETS_CORE)"
+	@echo "  $(TARGETS_ZERO)"
+	@echo "  $(TARGETS_SHARK)"
+	@echo "  $(TARGETS_MINIMAL1)"
+
+checks: check_os_version check_j2se_version
+
+# We do not want people accidentally building on old systems (e.g. Linux 2.2.x,
+# Solaris 2.5.1, 2.6).
+# Disable this check by setting DISABLE_HOTSPOT_OS_VERSION_CHECK=ok.
+
+SUPPORTED_OS_VERSION = AIX
+OS_VERSION := $(shell uname -a)
+EMPTY_IF_NOT_SUPPORTED = $(filter $(SUPPORTED_OS_VERSION),$(OS_VERSION))
+
+check_os_version:
+ifeq ($(DISABLE_HOTSPOT_OS_VERSION_CHECK)$(EMPTY_IF_NOT_SUPPORTED),)
+	$(QUIETLY) >&2 echo "*** This OS is not supported:" `uname -a`; exit 1;
+endif
+
+# jvmti.make requires XSLT (J2SE 1.4.x or newer):
+XSLT_CHECK	= $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
+# If not found then fail fast.
+check_j2se_version:
+	$(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
+	if [ $$? -ne 0 ]; then \
+	  $(REMOTE) $(RUN.JAVA) -version; \
+	  echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
+	  "to bootstrap this build" 1>&2; \
+	  exit 1; \
+	fi
+
+$(SUBDIRS_TIERED): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=tiered
+
+$(SUBDIRS_C2): $(BUILDTREE_MAKE)
+ifeq ($(FORCE_TIERED),1)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+		$(BUILDTREE) VARIANT=tiered FORCE_TIERED=1
+else
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+		$(BUILDTREE) VARIANT=compiler2
+endif
+
+$(SUBDIRS_C1): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=compiler1
+
+$(SUBDIRS_CORE): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=core
+
+$(SUBDIRS_ZERO): $(BUILDTREE_MAKE) platform_zero
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=zero VARIANTARCH=$(VARIANTARCH)
+
+$(SUBDIRS_SHARK): $(BUILDTREE_MAKE) platform_zero
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH)
+
+$(SUBDIRS_MINIMAL1): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=minimal1
+
+
+platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
+	$(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@
+
+# Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME
+
+$(TARGETS_C2):  $(SUBDIRS_C2)
+	cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_TIERED):  $(SUBDIRS_TIERED)
+	cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_C1):  $(SUBDIRS_C1)
+	cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_CORE):  $(SUBDIRS_CORE)
+	cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_ZERO):  $(SUBDIRS_ZERO)
+	cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_SHARK):  $(SUBDIRS_SHARK)
+	cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_MINIMAL1):  $(SUBDIRS_MINIMAL1)
+	cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+# Just build the tree, and nothing else:
+tree:      $(SUBDIRS_C2)
+tree1:     $(SUBDIRS_C1)
+treecore:  $(SUBDIRS_CORE)
+treezero:  $(SUBDIRS_ZERO)
+treeshark: $(SUBDIRS_SHARK)
+treeminimal1: $(SUBDIRS_MINIMAL1)
+
+# Doc target.  This is the same for all build options.
+#     Hence create a docs directory beside ...$(ARCH)_[...]
+# We specify 'BUILD_FLAVOR=product' so that the proper
+# ENABLE_FULL_DEBUG_SYMBOLS value is used.
+docs: checks
+	$(QUIETLY) mkdir -p $(SUBDIR_DOCS)
+	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
+
+# Synonyms for win32-like targets.
+compiler2:  debug product
+
+compiler1:  debug1 product1
+
+core: debugcore productcore
+
+zero: debugzero productzero
+
+shark: debugshark productshark
+
+clean_docs:
+	rm -rf $(SUBDIR_DOCS)
+
+clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark clean_minimal1:
+	rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@)
+
+clean:  clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_minimal1 clean_docs
+
+include $(GAMMADIR)/make/cscope.make
+
+#-------------------------------------------------------------------------------
+
+.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK) $(TARGETS_MINIMAL1)
+.PHONY: tree tree1 treecore treezero treeshark
+.PHONY: all compiler1 compiler2 core zero shark
+.PHONY: clean clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark docs clean_docs
+.PHONY: checks check_os_version check_j2se_version
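As a hedged walk-through of the target/tree flow this Makefile defines (directory names assume OSNAME=aix and BUILDARCH=ppc64, per the table in the header comment):

    cd make/aix
    make checks                  # check_os_version ('uname -a' must mention AIX) + the javap XSLT probe
    make treecore                # buildtree.make populates aix_ppc64_core/{debug,fastdebug,optimized,product}
    make productcore             # descends into aix_ppc64_core/product and builds there
    make productcore INSTALL=y   # same, then runs 'make install' to copy the JVM into JAVA_HOME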
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/adlc_updater	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,20 @@
+#! /bin/sh
+#
+# This file is used by adlc.make to selectively update generated
+# adlc files. Because source and target directories are relative
+# paths, this file is copied to the target build directory before
+# use.
+#
+# adlc-updater <file> <source-dir> <target-dir>
+#
+fix_lines() {
+  # repair bare #line directives in $1 to refer to $2
+  awk < $1 > $1+ '
+    /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
+    {print}
+  ' F2=$2
+  mv $1+ $1
+}
+fix_lines $2/$1 $3/$1
+[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
+( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
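A minimal sketch of what fix_lines above does, using a hypothetical generated file name; the awk rule rewrites a bare '#line 999999' marker so it points at the correct line of the named file:

    printf 'int a;\n#line 999999\nint b;\n' > ad_demo.cpp     # hypothetical input
    awk < ad_demo.cpp > ad_demo.cpp+ '
      /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
      {print}
    ' F2=ad_demo.cpp
    mv ad_demo.cpp+ ad_demo.cpp
    # the marker on line 2 now reads:  #line 3 "ad_demo.cpp"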
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/adjust-mflags.sh	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,87 @@
+#! /bin/sh
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This script is used only from top.make.
+# The macro $(MFLAGS-adjusted) calls this script to
+# adjust the "-j" arguments to take into account
+# the HOTSPOT_BUILD_JOBS variable.  The default
+# handling of the "-j" argument by gnumake does
+# not meet our needs, so we must adjust it ourselves.
+
+# This argument adjustment applies to two recursive
+# calls to "$(MAKE) $(MFLAGS-adjusted)" in top.make.
+# One invokes adlc.make, and the other invokes vm.make.
+# The adjustment propagates the desired concurrency
+# level down to the sub-make (of the adlc or vm).
+# The default behavior of gnumake is to run all
+# sub-makes without concurrency ("-j1").
+
+# Also, we use a make variable rather than an explicit
+# "-j<N>" argument to control this setting, so that
+# the concurrency setting (which must be tuned separately
+# for each MP system) can be set via an environment variable.
+# The recommended setting is 1.5x to 2x the number of available
+# CPUs on the MP system, which is large enough to keep the CPUs
+# busy (even though some jobs may be I/O bound) but not too large,
+# we may presume, to overflow the system's swap space.
+
+set -eu
+
+default_build_jobs=4
+
+case $# in
+[12])	true;;
+*)	>&2 echo "Usage: $0 <MFLAGS> [<HOTSPOT_BUILD_JOBS>]"; exit 2;;
+esac
+
+MFLAGS=$1
+HOTSPOT_BUILD_JOBS=${2-}
+
+# Normalize any -jN argument to the form " -j${HBJ}"
+MFLAGS=`
+	echo "$MFLAGS" \
+	| sed '
+		s/^-/ -/
+		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -j[0-9][0-9]*/ -j/
+		s/ -j\([^ 	]\)/ -j -\1/
+		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
+	' `
+
+case ${HOTSPOT_BUILD_JOBS} in \
+
+'') case ${MFLAGS} in
+    *\ -j*)
+	>&2 echo "# Note: -jN is ineffective for setting parallelism in this makefile."
+	>&2 echo "# Please set HOTSPOT_BUILD_JOBS=${default_build_jobs} on the command line or in the environment."
+    esac;;
+
+?*) case ${MFLAGS} in
+     *\ -j*) true;;
+     *)      MFLAGS="-j${HOTSPOT_BUILD_JOBS} ${MFLAGS}";;
+    esac;;
+esac
+
+echo "${MFLAGS}"
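Hedged examples of the normalization above (argument values are illustrative): any caller-supplied -jN is stripped and replaced by HOTSPOT_BUILD_JOBS, falling back to default_build_jobs when neither is set.

    sh adjust-mflags.sh "-j16" "8"   # sed rewrites " -j16" -> " -j" -> " -j8"; prints " -j8"
    sh adjust-mflags.sh "" "8"       # no -j in MFLAGS: prepends it, printing "-j8 "
    sh adjust-mflags.sh "-j16"       # HOTSPOT_BUILD_JOBS unset: warns on stderr, prints " -j4" (the default)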
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/adlc.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,231 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile (adlc.make) is included from the adlc.make in the
+# build directories.
+# It knows how to compile, link, and run the adlc.
+
+include $(GAMMADIR)/make/$(Platform_os_family)/makefiles/rules.make
+
+# #########################################################################
+
+# OUTDIR must be the same as AD_Dir = $(GENERATED)/adfiles in top.make:
+GENERATED = ../generated
+OUTDIR  = $(GENERATED)/adfiles
+
+ARCH = $(Platform_arch)
+OS = $(Platform_os_family)
+
+SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad 
+
+ifeq ("${Platform_arch_model}", "${Platform_arch}")
+  SOURCES.AD = \
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
+else
+  SOURCES.AD = \
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
+endif
+
+EXEC = $(OUTDIR)/adlc
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(GAMMADIR)/src/share/vm/adlc
+VPATH += $(Src_Dirs_V:%=%:)
+
+# set INCLUDES for C preprocessor
+Src_Dirs_I += $(GAMMADIR)/src/share/vm/adlc $(GENERATED)
+INCLUDES += $(Src_Dirs_I:%=-I%)
+
+# set flags for adlc compilation
+CXXFLAGS = $(SYSDEFS) $(INCLUDES)
+
+# Force assertions on.
+CXXFLAGS += -DASSERT
+
+# CFLAGS_WARN holds compiler options to suppress/enable warnings.
+# Suppress warnings (for now)
+CFLAGS_WARN = -w
+CFLAGS += $(CFLAGS_WARN)
+
+OBJECTNAMES = \
+	adlparse.o \
+	archDesc.o \
+	arena.o \
+	dfa.o \
+	dict2.o \
+	filebuff.o \
+	forms.o \
+	formsopt.o \
+	formssel.o \
+	main.o \
+	adlc-opcodes.o \
+	output_c.o \
+	output_h.o \
+
+OBJECTS = $(OBJECTNAMES:%=$(OUTDIR)/%)
+
+GENERATEDNAMES = \
+        ad_$(Platform_arch_model).cpp \
+        ad_$(Platform_arch_model).hpp \
+        ad_$(Platform_arch_model)_clone.cpp \
+        ad_$(Platform_arch_model)_expand.cpp \
+        ad_$(Platform_arch_model)_format.cpp \
+        ad_$(Platform_arch_model)_gen.cpp \
+        ad_$(Platform_arch_model)_misc.cpp \
+        ad_$(Platform_arch_model)_peephole.cpp \
+        ad_$(Platform_arch_model)_pipeline.cpp \
+        adGlobals_$(Platform_arch_model).hpp \
+        dfa_$(Platform_arch_model).cpp \
+
+GENERATEDFILES = $(GENERATEDNAMES:%=$(OUTDIR)/%)
+
+# #########################################################################
+
+all: $(EXEC)
+
+$(EXEC) : $(OBJECTS)
+	@echo Making adlc
+	$(QUIETLY) $(HOST.LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
+
+# Random dependencies:
+$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
+
+# The source files refer to ostream.h, which sparcworks calls iostream.h
+$(OBJECTS): ostream.h
+
+ostream.h :
+	@echo >$@ '#include <iostream.h>'
+
+dump:
+	: OUTDIR=$(OUTDIR)
+	: OBJECTS=$(OBJECTS)
+	: products = $(GENERATEDFILES)
+
+all: $(GENERATEDFILES)
+
+$(GENERATEDFILES): refresh_adfiles
+
+# Get a unique temporary directory name, so multiple makes can run in parallel.
+# Note that product files are updated via "mv", which is atomic.
+TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
+
+# Debuggable by default
+CFLAGS += -g
+
+# Pass -D flags into ADLC.
+ADLCFLAGS += $(SYSDEFS)
+
+# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO.
+ADLCFLAGS += -q -T
+
+# Normally, debugging is done directly on the ad_<arch>*.cpp files.
+# But -g will put #line directives in those files pointing back to <arch>.ad.
+# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives
+# so skip it for 3.2 and earlier.
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+ADLCFLAGS += -g
+endif
+
+ifdef LP64
+ADLCFLAGS += -D_LP64
+else
+ADLCFLAGS += -U_LP64
+endif
+
+#
+# adlc_updater is a simple sh script, under sccs control. It is
+# used to selectively update generated adlc files. This should
+# provide a nice compilation speed improvement.
+#
+ADLC_UPDATER_DIRECTORY = $(GAMMADIR)/make/$(OS)
+ADLC_UPDATER = adlc_updater
+$(ADLC_UPDATER): $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER)
+	$(QUIETLY) cp $< $@; chmod +x $@
+
+# This action refreshes all generated adlc files simultaneously.
+# The way it works is this:
+# 1) create a scratch directory to work in.
+# 2) if the current working directory does not have $(ADLC_UPDATER), copy it.
+# 3) run the compiled adlc executable. This will create new adlc files in the scratch directory.
+# 4) call $(ADLC_UPDATER) on each generated adlc file. It will selectively update changed or missing files.
+# 5) If we actually updated any files, echo a notice.
+#
+refresh_adfiles: $(EXEC) $(SOURCE.AD) $(ADLC_UPDATER)
+	@rm -rf $(TEMPDIR); mkdir $(TEMPDIR)
+	$(QUIETLY) $(EXEC) $(ADLCFLAGS) $(SOURCE.AD) \
+            -c$(TEMPDIR)/ad_$(Platform_arch_model).cpp -h$(TEMPDIR)/ad_$(Platform_arch_model).hpp -a$(TEMPDIR)/dfa_$(Platform_arch_model).cpp -v$(TEMPDIR)/adGlobals_$(Platform_arch_model).hpp \
+	    || { rm -rf $(TEMPDIR); exit 1; }
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_clone.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_expand.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_format.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_gen.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_misc.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_peephole.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_pipeline.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) adGlobals_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) dfa_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) [ -f $(TEMPDIR)/made-change ] \
+		|| echo "Rescanned $(SOURCE.AD) but encountered no changes."
+	$(QUIETLY) rm -rf $(TEMPDIR)
+
+
+# #########################################################################
+
+$(SOURCE.AD): $(SOURCES.AD)
+	$(QUIETLY) $(PROCESS_AD_FILES) $(SOURCES.AD) > $(SOURCE.AD)
+
+#PROCESS_AD_FILES = cat
+# Pass through #line directives, in case user enables -g option above:
+PROCESS_AD_FILES = awk '{ \
+    if (CUR_FN != FILENAME) { CUR_FN=FILENAME; NR_BASE=NR-1; need_lineno=1 } \
+    if (need_lineno && $$0 !~ /\/\//) \
+      { print "\n\n\#line " (NR-NR_BASE) " \"" FILENAME "\""; need_lineno=0 }; \
+    print }'
+
+$(OUTDIR)/%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
+
+# Some object files are given a prefix, to disambiguate
+# them from objects of the same name built for the VM.
+$(OUTDIR)/adlc-%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
+
+# #########################################################################
+
+clean:
+	rm $(OBJECTS)
+
+cleanall:
+	rm $(OBJECTS) $(EXEC)
+
+# #########################################################################
+
+.PHONY: all dump refresh_adfiles clean cleanall
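For orientation, a hedged sketch of the adlc run that refresh_adfiles performs, with the temporary-directory plumbing and updater calls omitted; ppc_64 stands in for $(Platform_arch_model) and is only an assumption here.

    AM=ppc_64                      # hypothetical Platform_arch_model
    ./adlc -q -T -D_LP64 aix_${AM}.ad \
        -cad_${AM}.cpp -had_${AM}.hpp -adfa_${AM}.cpp -vadGlobals_${AM}.hpp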
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/build_vm_def.sh	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# If we're cross compiling, use that path for nm
+if [ "$CROSS_COMPILE_ARCH" != "" ]; then 
+NM=$ALT_COMPILER_PATH/nm
+else
+# On AIX we must avoid picking up the 'nm' from GNU binutils, which may be
+# installed under /opt/freeware/bin, so use an absolute path here.
+NM=/usr/bin/nm
+fi
+
+$NM -X64 -B -C $* \
+    | awk '{
+              if (($2 == "d" || $2 == "D") && ($3 ~ /^__vft/ || $3 ~ /^gHotSpotVM/)) print "\t" $3 ";"
+              if ($3 ~ /^UseSharedSpaces$/) print "\t" $3 ";"
+              if ($3 ~ /^SharedArchivePath__9Arguments$/) print "\t" $3 ";"
+          }' \
+    | sort -u
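An illustrative run of the filter above; the symbol names and addresses are invented, but follow the gHotSpotVM/__vft patterns it selects, and the output lines form the body of the export list:

    # given 'nm -X64 -B -C libjvm.o' lines such as (hypothetical):
    #   00000000200018c8 D gHotSpotVMStructs
    #   00000000200018f0 d __vft10JavaThread
    #   0000000020001920 T some_function          <- dropped: not a d/D symbol of interest
    sh build_vm_def.sh libjvm.o
    #   	__vft10JavaThread;
    #   	gHotSpotVMStructs;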
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/buildtree.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,364 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Usage:
+#
+# $(MAKE) -f buildtree.make SRCARCH=srcarch BUILDARCH=buildarch LIBARCH=libarch
+#         GAMMADIR=dir OS_FAMILY=os VARIANT=variant
+#
+# The macros ARCH, GAMMADIR, OS_FAMILY and VARIANT must be defined in the
+# environment or on the command-line:
+#
+# ARCH		- sparc, i486, ... HotSpot cpu and os_cpu source directory
+# BUILDARCH     - build directory
+# LIBARCH       - the corresponding directory in JDK/JRE
+# GAMMADIR	- top of workspace
+# OS_FAMILY	- operating system
+# VARIANT	- core, compiler1, compiler2, or tiered
+# HOTSPOT_RELEASE_VERSION - <major>.<minor>-b<nn> (11.0-b07)
+# HOTSPOT_BUILD_VERSION   - internal, internal-$(USER_RELEASE_SUFFIX) or empty
+# JRE_RELEASE_VERSION     - <major>.<minor>.<micro> (1.7.0)
+#
+# Builds the directory trees with makefiles plus some convenience files in
+# each directory:
+#
+# Makefile	- for "make foo"
+# flags.make	- with macro settings
+# vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
+# adlc.make	-
+# trace.make	- generate tracing event and type definitions
+# jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
+# sa.make	- generate SA jar file and natives
+#
+# The makefiles are split this way so that "make foo" will run faster by not
+# having to read the dependency files for the vm.
+
+-include $(SPEC)
+include $(GAMMADIR)/make/scm.make
+include $(GAMMADIR)/make/defs.make
+include $(GAMMADIR)/make/altsrc.make
+
+
+# 'gmake MAKE_VERBOSE=y' or 'gmake QUIETLY=' gives all the gory details.
+QUIETLY$(MAKE_VERBOSE)	= @
+
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  PLATFORM_FILE = $(shell dirname $(shell dirname $(shell pwd)))/platform_zero
+else
+  ifdef USE_SUNCC
+    PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).suncc
+  else
+    PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH)
+  endif
+endif
+
+# Allow overriding of the arch part of the directory but default
+# to BUILDARCH if nothing is specified
+ifeq ($(VARIANTARCH),)
+  VARIANTARCH=$(BUILDARCH)
+endif
+
+ifdef FORCE_TIERED
+ifeq		($(VARIANT),tiered)
+PLATFORM_DIR	= $(OS_FAMILY)_$(VARIANTARCH)_compiler2
+else
+PLATFORM_DIR	= $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
+endif
+else
+PLATFORM_DIR    = $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
+endif
+
+#
+# We do two levels of exclusion in the shared directory.
+# TOPLEVEL excludes are pruned; they are not recursively searched,
+# but lower level directories can be named without fear of collision.
+# ALWAYS excludes are excluded at any level in the directory tree.
+#
+
+ALWAYS_EXCLUDE_DIRS     = $(SCM_DIRS)
+
+ifeq		($(VARIANT),tiered)
+TOPLEVEL_EXCLUDE_DIRS	= $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name agent
+else
+ifeq		($(VARIANT),compiler2)
+TOPLEVEL_EXCLUDE_DIRS	= $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name c1 -o -name agent
+else
+# compiler1 and core use the same exclude list
+TOPLEVEL_EXCLUDE_DIRS	= $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name opto -o -name libadt -o -name agent
+endif
+endif
+
+# Get things from the platform file.
+COMPILER	= $(shell sed -n 's/^compiler[ 	]*=[ 	]*//p' $(PLATFORM_FILE))
+
+SIMPLE_DIRS	= \
+	$(PLATFORM_DIR)/generated/dependencies \
+	$(PLATFORM_DIR)/generated/adfiles \
+	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles
+
+TARGETS      = debug fastdebug optimized product
+SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
+
+# For dependencies and recursive makes.
+BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
+
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+
+BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
+	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
+
+# Define variables to be set in flags.make.
+# Default values are set in make/defs.make.
+ifeq ($(HOTSPOT_BUILD_VERSION),)
+  HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)
+else
+  HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)
+endif
+# Set BUILD_USER from system-dependent hints:  $LOGNAME, $(whoami)
+ifndef HOTSPOT_BUILD_USER
+  HOTSPOT_BUILD_USER := $(shell echo $$LOGNAME)
+endif
+ifndef HOTSPOT_BUILD_USER
+  HOTSPOT_BUILD_USER := $(shell whoami)
+endif
+# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro
+# or make/hotspot_distro.
+ifndef HOTSPOT_VM_DISTRO
+  ifeq ($(call if-has-altsrc,$(HS_COMMON_SRC)/,true,false),true)
+    include $(GAMMADIR)/make/hotspot_distro
+  else
+    include $(GAMMADIR)/make/openjdk_distro
+  endif
+endif
+
+# If this is a hotspot-only build and/or OPENJDK isn't passed down, we need to set OPENJDK
+ifndef OPENJDK
+  ifneq ($(call if-has-altsrc,$(HS_COMMON_SRC)/,true,false),true)
+    OPENJDK=true
+  endif
+endif
+
+BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION=  JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+
+BUILDTREE	= \
+	$(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS)
+
+BUILDTREE_COMMENT	= echo "\# Generated by $(BUILDTREE_MAKE)"
+
+all:  $(SUBMAKE_DIRS)
+
+# Run make in each subdirectory recursively.
+$(SUBMAKE_DIRS): $(SIMPLE_DIRS) FORCE
+	$(QUIETLY) [ -d $@ ] || { mkdir -p $@; }
+	$(QUIETLY) cd $@ && $(BUILDTREE) TARGET=$(@F)
+	$(QUIETLY) touch $@
+
+$(SIMPLE_DIRS):
+	$(QUIETLY) mkdir -p $@
+
+# Convenience macro which takes a source relative path, applies $(1) to the
+# absolute path, and then replaces $(GAMMADIR) in the result with a
+# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
+gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
+
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/ppc64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
+flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo "Platform_file = $(PLATFORM_FILE)" | sed 's|$(GAMMADIR)|$$(GAMMADIR)|'; \
+	sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \
+	echo; \
+	echo "GAMMADIR = $(GAMMADIR)"; \
+	echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
+	echo "OSNAME = $(OSNAME)"; \
+	echo "SYSDEFS = \$$(Platform_sysdefs)"; \
+	echo "SRCARCH = $(SRCARCH)"; \
+	echo "BUILDARCH = $(BUILDARCH)"; \
+	echo "LIBARCH = $(LIBARCH)"; \
+	echo "TARGET = $(TARGET)"; \
+	echo "HS_BUILD_VER = $(HS_BUILD_VER)"; \
+	echo "JRE_RELEASE_VER = $(JRE_RELEASE_VERSION)"; \
+	echo "SA_BUILD_VERSION = $(HS_BUILD_VER)"; \
+	echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
+	echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
+	echo "OPENJDK = $(OPENJDK)"; \
+	echo "$(LP64_SETTING/$(DATA_MODE))"; \
+	echo; \
+	echo "# Used for platform dispatching"; \
+	echo "TARGET_DEFINES  = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
+	echo "TARGET_DEFINES += -DTARGET_ARCH_\$$(Platform_arch)"; \
+	echo "TARGET_DEFINES += -DTARGET_ARCH_MODEL_\$$(Platform_arch_model)"; \
+	echo "TARGET_DEFINES += -DTARGET_OS_ARCH_\$$(Platform_os_arch)"; \
+	echo "TARGET_DEFINES += -DTARGET_OS_ARCH_MODEL_\$$(Platform_os_arch_model)"; \
+	echo "TARGET_DEFINES += -DTARGET_COMPILER_\$$(Platform_compiler)"; \
+	echo "CFLAGS += \$$(TARGET_DEFINES)"; \
+	echo; \
+	echo "Src_Dirs_V = \\"; \
+	sed 's/$$/ \\/;s|$(GAMMADIR)|$$(GAMMADIR)|' ../shared_dirs.lst; \
+	echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
+	echo; \
+	echo "Src_Dirs_I = \\"; \
+	echo "$(call gamma-path,altsrc,share/vm/prims) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm/prims) \\"; \
+	echo "$(call gamma-path,altsrc,share/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm) \\"; \
+	echo "$(call gamma-path,altsrc,share/vm/precompiled) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm/precompiled) \\"; \
+	echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
+	[ -n "$(CFLAGS_BROWSE)" ] && \
+	    echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
+	[ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
+	    echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \
+	[ -n "$(OBJCOPY)" ] && \
+	    echo && echo "OBJCOPY = $(OBJCOPY)"; \
+	[ -n "$(STRIP_POLICY)" ] && \
+	    echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
+	[ -n "$(ZIP_DEBUGINFO_FILES)" ] && \
+	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
+	[ -n "$(ZIPEXE)" ] && \
+	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
+	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
+	    echo && \
+	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
+	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
+	[ -n "$(INCLUDE_TRACE)" ] && \
+	    echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
+	echo; \
+	[ -n "$(SPEC)" ] && \
+	    echo "include $(SPEC)"; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
+	echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
+	) > $@
+
+flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \
+	) > $@
+
+../shared_dirs.lst:  $(BUILDTREE_MAKE) $(GAMMADIR)/src/share/vm
+	@echo Creating directory list $@
+	$(QUIETLY) if [ -d $(HS_ALT_SRC)/share/vm ]; then \
+          find $(HS_ALT_SRC)/share/vm/* -prune \
+	  -type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \
+          \( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; > $@; \
+        fi;
+	$(QUIETLY) find $(HS_COMMON_SRC)/share/vm/* -prune \
+	-type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \
+        \( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; >> $@
+
+Makefile: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/top.make"; \
+	) > $@
+
+vm.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo include flags_vm.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+adlc.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+jvmti.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+sa.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+FORCE:
+
+.PHONY:  all FORCE
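A hedged example of invoking buildtree.make by hand, following the Usage comment at the top of this file; make/aix/Makefile normally does this for each tree via $(BUILDTREE) with TARGET set, and the variable values below are only illustrative for AIX/ppc64:

    cd aix_ppc64_compiler2/product && \
    make -f $GAMMADIR/make/aix/makefiles/buildtree.make \
         GAMMADIR=$GAMMADIR OS_FAMILY=aix SRCARCH=ppc BUILDARCH=ppc64 LIBARCH=ppc64 \
         VARIANT=compiler2 TARGET=product \
         Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
    # each generated file then just includes flags.make plus the matching
    # $(GAMMADIR)/make/aix/makefiles/<name> fragment (top.make in the case of Makefile)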
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/compiler2.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making server version of VM
+
+TYPE=COMPILER2
+
+VM_SUBDIR = server
+
+CFLAGS += -DCOMPILER2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/core.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,33 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making core version of VM
+
+# Select which files to use (in top.make)
+TYPE=CORE
+
+# There is no "core" directory in JDK. Install core build in server directory.
+VM_SUBDIR = server
+
+# Note:  macros.hpp defines CORE
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/debug.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,41 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Sets make macros for making debug version of VM
+
+# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
+DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
+DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
+CFLAGS += $(DEBUG_CFLAGS/BYFILE)
+
+# Set the environment variable HOTSPARC_GENERIC to "true"
+# to inhibit the effect of the previous line on CFLAGS.
+
+# Linker mapfile
+MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
+
+VERSION = debug
+SYSDEFS += -DASSERT -DDEBUG
+PICFLAGS = DEFAULT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/defs.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,231 @@
+#
+# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# The common definitions for hotspot AIX builds.
+# Include the top-level defs.make under the make directory instead of this one.
+# This file is included into make/defs.make.
+
+SLASH_JAVA ?= /java
+
+# Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name
+#ARCH:=$(shell uname -m)
+PATH_SEP = :
+ifeq ($(LP64), 1)
+  ARCH_DATA_MODEL ?= 64
+else
+  ARCH_DATA_MODEL ?= 32
+endif
+
+ifeq ($(ARCH_DATA_MODEL), 64)
+  ARCH = ppc64
+else
+  ARCH = ppc
+endif
+
+# PPC
+ifeq ($(ARCH), ppc)
+  #ARCH_DATA_MODEL = 32
+  PLATFORM         = aix-ppc
+  VM_PLATFORM      = aix_ppc
+  HS_ARCH          = ppc
+endif
+
+# PPC64
+ifeq ($(ARCH), ppc64)
+  #ARCH_DATA_MODEL = 64
+  MAKE_ARGS       += LP64=1
+  PLATFORM         = aix-ppc64
+  VM_PLATFORM      = aix_ppc64
+  HS_ARCH          = ppc
+endif
+
+# On 32-bit AIX we build server and client; on 64-bit, just server.
+ifeq ($(JVM_VARIANTS),)
+  ifeq ($(ARCH_DATA_MODEL), 32)
+    JVM_VARIANTS:=client,server
+    JVM_VARIANT_CLIENT:=true
+    JVM_VARIANT_SERVER:=true
+  else
+    JVM_VARIANTS:=server
+    JVM_VARIANT_SERVER:=true
+  endif
+endif
+
+# determine if HotSpot is being built in JDK6 or earlier version
+JDK6_OR_EARLIER=0
+ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
+  # if the longer variable names (newer build style) are set, then check those
+  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+else
+  # the longer variables aren't set so check the shorter variable names
+  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+endif
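+# For illustration only (the version numbers are hypothetical): with
+# JDK_MAJOR_VERSION=1 and JDK_MINOR_VERSION=8 the expression
+#   expr \( 1 = 1 \& 8 \< 7 \)
+# prints 0, so JDK6_OR_EARLIER stays 0; with JDK_MINOR_VERSION=6 it prints 1
+# and JDK6_OR_EARLIER is set to 1.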
+
+ifeq ($(JDK6_OR_EARLIER),0)
+  # Full Debug Symbols is supported on JDK7 or newer.
+  # The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product
+  # builds is enabled with debug info files ZIP'ed to save space. For
+  # BUILD_FLAVOR != product builds, FDS is always enabled; after all, a
+  # debug build without debug info isn't very useful.
+  # The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled.
+  #
+  # If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be
+  # disabled for a BUILD_FLAVOR == product build.
+  #
+  # Note: Use of a different variable name for the FDS override option
+  # versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS
+  # versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass
+  # in options via environment variables, use of distinct variables
+  # prevents strange behaviours. For example, in a BUILD_FLAVOR !=
+  # product build, the FULL_DEBUG_SYMBOLS environment variable will be
+  # 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If
+  # the same variable name is used, then different values can be picked
+  # up by different parts of the build. Just to be clear, we only need
+  # two variable names because the incoming option value can be
+  # overridden in some situations, e.g., a BUILD_FLAVOR != product
+  # build.
+
+  # Due to the multiple sub-make processes that occur, this logic gets
+  # executed multiple times. We reduce the noise by at least checking that
+  # BUILD_FLAVOR has been set.
+  ifneq ($(BUILD_FLAVOR),)
+    ifeq ($(BUILD_FLAVOR), product)
+      FULL_DEBUG_SYMBOLS ?= 1
+      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+    else
+      # debug variants always get Full Debug Symbols (if available)
+      ENABLE_FULL_DEBUG_SYMBOLS = 1
+    endif
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
+
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      # Default OBJCOPY comes from GNU Binutils
+      ifeq ($(CROSS_COMPILE_ARCH),)
+        DEF_OBJCOPY=/usr/bin/objcopy
+      else
+        # Assume objcopy is part of the cross-compilation toolset
+        ifneq ($(ALT_COMPILER_PATH),)
+          DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
+        endif
+      endif
+      OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
+      ifneq ($(ALT_OBJCOPY),)
+        _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
+        OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+      endif
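+      # For illustration only (the path below is hypothetical): an explicit
+      # override on the make command line, e.g.
+      #   make ... ALT_OBJCOPY=/opt/freeware/bin/objcopy
+      # takes precedence over DEF_OBJCOPY, provided the file is executable.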
+
+      ifeq ($(OBJCOPY),)
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files. You may need to set ALT_OBJCOPY.")
+        ENABLE_FULL_DEBUG_SYMBOLS=0
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+      else
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
+
+        # Library stripping policies for .debuginfo configs:
+        #   all_strip - strips everything from the library
+        #   min_strip - strips most stuff from the library; leaves minimum symbols
+        #   no_strip  - does not strip the library at all
+        #
+        # Oracle security policy requires "all_strip". A waiver was granted on
+        # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
+        #
+        # Currently, STRIP_POLICY is only used when Full Debug Symbols is enabled.
+        #
+        STRIP_POLICY ?= min_strip
+
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+
+        ZIP_DEBUGINFO_FILES ?= 1
+
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
+      endif
+    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
+  endif # BUILD_FLAVOR
+endif # JDK6_OR_EARLIER
+
+# unused JDK_INCLUDE_SUBDIR=aix
+
+# Library suffix
+LIBRARY_SUFFIX=so
+
+EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
+
+# client and server subdirectories have symbolic links to ../libjsig.so
+EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#    EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+#  else
+#    EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+#  endif
+#endif
+EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
+EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
+
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+#  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#      EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
+#    else
+#      EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+#    endif
+#  endif
+endif
+
+ifeq ($(JVM_VARIANT_CLIENT),true)
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
+#  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
+#    else
+#      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+#    endif
+#  endif
+endif
+
+# Serviceability Binaries
+# No SA Support for PPC or zero
+ADD_SA_BINARIES/ppc   =
+ADD_SA_BINARIES/ppc64 =
+ADD_SA_BINARIES/zero  =
+
+EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))
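+# For illustration: on AIX, HS_ARCH is ppc, so the line above appends
+# $(ADD_SA_BINARIES/ppc), which is empty, and no SA binaries are exported.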
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/dtrace.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,27 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# AIX does not build jvm_db
+LIBJVM_DB =
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/fastdebug.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,73 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making debug version of VM
+
+# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
+# Pare down optimization to -O2 if xlC V10.1 is in use.
+OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) $(QV10_OPT_CONSERVATIVE)
+OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
+
+# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
+
+ifeq ($(BUILDARCH), ia64)
+  # Bug in GCC, causes hang.  -O1 will override the -O3 specified earlier
+  OPT_CFLAGS/callGenerator.o += -O1
+  OPT_CFLAGS/ciTypeFlow.o += -O1
+  OPT_CFLAGS/compile.o += -O1
+  OPT_CFLAGS/concurrentMarkSweepGeneration.o += -O1
+  OPT_CFLAGS/doCall.o += -O1
+  OPT_CFLAGS/generateOopMap.o += -O1
+  OPT_CFLAGS/generateOptoStub.o += -O1
+  OPT_CFLAGS/graphKit.o += -O1
+  OPT_CFLAGS/instanceKlass.o += -O1
+  OPT_CFLAGS/interpreterRT_ia64.o += -O1
+  OPT_CFLAGS/output.o += -O1
+  OPT_CFLAGS/parse1.o += -O1
+  OPT_CFLAGS/runtime.o += -O1
+  OPT_CFLAGS/synchronizer.o += -O1
+endif
+
+
+# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
+CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
+
+# Set the environment variable HOTSPARC_GENERIC to "true"
+# to inhibit the effect of the previous line on CFLAGS.
+
+# Linker mapfile
+MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
+
+# xlc 10.1 parameters for ipa linkage.
+#  - remove ipa linkage altogether. Does not seem to benefit performance, 
+#    but increases code footprint.
+#  - this is a debug build in the end. Extra effort for ipa linkage is thus 
+#    not justified.
+LFLAGS_QIPA=
+
+G_SUFFIX = _g
+VERSION = optimized
+SYSDEFS += -DASSERT -DFASTDEBUG
+PICFLAGS = DEFAULT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/jsig.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,87 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Rules to build signal interposition library, used by vm.make
+
+# libjsig.so: signal interposition library
+JSIG = jsig
+LIBJSIG = lib$(JSIG).so
+
+LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
+LIBJSIG_DIZ         = lib$(JSIG).diz
+
+JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
+
+DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
+DEST_JSIG_DIZ       = $(JDK_LIBDIR)/$(LIBJSIG_DIZ)
+
+LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
+
+# We really don't want a mapfile here: this library is small and preloaded
+# using LD_PRELOAD, and making functions private would cause problems with
+# interposing. See CR: 6466665
+# LFLAGS_JSIG += $(MAPFLAG:FILENAME=$(LIBJSIG_MAPFILE))
+
+LFLAGS_JSIG += -D_GNU_SOURCE -D_REENTRANT $(LDFLAGS_HASH_STYLE)
+
+LFLAGS_JSIG += $(BIN_UTILS)
+
+# DEBUG_BINARIES overrides everything; use full -g debug information
+ifeq ($(DEBUG_BINARIES), true)
+  JSIG_DEBUG_CFLAGS = -g
+endif
+
+$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
+	@echo Making signal interposition lib...
+	$(QUIETLY) $(CXX) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
+                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+
+#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
+#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
+#  ifeq ($(STRIP_POLICY),all_strip)
+#	$(QUIETLY) $(STRIP) $@
+#  else
+#    ifeq ($(STRIP_POLICY),min_strip)
+#	$(QUIETLY) $(STRIP) -g $@
+#    # implied else here is no stripping at all
+#    endif
+#  endif
+#  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
+#	$(RM) $(LIBJSIG_DEBUGINFO)
+#  endif
+#endif
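+# Usage sketch, for illustration only (path and class name are hypothetical):
+#   LD_PRELOAD=/path/to/libjsig.so java MyApp       # Linux-style preload
+# On AIX the analogous LDR_PRELOAD/LDR_PRELOAD64 variables serve the same
+# purpose: the interposition library then sees the application's
+# signal()/sigaction() calls before they reach the system libraries.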
+
+install_jsig: $(LIBJSIG)
+	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
+	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
+	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
+	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
+	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
+
+.PHONY: install_jsig
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/jvmti.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,118 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile (jvmti.make) is included from the jvmti.make in the
+# build directories.
+#
+# It knows how to build and run the tools that generate the JVMTI files.
+
+include $(GAMMADIR)/make/aix/makefiles/rules.make
+
+# #########################################################################
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+JvmtiOutDir = $(GENERATED)/jvmtifiles
+
+JvmtiSrcDir = $(GAMMADIR)/src/share/vm/prims
+InterpreterSrcDir = $(GAMMADIR)/src/share/vm/interpreter
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(JvmtiSrcDir)
+VPATH += $(Src_Dirs_V:%=%:)
+
+JvmtiGeneratedNames = \
+        jvmtiEnv.hpp \
+        jvmtiEnter.cpp \
+        jvmtiEnterTrace.cpp \
+        jvmtiEnvRecommended.cpp \
+        bytecodeInterpreterWithChecks.cpp \
+        jvmti.h \
+
+JvmtiEnvFillSource = $(JvmtiSrcDir)/jvmtiEnvFill.java
+JvmtiEnvFillClass = $(JvmtiOutDir)/jvmtiEnvFill.class
+
+JvmtiGenSource = $(JvmtiSrcDir)/jvmtiGen.java
+JvmtiGenClass = $(JvmtiOutDir)/jvmtiGen.class
+
+JvmtiGeneratedFiles = $(JvmtiGeneratedNames:%=$(JvmtiOutDir)/%)
+
+XSLT = $(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+.PHONY: all jvmtidocs clean cleanall
+
+# #########################################################################
+
+all: $(JvmtiGeneratedFiles)
+
+both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl
+
+$(JvmtiGenClass): $(JvmtiGenSource)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiGenSource)
+
+$(JvmtiEnvFillClass): $(JvmtiEnvFillSource)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
+
+$(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnter.cpp -PARAM interface jvmti
+
+$(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp: $(JvmtiGenClass) $(InterpreterSrcDir)/bytecodeInterpreter.cpp $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml -XSL $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl -OUT $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp 
+
+$(JvmtiOutDir)/jvmtiEnterTrace.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnterTrace.cpp -PARAM interface jvmti -PARAM trace Trace
+
+$(JvmtiOutDir)/jvmtiEnvRecommended.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnv.xsl $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiEnvFillClass)
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnv.xsl -OUT $(JvmtiOutDir)/jvmtiEnvStub.cpp
+	$(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiEnvFill $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiOutDir)/jvmtiEnvStub.cpp $(JvmtiOutDir)/jvmtiEnvRecommended.cpp
+
+$(JvmtiOutDir)/jvmtiEnv.hpp: $(both) $(JvmtiSrcDir)/jvmtiHpp.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiHpp.xsl -OUT $(JvmtiOutDir)/jvmtiEnv.hpp
+
+$(JvmtiOutDir)/jvmti.h: $(both) $(JvmtiSrcDir)/jvmtiH.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiH.xsl -OUT $(JvmtiOutDir)/jvmti.h
+
+jvmtidocs:  $(JvmtiOutDir)/jvmti.html 
+
+$(JvmtiOutDir)/jvmti.html: $(both) $(JvmtiSrcDir)/jvmti.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmti.xsl -OUT $(JvmtiOutDir)/jvmti.html
+
+# #########################################################################
+
+clean :
+	rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles)
+
+cleanall :
+	rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles)
+
+# #########################################################################
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/mapfile-vers-debug	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,274 @@
+#
+# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Define public interface.
+
+SUNWprivate_1.1 {
+        global:
+                # JNI
+                JNI_CreateJavaVM;
+                JNI_GetCreatedJavaVMs;
+                JNI_GetDefaultJavaVMInitArgs;
+
+                # JVM
+                JVM_Accept;
+                JVM_ActiveProcessorCount;
+                JVM_AllocateNewArray;
+                JVM_AllocateNewObject;
+                JVM_ArrayCopy;
+                JVM_AssertionStatusDirectives;
+                JVM_Available;
+                JVM_Bind;
+                JVM_ClassDepth;
+                JVM_ClassLoaderDepth;
+                JVM_Clone;
+                JVM_Close;
+                JVM_CX8Field;
+                JVM_CompileClass;
+                JVM_CompileClasses;
+                JVM_CompilerCommand;
+                JVM_Connect;
+                JVM_ConstantPoolGetClassAt;
+                JVM_ConstantPoolGetClassAtIfLoaded;
+                JVM_ConstantPoolGetDoubleAt;
+                JVM_ConstantPoolGetFieldAt;
+                JVM_ConstantPoolGetFieldAtIfLoaded;
+                JVM_ConstantPoolGetFloatAt;
+                JVM_ConstantPoolGetIntAt;
+                JVM_ConstantPoolGetLongAt;
+                JVM_ConstantPoolGetMethodAt;
+                JVM_ConstantPoolGetMethodAtIfLoaded;
+                JVM_ConstantPoolGetMemberRefInfoAt;
+                JVM_ConstantPoolGetSize;
+                JVM_ConstantPoolGetStringAt;
+                JVM_ConstantPoolGetUTF8At;
+                JVM_CountStackFrames;
+                JVM_CurrentClassLoader;
+                JVM_CurrentLoadedClass;
+                JVM_CurrentThread;
+                JVM_CurrentTimeMillis;
+                JVM_DefineClass;
+                JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
+                JVM_DesiredAssertionStatus;
+                JVM_DisableCompiler;
+                JVM_DoPrivileged;
+                JVM_DTraceGetVersion;
+                JVM_DTraceActivate;
+                JVM_DTraceIsProbeEnabled;
+                JVM_DTraceIsSupported;
+                JVM_DTraceDispose;
+                JVM_DumpAllStacks;
+                JVM_DumpThreads;
+                JVM_EnableCompiler;
+                JVM_Exit;
+                JVM_FillInStackTrace;
+                JVM_FindClassFromClass;
+                JVM_FindClassFromClassLoader;
+                JVM_FindClassFromBootLoader;
+                JVM_FindLibraryEntry;
+                JVM_FindLoadedClass;
+                JVM_FindPrimitiveClass;
+                JVM_FindSignal;
+                JVM_FreeMemory;
+                JVM_GC;
+                JVM_GetAllThreads;
+                JVM_GetArrayElement;
+                JVM_GetArrayLength;
+                JVM_GetCPClassNameUTF;
+                JVM_GetCPFieldClassNameUTF;
+                JVM_GetCPFieldModifiers;
+                JVM_GetCPFieldNameUTF;
+                JVM_GetCPFieldSignatureUTF;
+                JVM_GetCPMethodClassNameUTF;
+                JVM_GetCPMethodModifiers;
+                JVM_GetCPMethodNameUTF;
+                JVM_GetCPMethodSignatureUTF;
+                JVM_GetCallerClass;
+                JVM_GetClassAccessFlags;
+                JVM_GetClassAnnotations;
+                JVM_GetClassCPEntriesCount;
+                JVM_GetClassCPTypes;
+                JVM_GetClassConstantPool;
+                JVM_GetClassContext;
+                JVM_GetClassDeclaredConstructors;
+                JVM_GetClassDeclaredFields;
+                JVM_GetClassDeclaredMethods;
+                JVM_GetClassFieldsCount;
+                JVM_GetClassInterfaces;
+                JVM_GetClassLoader;
+                JVM_GetClassMethodsCount;
+                JVM_GetClassModifiers;
+                JVM_GetClassName;
+                JVM_GetClassNameUTF;
+                JVM_GetClassSignature;
+                JVM_GetClassSigners;
+                JVM_GetClassTypeAnnotations;
+                JVM_GetComponentType;
+                JVM_GetDeclaredClasses;
+                JVM_GetDeclaringClass;
+                JVM_GetEnclosingMethodInfo;
+                JVM_GetFieldAnnotations;
+                JVM_GetFieldIxModifiers;
+                JVM_GetFieldTypeAnnotations;
+                JVM_GetHostName;
+                JVM_GetInheritedAccessControlContext;
+                JVM_GetInterfaceVersion;
+                JVM_GetLastErrorString;
+                JVM_GetManagement;
+                JVM_GetMethodAnnotations;
+                JVM_GetMethodDefaultAnnotationValue;
+                JVM_GetMethodIxArgsSize;
+                JVM_GetMethodIxByteCode;
+                JVM_GetMethodIxByteCodeLength;
+                JVM_GetMethodIxExceptionIndexes;
+                JVM_GetMethodIxExceptionTableEntry;
+                JVM_GetMethodIxExceptionTableLength;
+                JVM_GetMethodIxExceptionsCount;
+                JVM_GetMethodIxLocalsCount;
+                JVM_GetMethodIxMaxStack;
+                JVM_GetMethodIxModifiers;
+                JVM_GetMethodIxNameUTF;
+                JVM_GetMethodIxSignatureUTF;
+                JVM_GetMethodParameterAnnotations;
+                JVM_GetMethodParameters;
+                JVM_GetMethodTypeAnnotations;
+                JVM_GetPrimitiveArrayElement;
+                JVM_GetProtectionDomain;
+                JVM_GetSockName;
+                JVM_GetSockOpt;
+                JVM_GetStackAccessControlContext;
+                JVM_GetStackTraceDepth;
+                JVM_GetStackTraceElement;
+                JVM_GetSystemPackage;
+                JVM_GetSystemPackages;
+                JVM_GetThreadStateNames;
+                JVM_GetThreadStateValues;
+                JVM_GetVersionInfo;
+                JVM_Halt;
+                JVM_HoldsLock;
+                JVM_IHashCode;
+                JVM_InitAgentProperties;
+                JVM_InitProperties;
+                JVM_InitializeCompiler;
+                JVM_InitializeSocketLibrary;
+                JVM_InternString;
+                JVM_Interrupt;
+                JVM_InvokeMethod;
+                JVM_IsArrayClass;
+                JVM_IsConstructorIx;
+                JVM_IsInterface;
+                JVM_IsInterrupted;
+                JVM_IsNaN;
+                JVM_IsPrimitiveClass;
+                JVM_IsSameClassPackage;
+                JVM_IsSilentCompiler;
+                JVM_IsSupportedJNIVersion;
+                JVM_IsThreadAlive;
+                JVM_IsVMGeneratedMethodIx;
+                JVM_LatestUserDefinedLoader;
+                JVM_Listen;
+                JVM_LoadClass0;
+                JVM_LoadLibrary;
+                JVM_Lseek;
+                JVM_MaxObjectInspectionAge;
+                JVM_MaxMemory;
+                JVM_MonitorNotify;
+                JVM_MonitorNotifyAll;
+                JVM_MonitorWait;
+                JVM_NanoTime;
+                JVM_NativePath;
+                JVM_NewArray;
+                JVM_NewInstanceFromConstructor;
+                JVM_NewMultiArray;
+                JVM_OnExit;
+                JVM_Open;
+                JVM_RaiseSignal;
+                JVM_RawMonitorCreate;
+                JVM_RawMonitorDestroy;
+                JVM_RawMonitorEnter;
+                JVM_RawMonitorExit;
+                JVM_Read;
+                JVM_Recv;
+                JVM_RecvFrom;
+                JVM_RegisterSignal;
+                JVM_ReleaseUTF;
+                JVM_ResolveClass;
+                JVM_ResumeThread;
+                JVM_Send;
+                JVM_SendTo;
+                JVM_SetArrayElement;
+                JVM_SetClassSigners;
+                JVM_SetLength;
+                JVM_SetNativeThreadName;
+                JVM_SetPrimitiveArrayElement;
+                JVM_SetProtectionDomain;
+                JVM_SetSockOpt;
+                JVM_SetThreadPriority;
+                JVM_Sleep;
+                JVM_Socket;
+                JVM_SocketAvailable;
+                JVM_SocketClose;
+                JVM_SocketShutdown;
+                JVM_StartThread;
+                JVM_StopThread;
+                JVM_SuspendThread;
+                JVM_SupportsCX8;
+                JVM_Sync;
+                JVM_Timeout;
+                JVM_TotalMemory;
+                JVM_TraceInstructions;
+                JVM_TraceMethodCalls;
+                JVM_UnloadLibrary;
+                JVM_Write;
+                JVM_Yield;
+                JVM_handle_linux_signal;
+
+                # debug JVM
+                JVM_AccessVMBooleanFlag;
+                JVM_AccessVMIntFlag;
+                JVM_VMBreakPoint;
+
+                # miscellaneous functions
+                jio_fprintf;
+                jio_printf;
+                jio_snprintf;
+                jio_vfprintf;
+                jio_vsnprintf;
+                fork1;
+                numa_warn;
+                numa_error;
+
+                # Needed because there is no JVM interface for this.
+                sysThreadAvailableStackWithSlack;
+
+                # This is for Forte Analyzer profiling support.
+                AsyncGetCallTrace;
+
+		# INSERT VTABLE SYMBOLS HERE
+
+        local:
+                *;
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/mapfile-vers-jsig	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Define library interface.
+
+SUNWprivate_1.1 {
+        global:
+            JVM_begin_signal_setting;
+            JVM_end_signal_setting;
+            JVM_get_libjsig_version;
+            JVM_get_signal_action;
+            sigaction;
+            signal;
+            sigset;
+        local:
+                *;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/mapfile-vers-product	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,267 @@
+#
+# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Define public interface.
+
+SUNWprivate_1.1 {
+        global:
+                # JNI
+                JNI_CreateJavaVM;
+                JNI_GetCreatedJavaVMs;
+                JNI_GetDefaultJavaVMInitArgs;
+
+                # JVM
+                JVM_Accept;
+                JVM_ActiveProcessorCount;
+                JVM_AllocateNewArray;
+                JVM_AllocateNewObject;
+                JVM_ArrayCopy;
+                JVM_AssertionStatusDirectives;
+                JVM_Available;
+                JVM_Bind;
+                JVM_ClassDepth;
+                JVM_ClassLoaderDepth;
+                JVM_Clone;
+                JVM_Close;
+                JVM_CX8Field;
+                JVM_CompileClass;
+                JVM_CompileClasses;
+                JVM_CompilerCommand;
+                JVM_Connect;
+                JVM_ConstantPoolGetClassAt;
+                JVM_ConstantPoolGetClassAtIfLoaded;
+                JVM_ConstantPoolGetDoubleAt;
+                JVM_ConstantPoolGetFieldAt;
+                JVM_ConstantPoolGetFieldAtIfLoaded;
+                JVM_ConstantPoolGetFloatAt;
+                JVM_ConstantPoolGetIntAt;
+                JVM_ConstantPoolGetLongAt;
+                JVM_ConstantPoolGetMethodAt;
+                JVM_ConstantPoolGetMethodAtIfLoaded;
+                JVM_ConstantPoolGetMemberRefInfoAt;
+                JVM_ConstantPoolGetSize;
+                JVM_ConstantPoolGetStringAt;
+                JVM_ConstantPoolGetUTF8At;
+                JVM_CountStackFrames;
+                JVM_CurrentClassLoader;
+                JVM_CurrentLoadedClass;
+                JVM_CurrentThread;
+                JVM_CurrentTimeMillis;
+                JVM_DefineClass;
+                JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
+                JVM_DesiredAssertionStatus;
+                JVM_DisableCompiler;
+                JVM_DoPrivileged;
+                JVM_DTraceGetVersion;
+                JVM_DTraceActivate;
+                JVM_DTraceIsProbeEnabled;
+                JVM_DTraceIsSupported;
+                JVM_DTraceDispose;
+                JVM_DumpAllStacks;
+                JVM_DumpThreads;
+                JVM_EnableCompiler;
+                JVM_Exit;
+                JVM_FillInStackTrace;
+                JVM_FindClassFromClass;
+                JVM_FindClassFromClassLoader;
+                JVM_FindClassFromBootLoader;
+                JVM_FindLibraryEntry;
+                JVM_FindLoadedClass;
+                JVM_FindPrimitiveClass;
+                JVM_FindSignal;
+                JVM_FreeMemory;
+                JVM_GC;
+                JVM_GetAllThreads;
+                JVM_GetArrayElement;
+                JVM_GetArrayLength;
+                JVM_GetCPClassNameUTF;
+                JVM_GetCPFieldClassNameUTF;
+                JVM_GetCPFieldModifiers;
+                JVM_GetCPFieldNameUTF;
+                JVM_GetCPFieldSignatureUTF;
+                JVM_GetCPMethodClassNameUTF;
+                JVM_GetCPMethodModifiers;
+                JVM_GetCPMethodNameUTF;
+                JVM_GetCPMethodSignatureUTF;
+                JVM_GetCallerClass;
+                JVM_GetClassAccessFlags;
+                JVM_GetClassAnnotations;
+                JVM_GetClassCPEntriesCount;
+                JVM_GetClassCPTypes;
+                JVM_GetClassConstantPool;
+                JVM_GetClassContext;
+                JVM_GetClassDeclaredConstructors;
+                JVM_GetClassDeclaredFields;
+                JVM_GetClassDeclaredMethods;
+                JVM_GetClassFieldsCount;
+                JVM_GetClassInterfaces;
+                JVM_GetClassLoader;
+                JVM_GetClassMethodsCount;
+                JVM_GetClassModifiers;
+                JVM_GetClassName;
+                JVM_GetClassNameUTF;
+                JVM_GetClassSignature;
+                JVM_GetClassSigners;
+                JVM_GetClassTypeAnnotations;
+                JVM_GetComponentType;
+                JVM_GetDeclaredClasses;
+                JVM_GetDeclaringClass;
+                JVM_GetEnclosingMethodInfo;
+                JVM_GetFieldAnnotations;
+                JVM_GetFieldIxModifiers;
+                JVM_GetHostName;
+                JVM_GetInheritedAccessControlContext;
+                JVM_GetInterfaceVersion;
+                JVM_GetLastErrorString;
+                JVM_GetManagement;
+                JVM_GetMethodAnnotations;
+                JVM_GetMethodDefaultAnnotationValue;
+                JVM_GetMethodIxArgsSize;
+                JVM_GetMethodIxByteCode;
+                JVM_GetMethodIxByteCodeLength;
+                JVM_GetMethodIxExceptionIndexes;
+                JVM_GetMethodIxExceptionTableEntry;
+                JVM_GetMethodIxExceptionTableLength;
+                JVM_GetMethodIxExceptionsCount;
+                JVM_GetMethodIxLocalsCount;
+                JVM_GetMethodIxMaxStack;
+                JVM_GetMethodIxModifiers;
+                JVM_GetMethodIxNameUTF;
+                JVM_GetMethodIxSignatureUTF;
+                JVM_GetMethodParameterAnnotations;
+                JVM_GetMethodParameters;
+                JVM_GetPrimitiveArrayElement;
+                JVM_GetProtectionDomain;
+                JVM_GetSockName;
+                JVM_GetSockOpt;
+                JVM_GetStackAccessControlContext;
+                JVM_GetStackTraceDepth;
+                JVM_GetStackTraceElement;
+                JVM_GetSystemPackage;
+                JVM_GetSystemPackages;
+                JVM_GetThreadStateNames;
+                JVM_GetThreadStateValues;
+                JVM_GetVersionInfo;
+                JVM_Halt;
+                JVM_HoldsLock;
+                JVM_IHashCode;
+                JVM_InitAgentProperties;
+                JVM_InitProperties;
+                JVM_InitializeCompiler;
+                JVM_InitializeSocketLibrary;
+                JVM_InternString;
+                JVM_Interrupt;
+                JVM_InvokeMethod;
+                JVM_IsArrayClass;
+                JVM_IsConstructorIx;
+                JVM_IsInterface;
+                JVM_IsInterrupted;
+                JVM_IsNaN;
+                JVM_IsPrimitiveClass;
+                JVM_IsSameClassPackage;
+                JVM_IsSilentCompiler;
+                JVM_IsSupportedJNIVersion;
+                JVM_IsThreadAlive;
+                JVM_IsVMGeneratedMethodIx;
+                JVM_LatestUserDefinedLoader;
+                JVM_Listen;
+                JVM_LoadClass0;
+                JVM_LoadLibrary;
+                JVM_Lseek;
+                JVM_MaxObjectInspectionAge;
+                JVM_MaxMemory;
+                JVM_MonitorNotify;
+                JVM_MonitorNotifyAll;
+                JVM_MonitorWait;
+                JVM_NanoTime;
+                JVM_NativePath;
+                JVM_NewArray;
+                JVM_NewInstanceFromConstructor;
+                JVM_NewMultiArray;
+                JVM_OnExit;
+                JVM_Open;
+                JVM_RaiseSignal;
+                JVM_RawMonitorCreate;
+                JVM_RawMonitorDestroy;
+                JVM_RawMonitorEnter;
+                JVM_RawMonitorExit;
+                JVM_Read;
+                JVM_Recv;
+                JVM_RecvFrom;
+                JVM_RegisterSignal;
+                JVM_ReleaseUTF;
+                JVM_ResolveClass;
+                JVM_ResumeThread;
+                JVM_Send;
+                JVM_SendTo;
+                JVM_SetArrayElement;
+                JVM_SetClassSigners;
+                JVM_SetLength;
+                JVM_SetNativeThreadName;
+                JVM_SetPrimitiveArrayElement;
+                JVM_SetProtectionDomain;
+                JVM_SetSockOpt;
+                JVM_SetThreadPriority;
+                JVM_Sleep;
+                JVM_Socket;
+                JVM_SocketAvailable;
+                JVM_SocketClose;
+                JVM_SocketShutdown;
+                JVM_StartThread;
+                JVM_StopThread;
+                JVM_SuspendThread;
+                JVM_SupportsCX8;
+                JVM_Sync;
+                JVM_Timeout;
+                JVM_TotalMemory;
+                JVM_TraceInstructions;
+                JVM_TraceMethodCalls;
+                JVM_UnloadLibrary;
+                JVM_Write;
+                JVM_Yield;
+                JVM_handle_linux_signal;
+
+                # miscellaneous functions
+                jio_fprintf;
+                jio_printf;
+                jio_snprintf;
+                jio_vfprintf;
+                jio_vsnprintf;
+                fork1;
+                numa_warn;
+                numa_error;
+
+                # Needed because there is no JVM interface for this.
+                sysThreadAvailableStackWithSlack;
+
+                # This is for Forte Analyzer profiling support.
+                AsyncGetCallTrace;
+
+		# INSERT VTABLE SYMBOLS HERE
+
+        local:
+                *;
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/ppc64.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,94 @@
+#
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Produce 64 bits object files.
+CFLAGS += -q64
+
+# Balanced tuning for recent versions of the POWER architecture (if supported by xlc).
+QTUNE=$(if $(CXX_SUPPORTS_BALANCED_TUNING),balanced,pwr5)
+
+# Try to speed up the interpreter: use ppc64 instructions and inline 
+# glue code for external functions.
+OPT_CFLAGS += -qarch=ppc64 -qtune=$(QTUNE) -qinlglue
+
+# We need variable length arrays
+CFLAGS += -qlanglvl=c99vla
+# Just to check for unwanted macro redefinitions
+CFLAGS += -qlanglvl=noredefmac
+
+# Suppress those "implicit private" warnings xlc gives.
+#  - The omitted keyword "private" is assumed for base class "...".
+CFLAGS += -qsuppress=1540-0198
+
+# Suppress the following numerous warnings:
+#  - 1540-1090 (I) The destructor of "..." might not be called.
+#  - 1500-010: (W) WARNING in ...: Infinite loop.  Program may not stop.
+#    There are several infinite loops in the VM; suppress this warning.
+CFLAGS += -qsuppress=1540-1090 -qsuppress=1500-010
+
+# Suppress
+#  - 1540-1088 (W) The exception specification is being ignored.
+# caused by throw() in the declaration of new() in nmethod.hpp.
+CFLAGS += -qsuppress=1540-1088
+
+# Turn off floating-point optimizations that may alter program semantics
+OPT_CFLAGS += -qstrict
+
+# Disable aggressive optimizations for functions in sharedRuntimeTrig.cpp 
+# and sharedRuntimeTrans.cpp on ppc64. 
+# -qstrict turns off the following optimizations:
+#   * Performing code motion and scheduling on computations such as loads
+#     and floating-point computations that may trigger an exception.
+#   * Relaxing conformance to IEEE rules.
+#   * Reassociating floating-point expressions.
+# When using '-qstrict' there still remains one problem
+# in javasoft.sqe.tests.api.java.lang.Math.sin5Tests when run in compile-all
+# mode, so don't optimize sharedRuntimeTrig.cpp at all.
+OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
+OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT)
+
+# xlC 10.1 parameters for ipa compile.
+QIPA_COMPILE=$(if $(CXX_IS_V10),-qipa)
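+# For illustration: when CXX_IS_V10 is non-empty, QIPA_COMPILE expands to
+# -qipa; otherwise it expands to nothing, so older compilers get no ipa flag.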
+
+# xlC 10.1 parameters for aggressive optimization:
+# - qhot=level=1: Most aggressive loop optimizations.
+# - qignerrno: Assume errno is not modified by system calls.
+# - qinline: Inline method calls. No suboptions for c++ compiles.
+# - qxflag=ASMMIDCOALFIX: Activate fix for -O3 problem in interpreter loop.
+# - qxflag=asmfastsync: Activate fix for performance problem with inline assembler with memory clobber.
+QV10_OPT=$(if $(CXX_IS_V10),-qxflag=ASMMIDCOALFIX -qxflag=asmfastsync)
+QV10_OPT_AGGRESSIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
+QV10_OPT_CONSERVATIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
+
+# Disallow inlining for synchronizer.cpp, but perform -O3 optimizations.
+OPT_CFLAGS/synchronizer.o = $(OPT_CFLAGS) -qnoinline
+
+# Set all the xlC V10.1 options here.
+OPT_CFLAGS += $(QIPA_COMPILE) $(QV10_OPT) $(QV10_OPT_AGGRESSIVE)
+
+export OBJECT_MODE=64
+
+# Also build launcher as 64 bit executable.
+LAUNCHERFLAGS += -q64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/product.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,58 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making optimized version of Gamma VM
+# (This is the "product", not the "release" version.)
+
+# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
+OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS)
+OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
+
+# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
+
+# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
+CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
+
+# Set the environment variable HOTSPARC_GENERIC to "true"
+# to inhibit the effect of the previous line on CFLAGS.
+
+# Linker mapfile
+MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-product
+
+# Remove ipa linkage altogether. Does not seem to benefit performance, but increases code footprint.
+LFLAGS_QIPA=
+
+SYSDEFS += -DPRODUCT
+VERSION = optimized
+
+# Use -g to strip the library, as -x would discard its symbol table; -x is
+# fine for executables.
+# Note: these macros are not used in .debuginfo configs
+STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
+STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
+
+# If we can create .debuginfo files, then the VM is stripped in vm.make
+# and this macro is not used.
+# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/rules.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,203 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Common rules/macros for the vm, adlc.
+
+# Tell make that .cpp is important
+.SUFFIXES: .cpp $(SUFFIXES)
+
+DEMANGLER       = c++filt
+DEMANGLE        = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
+
+# $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
+CC_COMPILE       = $(CC) $(CXXFLAGS) $(CFLAGS)
+CXX_COMPILE      = $(CXX) $(CXXFLAGS) $(CFLAGS)
+
+AS.S            = $(AS) $(ASFLAGS)
+
+COMPILE.CC       = $(CC_COMPILE) -c
+GENASM.CC        = $(CC_COMPILE) -S
+LINK.CC          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_LIB.CC      = $(CC) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CC    = $(CC_COMPILE) -E
+
+COMPILE.CXX      = $(CXX_COMPILE) -c
+GENASM.CXX       = $(CXX_COMPILE) -S
+LINK.CXX         = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_NOPROF.CXX  = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
+LINK_LIB.CXX     = $(CXX) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CXX   = $(CXX_COMPILE) -E
+
+# cross compiling the jvm with c2 requires host compilers to build
+# adlc tool
+
+HOST.CXX_COMPILE      = $(HOSTCXX) $(CXXFLAGS) $(CFLAGS)
+HOST.COMPILE.CXX      = $(HOST.CXX_COMPILE) -c
+HOST.LINK_NOPROF.CXX  = $(HOSTCXX) $(LFLAGS) $(AOUT_FLAGS)
+
+
+# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
+REMOVE_TARGET   = rm -f $@
+
+# Note use of ALT_BOOTDIR to explicitly specify location of java and
+# javac; this is the same environment variable used in the J2SE build
+# process for overriding the default spec, which is BOOTDIR.
+# Note also that we fall back to using JAVA_HOME if neither of these is
+# specified.
+
+ifdef ALT_BOOTDIR
+
+RUN.JAVA  = $(ALT_BOOTDIR)/bin/java
+RUN.JAVAP = $(ALT_BOOTDIR)/bin/javap
+RUN.JAVAH = $(ALT_BOOTDIR)/bin/javah
+RUN.JAR   = $(ALT_BOOTDIR)/bin/jar
+COMPILE.JAVAC = $(ALT_BOOTDIR)/bin/javac
+COMPILE.RMIC = $(ALT_BOOTDIR)/bin/rmic
+BOOT_JAVA_HOME = $(ALT_BOOTDIR)
+
+else
+
+ifdef BOOTDIR
+
+RUN.JAVA  = $(BOOTDIR)/bin/java
+RUN.JAVAP = $(BOOTDIR)/bin/javap
+RUN.JAVAH = $(BOOTDIR)/bin/javah
+RUN.JAR   = $(BOOTDIR)/bin/jar
+COMPILE.JAVAC = $(BOOTDIR)/bin/javac
+COMPILE.RMIC  = $(BOOTDIR)/bin/rmic
+BOOT_JAVA_HOME = $(BOOTDIR)
+
+else
+
+ifdef JAVA_HOME
+
+RUN.JAVA  = $(JAVA_HOME)/bin/java
+RUN.JAVAP = $(JAVA_HOME)/bin/javap
+RUN.JAVAH = $(JAVA_HOME)/bin/javah
+RUN.JAR   = $(JAVA_HOME)/bin/jar
+COMPILE.JAVAC = $(JAVA_HOME)/bin/javac
+COMPILE.RMIC  = $(JAVA_HOME)/bin/rmic
+BOOT_JAVA_HOME = $(JAVA_HOME)
+
+else
+
+# Take the tools from the PATH if ALT_BOOTDIR, BOOTDIR and JAVA_HOME are not
+# defined. Note that this supports a hotspot build without SA. To build
+# SA along with hotspot, you need to define ALT_BOOTDIR, BOOTDIR or JAVA_HOME.
+
+RUN.JAVA  = java
+RUN.JAVAP = javap
+RUN.JAVAH = javah
+RUN.JAR   = jar
+COMPILE.JAVAC = javac
+COMPILE.RMIC  = rmic
+
+endif
+endif
+endif
+
+COMPILE.JAVAC += $(BOOTSTRAP_JAVAC_FLAGS)
+
+SUM = /usr/bin/sum
+
+# 'gmake MAKE_VERBOSE=y' gives all the gory details.
+QUIETLY$(MAKE_VERBOSE)  = @
+RUN.JAR$(MAKE_VERBOSE) += >/dev/null
+
+# Settings for javac
+BOOT_SOURCE_LANGUAGE_VERSION = 6
+BOOT_TARGET_CLASS_VERSION = 6
+JAVAC_FLAGS = -g -encoding ascii
+BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
+
+# With parallel makes, print a message at the end of compilation.
+ifeq    ($(findstring j,$(MFLAGS)),j)
+COMPILE_DONE    = && { echo Done with $<; }
+endif
+
+# Include $(NONPIC_OBJ_FILES) definition
+ifndef LP64
+include $(GAMMADIR)/make/pic.make
+endif
+
+include $(GAMMADIR)/make/altsrc.make
+
+# The non-PIC object files are only generated for 32 bit platforms.
+ifdef LP64
+%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
+else
+%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
+	   $(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
+	   $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
+endif
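+# For illustration only (the object name is hypothetical): if NONPIC_OBJ_FILES
+# contains sharedRuntimeTrig.o, the findstring match strips $(VM_PICFLAG) from
+# the compile line for that object only; every other object is compiled with
+# the PIC flag left in place.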
+
+%.o: %.s
+	@echo Assembling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(AS.S) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
+
+%.s: %.cpp
+	@echo Generating assembly for $<
+	$(QUIETLY) $(GENASM.CXX) -o $@ $<
+	$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
+
+# Intermediate files (for debugging macros)
+%.i: %.cpp
+	@echo Preprocessing $< to $@
+	$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
+
+#  Override gnumake built-in rules which do sccs get operations badly.
+#  (They put the checked out code in the current directory, not in the
+#  directory of the original file.)  Since this is a symptom of a teamware
+#  failure, and since not all problems can be detected by gnumake due
+#  to incomplete dependency checking... just complain and stop.
+%:: s.%
+	@echo "========================================================="
+	@echo File $@
+	@echo is out of date with respect to its SCCS file.
+	@echo This file may be from an unresolved Teamware conflict.
+	@echo This is also a symptom of a Teamware bringover/putback failure
+	@echo in which SCCS files are updated but not checked out.
+	@echo Check for other out of date files in your workspace.
+	@echo "========================================================="
+	@exit 666
+
+%:: SCCS/s.%
+	@echo "========================================================="
+	@echo File $@
+	@echo is out of date with respect to its SCCS file.
+	@echo This file may be from an unresolved Teamware conflict.
+	@echo This is also a symptom of a Teamware bringover/putback failure
+	@echo in which SCCS files are updated but not checked out.
+	@echo Check for other out of date files in your workspace.
+	@echo "========================================================="
+	@exit 666
+
+.PHONY: default
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/sa.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,116 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (sa.make) is included from the sa.make in the
+# build directories.
+
+# This makefile is used to build the Serviceability Agent Java code
+# and to generate the JNI header files for its native methods.
+
+include $(GAMMADIR)/make/aix/makefiles/rules.make
+
+include $(GAMMADIR)/make/defs.make
+
+AGENT_DIR = $(GAMMADIR)/agent
+
+include $(GAMMADIR)/make/sa.files
+
+TOPDIR    = $(shell echo `pwd`)
+GENERATED = $(TOPDIR)/../generated
+
+# tools.jar is needed by the JDI - SA binding
+SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
+
+# TODO: if it's a modules image, check if SA module is installed.
+MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
+
+AGENT_FILES_LIST := $(GENERATED)/agent.classes.list
+
+SA_CLASSDIR = $(GENERATED)/saclasses
+
+SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)"
+
+SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties
+
+# If $(AGENT_DIR) does not exist, we don't build SA.
+# Also, we don't build SA on Itanium, PowerPC, ARM or zero.
+
+all:
+	if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" \
+             -a "$(SRCARCH)" != "arm" \
+             -a "$(SRCARCH)" != "ppc" \
+             -a "$(SRCARCH)" != "zero" ] ; then \
+	   $(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
+	fi
+
+$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
+	$(QUIETLY) echo "Making $@"
+	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
+	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
+	  exit 1; \
+	fi
+	$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
+	  echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
+	  echo ""; \
+	  exit 1; \
+	fi
+	$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
+	  mkdir -p $(SA_CLASSDIR);        \
+	fi
+# Note: When indented, make tries to execute the '$(shell' comment.
+# In some environments, cmd processors have limited line length.
+# To prevent the javac invocation in the next block from using
+# a very long cmd line, we use javac's @file-list option. We
+# generate the file lists using make's built-in 'foreach' control
+# flow which also avoids cmd processor line length issues. Since
+# the 'foreach' is done as part of make's macro expansion phase,
+# the initialization of the lists is also done in the same phase
+# using '$(shell rm ...' instead of using the more traditional
+# 'rm ...' rule.
+	$(shell rm -rf $(AGENT_FILES_LIST))
+# gnumake 3.78.1 does not accept the *'s that
+# are in AGENT_FILES, so use the shell to expand them.
+# Be extra careful not to produce overly long command lines in the shell!
+	$(foreach file,$(AGENT_FILES),$(shell ls -1 $(file) >> $(AGENT_FILES_LIST)))
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES_LIST)
+	$(QUIETLY) $(REMOTE) $(COMPILE.RMIC)  -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
+	$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
+	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+	$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+	$(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+	$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+	$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
+	$(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ .
+	$(QUIETLY) $(REMOTE) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.sparc.SPARCThreadContext
+
+clean:
+	rm -rf $(SA_CLASSDIR)
+	rm -rf $(GENERATED)/sa-jdi.jar
+	rm -rf $(AGENT_FILES_LIST)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/saproc.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,117 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+include $(GAMMADIR)/make/defs.make
+
+# Rules to build serviceability agent library, used by vm.make
+
+# libsaproc.so: serviceability agent
+
+SAPROC = saproc
+LIBSAPROC = lib$(SAPROC).so
+
+LIBSAPROC_DEBUGINFO   = lib$(SAPROC).debuginfo
+LIBSAPROC_DIZ         = lib$(SAPROC).diz
+
+AGENT_DIR = $(GAMMADIR)/agent
+
+SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
+
+SASRCFILES = $(SASRCDIR)/salibelf.c                   \
+             $(SASRCDIR)/symtab.c                     \
+             $(SASRCDIR)/libproc_impl.c               \
+             $(SASRCDIR)/ps_proc.c                    \
+             $(SASRCDIR)/ps_core.c                    \
+             $(SASRCDIR)/LinuxDebuggerLocal.c         \
+
+SAMAPFILE = $(SASRCDIR)/mapfile
+
+DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
+DEST_SAPROC_DIZ       = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ)
+
+# DEBUG_BINARIES overrides everything, use full -g debug information
+ifeq ($(DEBUG_BINARIES), true)
+  SA_DEBUG_CFLAGS = -g
+endif
+
+# if $(AGENT_DIR) does not exist, we don't build SA
+# also, we don't build SA on Itanium, PPC, ARM or zero.
+
+ifneq ($(wildcard $(AGENT_DIR)),)
+ifneq ($(filter-out ia64 arm ppc zero,$(SRCARCH)),)
+  BUILDLIBSAPROC = $(LIBSAPROC)
+endif
+endif
+
+
+SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE)
+
+$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
+	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
+	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
+	  exit 1; \
+	fi
+	@echo Making SA debugger back-end...
+	$(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE                   \
+		   -D_FILE_OFFSET_BITS=64                               \
+                   $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG)     \
+		   $(BIN_UTILS)						\
+	           -I$(SASRCDIR)                                        \
+	           -I$(GENERATED)                                       \
+	           -I$(BOOT_JAVA_HOME)/include                          \
+	           -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family)    \
+	           $(SASRCFILES)                                        \
+	           $(SA_LFLAGS)                                         \
+	           $(SA_DEBUG_CFLAGS)                                   \
+	           -o $@                                                \
+	           -lthread_db
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+    # implied else here is no stripping at all
+    endif
+  endif
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
+	$(RM) $(LIBSAPROC_DEBUGINFO)
+  endif
+endif
+
+install_saproc: $(BUILDLIBSAPROC)
+	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
+	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
+	  test -f $(LIBSAPROC_DEBUGINFO) &&                  \
+	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
+	  test -f $(LIBSAPROC_DIZ) &&                  \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ); \
+	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
+	fi
+
+.PHONY: install_saproc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/top.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,144 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# top.make is included in the Makefile in the build directories.
+# It DOES NOT include the vm dependency info in order to be faster.
+# Its main job is to implement the incremental form of make lists.
+# It also:
+#   -builds and runs adlc via adlc.make
+#   -generates JVMTI source and docs via jvmti.make (JSR-163)
+#   -generates sa-jdi.jar (JDI binding to core files)
+
+# It assumes the following flags are set:
+# CFLAGS Platform_file, Src_Dirs_I, Src_Dirs_V, SYSDEFS, AOUT, Obj_Files
+
+# -- D. Ungar (5/97) from a file by Bill Bush
+
+# Don't override the built-in $(MAKE).
+# Instead, use "gmake" (or "gnumake") from the command line.  --Rose
+#MAKE = gmake
+
+include $(GAMMADIR)/make/altsrc.make
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+VM          = $(GAMMADIR)/src/share/vm
+Plat_File   = $(Platform_file)
+CDG         = cd $(GENERATED); 
+
+ifneq ($(USE_PRECOMPILED_HEADER),0)
+UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS) 
+else
+UpdatePCH = \# precompiled header is not used
+endif
+
+Cached_plat = $(GENERATED)/platform.current
+
+AD_Dir   = $(GENERATED)/adfiles
+ADLC     = $(AD_Dir)/adlc
+AD_Spec  = $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad)
+AD_Src   = $(call altsrc-replace,$(HS_COMMON_SRC)/share/vm/adlc)
+AD_Names = ad_$(Platform_arch_model).hpp ad_$(Platform_arch_model).cpp
+AD_Files = $(AD_Names:%=$(AD_Dir)/%)
+
+# AD_Files_If_Required/COMPILER1 = ad_stuff
+AD_Files_If_Required/COMPILER2 = ad_stuff
+AD_Files_If_Required/TIERED = ad_stuff
+AD_Files_If_Required = $(AD_Files_If_Required/$(TYPE))
+
+# Weird argument adjustment for "gnumake -j..."
+adjust-mflags   = $(GENERATED)/adjust-mflags
+MFLAGS-adjusted = -r `$(adjust-mflags) "$(MFLAGS)" "$(HOTSPOT_BUILD_JOBS)"`
+
+
+# default target: update lists, make vm
+# done in stages to force sequential order with parallel make
+#
+
+default: vm_build_preliminaries the_vm
+	@echo All done.
+
+# This is an explicit dependency for the sake of parallel makes.
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
+	@# We need a null action here, so implicit rules don't get consulted.
+
+$(Cached_plat): $(Plat_File)
+	$(CDG) cp $(Plat_File) $(Cached_plat)
+
+# make AD files as necessary
+ad_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f adlc.make $(MFLAGS-adjusted)
+
+# generate JVMTI files from the spec
+jvmti_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
+
+# generate trace files
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
+# generate SA jar files and native header
+sa_stuff:
+	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
+
+# and the VM: must use other makefile with dependencies included
+
+# We have to go to great lengths to get control over the -jN argument
+# to the recursive invocation of vm.make.  The problem is that gnumake
+# resets -jN to -j1 for recursive runs.  (How helpful.)
+# Note that the user must specify the desired parallelism level via a
+# command-line or environment variable named HOTSPOT_BUILD_JOBS.
+$(adjust-mflags): $(GAMMADIR)/make/$(Platform_os_family)/makefiles/adjust-mflags.sh
+	@+rm -f $@ $@+
+	@+cat $< > $@+
+	@+chmod +x $@+
+	@+mv $@+ $@
+
+the_vm: vm_build_preliminaries $(adjust-mflags)
+	@$(UpdatePCH)
+	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
+
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
+
+# next rules support "make foo.[ois]"
+
+%.o %.i %.s:
+	$(UpdatePCH) 
+	$(MAKE) -f vm.make $(MFLAGS) $@
+	#$(MAKE) -f vm.make $@
+
+# this should force everything to be rebuilt
+clean: 
+	rm -f $(GENERATED)/*.class
+	$(MAKE) -f vm.make $(MFLAGS) clean
+
+# just in case it doesn't, this should do it
+realclean:
+	$(MAKE) -f vm.make $(MFLAGS) clean
+	rm -fr $(GENERATED)
+
+.PHONY: default vm_build_preliminaries
+.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean
+.PHONY: checks check_os_version install
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/trace.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,120 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (trace.make) is included from the trace.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate trace files.
+
+include $(GAMMADIR)/make/linux/makefiles/rules.make
+include $(GAMMADIR)/make/altsrc.make
+
+# #########################################################################
+
+HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
+  echo "true"; else echo "false";\
+  fi)
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+JvmtiOutDir = $(GENERATED)/jvmtifiles
+TraceOutDir   = $(GENERATED)/tracefiles
+
+TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
+TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
+VPATH += $(Src_Dirs_V:%=%:)
+
+TraceGeneratedNames =    \
+    traceEventClasses.hpp \
+    traceEventIds.hpp     \
+    traceTypes.hpp
+
+ifeq ($(HAS_ALT_SRC), true)
+TraceGeneratedNames +=    \
+    traceRequestables.hpp \
+    traceEventControl.hpp
+
+ifneq ($(INCLUDE_TRACE), false)
+TraceGeneratedNames += traceProducer.cpp
+endif
+
+endif
+
+TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
+
+XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
+	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
+ifeq ($(HAS_ALT_SRC), true)
+	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
+endif
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(TraceGeneratedFiles)
+
+GENERATE_CODE= \
+  $(QUIETLY) echo Generating $@; \
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
+  test -f $@
+
+$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+ifeq ($(HAS_ALT_SRC), false)
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+else
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+endif
+
+# #########################################################################
+
+clean cleanall:
+	rm $(TraceGeneratedFiles)
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/vm.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,377 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Rules to build JVM and related libraries, included from vm.make in the build
+# directory.
+
+# Common build rules.
+MAKEFILES_DIR=$(GAMMADIR)/make/$(Platform_os_family)/makefiles
+include $(MAKEFILES_DIR)/rules.make
+include $(GAMMADIR)/make/altsrc.make
+
+default: build
+
+#----------------------------------------------------------------------
+# Defs
+
+GENERATED     = ../generated
+DEP_DIR       = $(GENERATED)/dependencies
+
+# reads the generated files defining the set of .o's and the .o .h dependencies
+-include $(DEP_DIR)/*.d
+
+# read machine-specific adjustments (%%% should do this via buildtree.make?)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  include $(MAKEFILES_DIR)/zeroshark.make
+else
+  include $(MAKEFILES_DIR)/$(BUILDARCH).make
+endif
+
+# set VPATH so make knows where to look for source files
+# Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
+# The adfiles directory contains ad_<arch>.[ch]pp.
+# The jvmtifiles directory contains jvmti*.[ch]pp
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+VPATH += $(Src_Dirs_V:%=%:)
+
+# set INCLUDES for C preprocessor.
+Src_Dirs_I += $(GENERATED)
+# The order is important for the precompiled headers to work.
+INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)
+
+# SYMFLAG is used by {jsig,saproc}.make
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  # always build with debug info when we can create .debuginfo files
+  SYMFLAG = -g
+else
+  ifeq (${VERSION}, debug)
+    SYMFLAG = -g
+  else
+    SYMFLAG =
+  endif
+endif
+
+# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
+# in $(GAMMADIR)/make/defs.make
+ifeq ($(HOTSPOT_BUILD_VERSION),)
+  BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
+else
+  BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)\""
+endif
+
+# The following variables are defined in the generated flags.make file.
+BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HS_BUILD_VER)\""
+JRE_VERSION   = -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\""
+HS_LIB_ARCH   = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\"
+BUILD_TARGET  = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
+BUILD_USER    = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
+VM_DISTRO     = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
+
+CXXFLAGS =           \
+  ${SYSDEFS}         \
+  ${INCLUDES}        \
+  ${BUILD_VERSION}   \
+  ${BUILD_TARGET}    \
+  ${BUILD_USER}      \
+  ${HS_LIB_ARCH}     \
+  ${VM_DISTRO}
+
+# This is VERY important! The version define must only be supplied to vm_version.o
+# If not, ccache will not re-use the cache at all, since the version string might contain
+# a time and date.
+CXXFLAGS/vm_version.o += ${JRE_VERSION}
+
+CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
+
+# File specific flags
+CXXFLAGS += $(CXXFLAGS/BYFILE)
+
+
+# CFLAGS_WARN holds compiler options to suppress/enable warnings.
+CFLAGS += $(CFLAGS_WARN/BYFILE)
+
+# Do not use C++ exception handling
+CFLAGS += $(CFLAGS/NOEX)
+
+# Extra flags from gnumake's invocation or environment
+CFLAGS += $(EXTRA_CFLAGS)
+LFLAGS += $(EXTRA_CFLAGS)
+
+# Don't set the executable bit on the stack segment;
+# the same could be done by a separate execstack command
+#LFLAGS += -Xlinker -z -Xlinker noexecstack
+
+LIBS += -lm -ldl -lpthread
+
+# By default, link the *.o into the library, not the executable.
+LINK_INTO$(LINK_INTO) = LIBJVM
+
+JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
+
+#----------------------------------------------------------------------
+# jvm_db & dtrace
+include $(MAKEFILES_DIR)/dtrace.make
+
+#----------------------------------------------------------------------
+# JVM
+
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+
+CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
+
+LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
+LIBJVM_DIZ         = lib$(JVM).diz
+
+SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
+
+SOURCE_PATHS=\
+  $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \
+      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \))
+SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm
+SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
+SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(SRCARCH)/vm
+SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_family)_$(SRCARCH)/vm
+
+CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+
+ifneq ($(INCLUDE_TRACE), false)
+CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
+  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
+  fi)
+endif
+
+COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
+COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
+
+COMPILER2_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/opto)
+COMPILER2_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/libadt)
+COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/opto
+COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/libadt
+COMPILER2_PATHS += $(GENERATED)/adfiles
+
+SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
+
+# Include dirs per type.
+Src_Dirs/CORE      := $(CORE_PATHS)
+Src_Dirs/COMPILER1 := $(CORE_PATHS) $(COMPILER1_PATHS)
+Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS)
+Src_Dirs/TIERED    := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS)
+Src_Dirs/ZERO      := $(CORE_PATHS)
+Src_Dirs/SHARK     := $(CORE_PATHS) $(SHARK_PATHS)
+Src_Dirs := $(Src_Dirs/$(TYPE))
+
+COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
+COMPILER1_SPECIFIC_FILES := c1_\*
+SHARK_SPECIFIC_FILES     := shark
+ZERO_SPECIFIC_FILES      := zero
+
+# Always exclude these.
+Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
+
+# Exclude per type.
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
+
+Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
+
+# Disable ELF decoder on AIX (AIX uses XCOFF).
+Src_Files_EXCLUDE += decoder_elf.cpp elfFile.cpp elfStringTable.cpp elfSymbolTable.cpp elfFuncDescTable.cpp
+
+# Special handling of arch model.
+ifeq ($(Platform_arch_model), x86_32)
+Src_Files_EXCLUDE += \*x86_64\*
+endif
+ifeq ($(Platform_arch_model), x86_64)
+Src_Files_EXCLUDE += \*x86_32\*
+endif
+
+# Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE.
+define findsrc
+	$(notdir $(shell find $(1)/. ! -name . -prune \
+		-a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \
+		-a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \)))
+endef
+
+Src_Files := $(foreach e,$(Src_Dirs),$(call findsrc,$(e)))
+
+Obj_Files = $(sort $(addsuffix .o,$(basename $(Src_Files))))
+
+JVM_OBJ_FILES = $(Obj_Files)
+
+vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
+
+mapfile : $(MAPFILE) vm.def
+	rm -f $@
+	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
+                 { system ("cat vm.def"); }		\
+               else					\
+                 { print $$0 }				\
+             }' > $@ < $(MAPFILE)
+
+mapfile_reorder : mapfile $(REORDERFILE)
+	rm -f $@
+	cat $^ > $@
+
+vm.def: $(Res_Files) $(Obj_Files)
+	sh $(GAMMADIR)/make/aix/makefiles/build_vm_def.sh *.o > $@
+
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+  STATIC_CXX = false
+else
+  ifeq ($(ZERO_LIBARCH), ppc64)
+    STATIC_CXX = false
+  else
+    STATIC_CXX = true
+  endif
+endif
+
+ifeq ($(LINK_INTO),AOUT)
+  LIBJVM.o                 =
+  LIBJVM_MAPFILE           =
+  LIBS_VM                  = $(LIBS)
+else
+  LIBJVM.o                 = $(JVM_OBJ_FILES)
+  LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder
+  LFLAGS_VM$(LDNOMAP)      += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
+# xlC_r ignores the -o= syntax
+# LFLAGS_VM                += $(SONAMEFLAG:SONAME=$(LIBJVM))
+
+  # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
+  # get around library dependency and compatibility issues. Must use gcc not
+  # g++ to link.
+  LIBS_VM                  += $(STATIC_STDCXX) $(LIBS)
+endif
+
+LINK_VM = $(LINK_LIB.CXX)
+
+# create loadmap for libjvm.so by default. Helps in diagnosing some problems.
+LFLAGS_VM += -bloadmap:libjvm.loadmap
+
+# rule for building precompiled header
+$(PRECOMPILED_HEADER):
+	$(QUIETLY) echo Generating precompiled header $@
+	$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
+
+# making the library:
+
+ifneq ($(JVM_BASE_ADDR),)
+# By default shared library is linked at base address == 0. Modify the
+# linker script if JVM prefers a different base location. It can also be
+# implemented with 'prelink -r'. But 'prelink' is not (yet) available on
+# our build platform (AS-2.1).
+LD_SCRIPT = libjvm.so.lds
+$(LD_SCRIPT): $(LIBJVM_MAPFILE)
+	$(QUIETLY) {                                                \
+	  rm -rf $@;                                                \
+	  $(LINK_VM) -Wl,--verbose $(LFLAGS_VM) 2>&1             |  \
+	    sed -e '/^======/,/^======/!d'                          \
+		-e '/^======/d'                                     \
+		-e 's/0\( + SIZEOF_HEADERS\)/$(JVM_BASE_ADDR)\1/'   \
+		> $@;                                               \
+	}
+LD_SCRIPT_FLAG = -Wl,-T,$(LD_SCRIPT)
+endif
+
+# With more recent Redhat releases (or the cutting edge version Fedora), if
+# SELinux is configured to be enabled, the runtime linker will fail to apply
+# the text relocation to libjvm.so considering that it is built as a non-PIC
+# DSO. To workaround that, we run chcon to libjvm.so after it is built. See
+# details in bug 6538311.
+$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
+	$(QUIETLY) {                                                      \
+	    echo Linking vm...;                                           \
+	    $(LINK_LIB.CXX/PRE_HOOK)                                      \
+	    $(LINK_VM) $(LD_SCRIPT_FLAG)                                  \
+		       $(LFLAGS_VM) -o $@ $(sort $(LIBJVM.o)) $(LIBS_VM); \
+	    $(LINK_LIB.CXX/POST_HOOK)                                     \
+	    rm -f $@.1; ln -s $@ $@.1;                                    \
+	}
+# No security contexts on AIX
+#           if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then              \
+#	      if [ -x /usr/sbin/selinuxenabled ] ; then                 \
+#	        /usr/sbin/selinuxenabled;                               \
+#               if [ $$? = 0 ] ; then					\
+#		  /usr/bin/chcon -t textrel_shlib_t $@;                 \
+#		  if [ $$? != 0 ]; then                                 \
+#		    echo "ERROR: Cannot chcon $@";			\
+#		  fi							\
+#	        fi							\
+#	      fi                                                        \
+#           fi 							        \
+#	}
+
+#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
+#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
+#    ifeq ($(STRIP_POLICY),all_strip)
+#	$(QUIETLY) $(STRIP) $@
+#    else
+#      ifeq ($(STRIP_POLICY),min_strip)
+#	$(QUIETLY) $(STRIP) -g $@
+#      # implied else here is no stripping at all
+#      endif
+#    endif
+#    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
+#	$(RM) $(LIBJVM_DEBUGINFO)
+#  endif
+#endif
+
+DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
+DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
+DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
+DEST_JVM_DIZ       = $(DEST_SUBDIR)/$(LIBJVM_DIZ)
+
+install_jvm: $(LIBJVM)
+	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
+	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
+	$(QUIETLY) test -f $(LIBJVM_DIZ) && \
+	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
+	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
+
+#----------------------------------------------------------------------
+# Other files
+
+# Signal interposition library
+include $(MAKEFILES_DIR)/jsig.make
+
+# Serviceability agent
+include $(MAKEFILES_DIR)/saproc.make
+
+#----------------------------------------------------------------------
+
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
+
+install: install_jvm install_jsig install_saproc
+
+.PHONY: default build install install_jvm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/xlc.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,159 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2013 SAP. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+#------------------------------------------------------------------------
+# CC, CXX & AS
+
+# Set compiler explicitly
+CXX = $(COMPILER_PATH)xlC_r
+CC  = $(COMPILER_PATH)xlc_r
+HOSTCXX = $(CXX)
+HOSTCC  = $(CC)
+
+AS  = $(CC) -c
+
+# get xlc version
+CXX_VERSION   := $(shell $(CXX) -qversion 2>&1 | sed -n 's/.*Version: \([0-9.]*\)/\1/p')
+
+# xlc 08.00.0000.0023 and higher supports -qtune=balanced
+CXX_SUPPORTS_BALANCED_TUNING=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 080000000023 ] ; then echo "true" ; fi)
+# xlc 10.01 is used with aggressive optimizations to boost performance
+CXX_IS_V10=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 100100000000 ] ; then echo "true" ; fi)
+
+# check for precompiled headers support
+
+# Switch off the precompiled header support. Neither xlC 8.0 nor xlC 10.0
+# supports precompiled headers. Both "understand" the command line switches "-qusepcomp" and
+# "-qgenpcomp", but when we specify them the following message is printed:
+# "1506-755 (W) The -qusepcomp option is not supported in this release."
+USE_PRECOMPILED_HEADER = 0
+ifneq ($(USE_PRECOMPILED_HEADER),0)
+PRECOMPILED_HEADER_DIR=.
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+endif
+
+
+#------------------------------------------------------------------------
+# Compiler flags
+
+# position-independent code
+PICFLAG = -qpic=large
+
+VM_PICFLAG/LIBJVM = $(PICFLAG)
+VM_PICFLAG/AOUT   =
+VM_PICFLAG        = $(VM_PICFLAG/$(LINK_INTO))
+
+CFLAGS += $(VM_PICFLAG)
+CFLAGS += -qnortti
+CFLAGS += -qnoeh
+
+CFLAGS += -D_REENTRANT
+# no xlc counterpart for -fcheck-new
+# CFLAGS += -fcheck-new
+
+ARCHFLAG = -q64
+
+CFLAGS     += $(ARCHFLAG)
+AOUT_FLAGS += $(ARCHFLAG)
+LFLAGS     += $(ARCHFLAG)
+ASFLAGS    += $(ARCHFLAG)
+
+# Use C++ Interpreter
+ifdef CC_INTERP
+  CFLAGS += -DCC_INTERP
+endif
+
+# Keep temporary files (.ii, .s)
+# no counterpart on xlc for -save-temps, -pipe
+
+# Compiler warnings are treated as errors
+# Do not treat warnings as errors
+# WARNINGS_ARE_ERRORS = -Werror
+# Except for a few acceptable ones
+# ACCEPTABLE_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare
+# CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ACCEPTABLE_WARNINGS)
+CFLAGS_WARN/COMMON = 
+CFLAGS_WARN/DEFAULT = $(CFLAGS_WARN/COMMON) $(EXTRA_WARNINGS)
+# Special cases
+CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) 
+
+# The flags to use for an optimized build
+OPT_CFLAGS += -O3
+
+# HotSpot does not follow strict-aliasing rules, so turn this optimization off
+OPT_CFLAGS += -qalias=noansi
+
+OPT_CFLAGS/NOOPT=-qnoopt
+
+DEPFLAGS = -qmakedep=gcc -MF $(DEP_DIR)/$(@:%=%.d)
+
+#------------------------------------------------------------------------
+# Linker flags
+
+# statically link libstdc++.so; works with gcc but is ignored by g++
+STATIC_STDCXX = -Wl,-lC_r
+
+# Enable linker optimization
+# no counterpart on xlc for this 
+# LFLAGS += -Xlinker -O1
+
+# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
+# MAPFLAG = -Xlinker --version-script=FILENAME
+
+# Build shared library
+SHARED_FLAG = -q64 -b64 -bexpall -G -bnoentry -qmkshrobj -brtl -bnolibpath
+
+#------------------------------------------------------------------------
+# Debug flags
+
+# Always compile with '-g' to get symbols in the stacktraces in the hs_err file
+DEBUG_CFLAGS += -g
+FASTDEBUG_CFLAGS += -g
+OPT_CFLAGS += -g
+
+# DEBUG_BINARIES overrides everything, use full -g debug information
+ifeq ($(DEBUG_BINARIES), true)
+  DEBUG_CFLAGS = -g
+  CFLAGS += $(DEBUG_CFLAGS)
+endif
+
+# If we are building HEADLESS, pass on to VM
+# so it can set the java.awt.headless property
+ifdef HEADLESS
+CFLAGS += -DHEADLESS
+endif
+
+# We are building Embedded for a small device
+# favor code space over speed
+ifdef MINIMIZE_RAM_USAGE
+CFLAGS += -DMINIMIZE_RAM_USAGE
+endif
+
+ifdef CROSS_COMPILE_ARCH
+  STRIP = $(ALT_COMPILER_PATH)/strip
+else
+  STRIP = strip
+endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/platform_ppc64	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,17 @@
+os_family = aix
+
+arch = ppc
+
+arch_model = ppc_64
+
+os_arch = aix_ppc
+
+os_arch_model = aix_ppc_64
+
+lib_arch = ppc64
+
+compiler = xlc
+
+gnu_dis_arch = ppc64
+
+sysdefs = -DAIX -DPPC64
--- a/make/defs.make	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/defs.make	Wed Apr 02 09:59:18 2014 -0700
@@ -176,11 +176,15 @@
   HOST := $(shell uname -n)
 endif
 
-# If not SunOS, not Linux and not BSD, assume Windows
+# If not SunOS, not Linux, not BSD and not AIX, assume Windows
 ifneq ($(OS), Linux)
   ifneq ($(OS), SunOS)
     ifneq ($(OS), bsd)
-      OSNAME=windows
+      ifneq ($(OS), AIX)
+        OSNAME=windows
+      else
+        OSNAME=aix
+      endif
     else
       OSNAME=bsd
     endif
@@ -269,7 +273,7 @@
 
   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
   # is not explicitly listed below, it is treated as x86.
-  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
+  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 zero,$(ARCH)))
   ARCH/       = x86
   ARCH/sparc  = sparc
   ARCH/sparc64= sparc
@@ -295,6 +299,11 @@
       BUILDARCH = sparcv9
     endif
   endif
+  ifeq ($(BUILDARCH), ppc)
+    ifdef LP64
+      BUILDARCH = ppc64
+    endif
+  endif
 
   # LIBARCH is 1:1 mapping from BUILDARCH
   LIBARCH         = $(LIBARCH/$(BUILDARCH))
@@ -303,12 +312,12 @@
   LIBARCH/sparc   = sparc
   LIBARCH/sparcv9 = sparcv9
   LIBARCH/ia64    = ia64
-  LIBARCH/ppc64   = ppc
+  LIBARCH/ppc64   = ppc64
   LIBARCH/ppc     = ppc
   LIBARCH/arm     = arm
   LIBARCH/zero    = $(ZERO_LIBARCH)
 
-  LP64_ARCH = sparcv9 amd64 ia64 zero
+  LP64_ARCH = sparcv9 amd64 ia64 ppc64 zero
 endif
 
 # Required make macro settings for all platforms
--- a/make/excludeSrc.make	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/excludeSrc.make	Wed Apr 02 09:59:18 2014 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -87,9 +87,10 @@
 	g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
 	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
 	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
-	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
+	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1StringDedup.cpp g1StringDedupStat.cpp \
+	g1StringDedupTable.cpp g1StringDedupThread.cpp g1StringDedupQueue.cpp g1_globals.cpp heapRegion.cpp \
 	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
-	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
+	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp g1CodeCacheRemSet.cpp \
 	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
 	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
 	parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
--- a/make/hotspot_version	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/hotspot_version	Wed Apr 02 09:59:18 2014 -0700
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=20
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=08
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/linux/Makefile	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/linux/Makefile	Wed Apr 02 09:59:18 2014 -0700
@@ -66,6 +66,10 @@
     FORCE_TIERED=1
   endif
 endif
+# C1 is not ported to ppc64(le), so we cannot build a tiered VM:
+ifneq (,$(filter $(ARCH),ppc64 ppc64le))
+  FORCE_TIERED=0
+endif
 
 ifdef LP64
   ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
--- a/make/linux/makefiles/buildtree.make	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/linux/makefiles/buildtree.make	Wed Apr 02 09:59:18 2014 -0700
@@ -193,6 +193,7 @@
 DATA_MODE/sparc = 32
 DATA_MODE/sparcv9 = 64
 DATA_MODE/amd64 = 64
+DATA_MODE/ppc64 = 64
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
--- a/make/linux/makefiles/defs.make	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/linux/makefiles/defs.make	Wed Apr 02 09:59:18 2014 -0700
@@ -120,6 +120,15 @@
   HS_ARCH          = ppc
 endif
 
+# PPC64
+ifeq ($(ARCH), ppc64)
+  ARCH_DATA_MODEL  = 64
+  MAKE_ARGS        += LP64=1
+  PLATFORM         = linux-ppc64
+  VM_PLATFORM      = linux_ppc64
+  HS_ARCH          = ppc
+endif
+
 # On 32 bit linux we build server and client, on 64 bit just server.
 ifeq ($(JVM_VARIANTS),)
   ifeq ($(ARCH_DATA_MODEL), 32)
@@ -255,7 +264,7 @@
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
 
-ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
--- a/make/linux/makefiles/gcc.make	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/linux/makefiles/gcc.make	Wed Apr 02 09:59:18 2014 -0700
@@ -181,6 +181,7 @@
 ifndef E500V2
 ARCHFLAG/ppc     =  -mcpu=powerpc
 endif
+ARCHFLAG/ppc64   =  -m64
 
 CFLAGS     += $(ARCHFLAG)
 AOUT_FLAGS += $(ARCHFLAG)
@@ -346,6 +347,7 @@
   DEBUG_CFLAGS/amd64 = -g
   DEBUG_CFLAGS/arm   = -g
   DEBUG_CFLAGS/ppc   = -g
+  DEBUG_CFLAGS/ppc64 = -g
   DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
@@ -361,6 +363,7 @@
     FASTDEBUG_CFLAGS/amd64 = -g
     FASTDEBUG_CFLAGS/arm   = -g
     FASTDEBUG_CFLAGS/ppc   = -g
+    FASTDEBUG_CFLAGS/ppc64 = -g
     FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
     ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
@@ -375,6 +378,7 @@
     OPT_CFLAGS/amd64 = -g
     OPT_CFLAGS/arm   = -g
     OPT_CFLAGS/ppc   = -g
+    OPT_CFLAGS/ppc64 = -g
     OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
     ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/makefiles/ppc64.make	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# make C code aware that it is on a 64-bit platform.
+CFLAGS += -D_LP64=1
+
+# fixes `relocation truncated to fit' error for gcc 4.1.
+CFLAGS += -mminimal-toc
+
+# use ppc64 instructions, but schedule for power5
+CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+
+# let linker find external 64 bit libs.
+LFLAGS_VM += -L/lib64
+
+# specify lib format.
+LFLAGS_VM +=  -Wl,-melf64ppc
--- a/make/linux/platform_ppc	Mon Mar 31 14:07:26 2014 -0700
+++ b/make/linux/platform_ppc	Wed Apr 02 09:59:18 2014 -0700
@@ -2,11 +2,11 @@
 
 arch = ppc
 
-arch_model = ppc
+arch_model = ppc_32
 
 os_arch = linux_ppc
 
-os_arch_model = linux_ppc
+os_arch_model = linux_ppc_32
 
 lib_arch = ppc
 
@@ -14,4 +14,4 @@
 
 gnu_dis_arch = ppc
 
-sysdefs = -DLINUX -D_GNU_SOURCE -DPPC
+sysdefs = -DLINUX -D_GNU_SOURCE -DPPC32
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/platform_ppc64	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,17 @@
+os_family = linux
+
+arch = ppc
+
+arch_model = ppc_64
+
+os_arch = linux_ppc
+
+os_arch_model = linux_ppc_64
+
+lib_arch = ppc64
+
+compiler = gcc
+
+gnu_dis_arch = ppc64
+
+sysdefs = -DLINUX -D_GNU_SOURCE -DPPC64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/assembler_ppc.cpp	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,700 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/objectMonitor.hpp"
+#include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#endif // INCLUDE_ALL_GCS
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#endif
+
+int AbstractAssembler::code_fill_byte() {
+  return 0x00;                  // illegal instruction 0x00000000
+}
+
+void Assembler::print_instruction(int inst) {
+  Unimplemented();
+}
+
+// Patch instruction `inst' at offset `inst_pos' to refer to
+// `dest_pos' and return the resulting instruction.  We should have
+// pcs, not offsets, but since all is relative, it will work out fine.
+int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
+  int m = 0; // mask for displacement field
+  int v = 0; // new value for displacement field
+
+  switch (inv_op_ppc(inst)) {
+  case b_op:  m = li(-1); v = li(disp(dest_pos, inst_pos)); break;
+  case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break;
+    default: ShouldNotReachHere();
+  }
+  return inst & ~m | v;
+}
+
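The patching above reduces to clearing the displacement field with a mask and OR-ing in the new value; a minimal standalone C++ sketch of that idea, using an assumed 14-bit displacement field in bits 2..15 rather than the real li()/bd() helpers:

    #include <cstdint>
    #include <cassert>

    static const uint32_t kDispMask = 0x0000FFFCu;  // assumed field layout for this sketch

    static uint32_t patch_disp(uint32_t inst, uint32_t new_disp) {
      return (inst & ~kDispMask) | (new_disp & kDispMask);   // clear old field, insert new one
    }

    int main() {
      uint32_t inst    = 0x41820010u;                         // some branch with displacement 0x10 (illustrative)
      uint32_t patched = patch_disp(inst, 0x40);
      assert((patched &  kDispMask) == 0x40);                 // displacement updated
      assert((patched & ~kDispMask) == (inst & ~kDispMask));  // opcode/condition bits untouched
      return 0;
    }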
+// Return the offset, relative to _code_begin, of the destination of
+// the branch inst at offset pos.
+int Assembler::branch_destination(int inst, int pos) {
+  int r = 0;
+  switch (inv_op_ppc(inst)) {
+    case b_op:  r = bxx_destination_offset(inst, pos); break;
+    case bc_op: r = inv_bd_field(inst, pos); break;
+    default: ShouldNotReachHere();
+  }
+  return r;
+}
+
+// Low-level andi-one-instruction-macro.
+void Assembler::andi(Register a, Register s, const int ui16) {
+  assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
+  if (is_power_of_2_long(((jlong) ui16)+1)) {
+    // pow2minus1
+    clrldi(a, s, 64-log2_long((((jlong) ui16)+1)));
+  } else if (is_power_of_2_long((jlong) ui16)) {
+    // pow2
+    rlwinm(a, s, 0, 31-log2_long((jlong) ui16), 31-log2_long((jlong) ui16));
+  } else if (is_power_of_2_long((jlong)-ui16)) {
+    // negpow2
+    clrrdi(a, s, log2_long((jlong)-ui16));
+  } else {
+    andi_(a, s, ui16);
+  }
+}
+
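The first special case above (ui16 one less than a power of two) lets a single "clear left" rotate stand in for the and; a small standalone check in plain C++ with an illustrative value:

    #include <cstdint>
    #include <cassert>

    int main() {
      uint64_t s       = 0x123456789ABCDEF0ULL;          // illustrative source value
      uint64_t ui16    = 0x00FF;                          // 2^8 - 1, the "pow2minus1" case
      int      n       = 8;                               // log2(ui16 + 1)
      uint64_t cleared = (s << (64 - n)) >> (64 - n);     // clrldi(a, s, 64 - n) keeps the low n bits
      assert(cleared == (s & ui16));                      // same register result as andi_(a, s, ui16)
      return 0;
    }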
+// RegisterOrConstant version.
+void Assembler::ld(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::ld(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::ld(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::ldx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::ld(d, 0, roc.as_register());
+    else
+      Assembler::ldx(d, roc.as_register(), s1);
+  }
+}
+
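When the constant does not fit in 16 signed bits, the call above asks load_const_optimized for a 16-bit remainder and folds it into the load's displacement; a standalone sketch of why that works, with a hypothetical address value:

    #include <cstdint>
    #include <cassert>

    int main() {
      int64_t addr = 0x12348765;                 // hypothetical constant address
      int16_t rest = (int16_t)(addr & 0xFFFF);   // the simm16 remainder returned to the caller
      int64_t base = addr - rest;                // what is left in the base register d
      assert(base + rest == addr);               // so ld(d, rest, d) targets exactly addr
      return 0;
    }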
+void Assembler::lwa(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lwa(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lwa(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lwax(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lwa(d, 0, roc.as_register());
+    else
+      Assembler::lwax(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lwz(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lwz(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lwz(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lwzx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lwz(d, 0, roc.as_register());
+    else
+      Assembler::lwzx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lha(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lha(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lha(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lhax(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lha(d, 0, roc.as_register());
+    else
+      Assembler::lhax(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lhz(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lhz(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lhz(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lhzx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lhz(d, 0, roc.as_register());
+    else
+      Assembler::lhzx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lbz(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lbz(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lbz(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lbzx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lbz(d, 0, roc.as_register());
+    else
+      Assembler::lbzx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::std(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::std(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::std(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::stdx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::std(d, 0, roc.as_register());
+    else
+      Assembler::stdx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::stw(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::stw(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::stw(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::stwx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::stw(d, 0, roc.as_register());
+    else
+      Assembler::stwx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::sth(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::sth(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::sth(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::sthx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::sth(d, 0, roc.as_register());
+    else
+      Assembler::sthx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::stb(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::stb(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::stb(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::stbx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::stb(d, 0, roc.as_register());
+    else
+      Assembler::stbx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::add(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    intptr_t c = roc.as_constant();
+    assert(is_simm(c, 16), "too big");
+    addi(d, s1, (int)c);
+  }
+  else add(d, roc.as_register(), s1);
+}
+
+void Assembler::subf(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    intptr_t c = roc.as_constant();
+    assert(is_simm(-c, 16), "too big");
+    addi(d, s1, (int)-c);
+  }
+  else subf(d, roc.as_register(), s1);
+}
+
+void Assembler::cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    intptr_t c = roc.as_constant();
+    assert(is_simm(c, 16), "too big");
+    cmpdi(d, s1, (int)c);
+  }
+  else cmpd(d, roc.as_register(), s1);
+}
+
+// Load a 64 bit constant. Patchable.
+void Assembler::load_const(Register d, long x, Register tmp) {
+  // 64-bit value: x = xa xb xc xd
+  int xa = (x >> 48) & 0xffff;
+  int xb = (x >> 32) & 0xffff;
+  int xc = (x >> 16) & 0xffff;
+  int xd = (x >>  0) & 0xffff;
+  if (tmp == noreg) {
+    Assembler::lis( d, (int)(short)xa);
+    Assembler::ori( d, d, (unsigned int)xb);
+    Assembler::sldi(d, d, 32);
+    Assembler::oris(d, d, (unsigned int)xc);
+    Assembler::ori( d, d, (unsigned int)xd);
+  } else {
+    // exploit instruction level parallelism if we have a tmp register
+    assert_different_registers(d, tmp);
+    Assembler::lis(tmp, (int)(short)xa);
+    Assembler::lis(d, (int)(short)xc);
+    Assembler::ori(tmp, tmp, (unsigned int)xb);
+    Assembler::ori(d, d, (unsigned int)xd);
+    Assembler::insrdi(d, tmp, 32, 0);
+  }
+}
+
+// Load a 64 bit constant, optimized, not identifiable (the emitted sequence
+// has no fixed shape and hence cannot be patched later).
+// Tmp can be used to increase ILP. Set return_simm16_rest = true to get the
+// low 16 bits back as a simm16 remainder instead of having them added here.
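+// Examples: 0x7fff         is loaded with a single li,
+//           0x12340000     with a single lis,
+//           0x123456789abc needs the high 32 bits and hence a longer sequence
+//                          whose shape depends on tmp and return_simm16_rest.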
+int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
+  // Avoid accidentally trying to use R0 for indexed addressing.
+  assert(d != R0, "R0 not allowed");
+  assert_different_registers(d, tmp);
+
+  short xa, xb, xc, xd; // Four 16-bit chunks of const.
+  long rem = x;         // Remaining part of const.
+
+  xd = rem & 0xFFFF;    // Lowest 16-bit chunk.
+  rem = (rem >> 16) + ((unsigned short)xd >> 15); // Compensation for sign extend.
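+  // The "+ ((unsigned short)xd >> 15)" term adds 1 whenever xd's sign bit is set:
+  // the later li/addi of xd sign-extends and effectively subtracts 0x10000, which
+  // the incremented upper chunk compensates. E.g. x = 0x18000 yields lis(d, 2)
+  // followed by an addi (or returned rest) of -0x8000.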
+
+  if (rem == 0) { // opt 1: simm16
+    li(d, xd);
+    return 0;
+  }
+
+  xc = rem & 0xFFFF; // Next 16-bit chunk.
+  rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.
+
+  if (rem == 0) { // opt 2: simm32
+    lis(d, xc);
+  } else { // High 32 bits needed.
+
+    if (tmp != noreg) { // opt 3: We have a temp reg.
+      // No carry propagation between xc and higher chunks here (use logical instructions).
+      xa = (x >> 48) & 0xffff;
+      xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
+      bool load_xa = (xa != 0) || (xb < 0);
+      bool return_xd = false;
+
+      if (load_xa) { lis(tmp, xa); }
+      if (xc) { lis(d, xc); }
+      if (load_xa) {
+        if (xb) { ori(tmp, tmp, (unsigned short)xb); } // No addi, we support tmp == R0.
+      } else {
+        li(tmp, xb); // non-negative
+      }
+      if (xc) {
+        if (return_simm16_rest && xd >= 0) { return_xd = true; } // >= 0 to avoid carry propagation after insrdi/rldimi.
+        else if (xd) { addi(d, d, xd); }
+      } else {
+        li(d, xd);
+      }
+      insrdi(d, tmp, 32, 0);
+      return return_xd ? xd : 0; // non-negative
+    }
+
+    xb = rem & 0xFFFF; // Next 16-bit chunk.
+    rem = (rem >> 16) + ((unsigned short)xb >> 15); // Compensation for sign extend.
+
+    xa = rem & 0xFFFF; // Highest 16-bit chunk.
+
+    // opt 4: avoid adding 0
+    if (xa) { // Highest 16-bit needed?
+      lis(d, xa);
+      if (xb) { addi(d, d, xb); }
+    } else {
+      li(d, xb);
+    }
+    sldi(d, d, 32);
+    if (xc) { addis(d, d, xc); }
+  }
+
+  // opt 5: Return offset to be inserted into following instruction.
+  if (return_simm16_rest) return xd;
+
+  if (xd) { addi(d, d, xd); }
+  return 0;
+}
+
+#ifndef PRODUCT
+// Test of ppc assembler.
+void Assembler::test_asm() {
+  // PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
+  addi(   R0,  R1,  10);
+  addis(  R5,  R2,  11);
+  addic_( R3,  R31, 42);
+  subfic( R21, R12, 2112);
+  add(    R3,  R2,  R1);
+  add_(   R11, R22, R30);
+  subf(   R7,  R6,  R5);
+  subf_(  R8,  R9,  R4);
+  addc(   R11, R12, R13);
+  addc_(  R14, R14, R14);
+  subfc(  R15, R16, R17);
+  subfc_( R18, R20, R19);
+  adde(   R20, R22, R24);
+  adde_(  R29, R27, R26);
+  subfe(  R28, R1,  R0);
+  subfe_( R21, R11, R29);
+  neg(    R21, R22);
+  neg_(   R13, R23);
+  mulli(  R0,  R11, -31);
+  mulld(  R1,  R18, R21);
+  mulld_( R2,  R17, R22);
+  mullw(  R3,  R16, R23);
+  mullw_( R4,  R15, R24);
+  divd(   R5,  R14, R25);
+  divd_(  R6,  R13, R26);
+  divw(   R7,  R12, R27);
+  divw_(  R8,  R11, R28);
+
+  li(     R3, -4711);
+
+  // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
+  cmpi(   CCR7,  0, R27, 4711);
+  cmp(    CCR0, 1, R14, R11);
+  cmpli(  CCR5,  1, R17, 45);
+  cmpl(   CCR3, 0, R9,  R10);
+
+  cmpwi(  CCR7,  R27, 4711);
+  cmpw(   CCR0, R14, R11);
+  cmplwi( CCR5,  R17, 45);
+  cmplw(  CCR3, R9,  R10);
+
+  cmpdi(  CCR7,  R27, 4711);
+  cmpd(   CCR0, R14, R11);
+  cmpldi( CCR5,  R17, 45);
+  cmpld(  CCR3, R9,  R10);
+
+  // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
+  andi_(  R4,  R5,  0xff);
+  andis_( R12, R13, 0x7b51);
+  ori(    R1,  R4,  13);
+  oris(   R3,  R5,  177);
+  xori(   R7,  R6,  51);
+  xoris(  R29, R0,  1);
+  andr(   R17, R21, R16);
+  and_(   R3,  R5,  R15);
+  orr(    R2,  R1,  R9);
+  or_(    R17, R15, R11);
+  xorr(   R19, R18, R10);
+  xor_(   R31, R21, R11);
+  nand(   R5,  R7,  R3);
+  nand_(  R3,  R1,  R0);
+  nor(    R2,  R3,  R5);
+  nor_(   R3,  R6,  R8);
+  andc(   R25, R12, R11);
+  andc_(  R24, R22, R21);
+  orc(    R20, R10, R12);
+  orc_(   R22, R2,  R13);
+
+  nop();
+
+  // PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
+  sld(    R5,  R6,  R8);
+  sld_(   R3,  R5,  R9);
+  slw(    R2,  R1,  R10);
+  slw_(   R6,  R26, R16);
+  srd(    R16, R24, R8);
+  srd_(   R21, R14, R7);
+  srw(    R22, R25, R29);
+  srw_(   R5,  R18, R17);
+  srad(   R7,  R11, R0);
+  srad_(  R9,  R13, R1);
+  sraw(   R7,  R15, R2);
+  sraw_(  R4,  R17, R3);
+  sldi(   R3,  R18, 63);
+  sldi_(  R2,  R20, 30);
+  slwi(   R1,  R21, 30);
+  slwi_(  R7,  R23, 8);
+  srdi(   R0,  R19, 2);
+  srdi_(  R12, R24, 5);
+  srwi(   R13, R27, 6);
+  srwi_(  R14, R29, 7);
+  sradi(  R15, R30, 9);
+  sradi_( R16, R31, 19);
+  srawi(  R17, R31, 15);
+  srawi_( R18, R31, 12);
+
+  clrrdi( R3, R30, 5);
+  clrldi( R9, R10, 11);
+
+  rldicr( R19, R20, 13, 15);
+  rldicr_(R20, R20, 16, 14);
+  rldicl( R21, R21, 30, 33);
+  rldicl_(R22, R1,  20, 25);
+  rlwinm( R23, R2,  25, 10, 11);
+  rlwinm_(R24, R3,  12, 13, 14);
+
+  // PPC 1, section 3.3.2 Fixed-Point Load Instructions
+  lwzx(   R3,  R5, R7);
+  lwz(    R11,  0, R1);
+  lwzu(   R31, -4, R11);
+
+  lwax(   R3,  R5, R7);
+  lwa(    R31, -4, R11);
+  lhzx(   R3,  R5, R7);
+  lhz(    R31, -4, R11);
+  lhzu(   R31, -4, R11);
+
+
+  lhax(   R3,  R5, R7);
+  lha(    R31, -4, R11);
+  lhau(   R11,  0, R1);
+
+  lbzx(   R3,  R5, R7);
+  lbz(    R31, -4, R11);
+  lbzu(   R11,  0, R1);
+
+  ld(     R31, -4, R11);
+  ldx(    R3,  R5, R7);
+  ldu(    R31, -4, R11);
+
+  //  PPC 1, section 3.3.3 Fixed-Point Store Instructions
+  stwx(   R3,  R5, R7);
+  stw(    R31, -4, R11);
+  stwu(   R11,  0, R1);
+
+  sthx(   R3,  R5, R7);
+  sth(    R31, -4, R11);
+  sthu(   R31, -4, R11);
+
+  stbx(   R3,  R5, R7);
+  stb(    R31, -4, R11);
+  stbu(   R31, -4, R11);
+
+  std(    R31, -4, R11);
+  stdx(   R3,  R5, R7);
+  stdu(   R31, -4, R11);
+
+  // PPC 1, section 3.3.13 Move To/From System Register Instructions
+  mtlr(   R3);
+  mflr(   R3);
+  mtctr(  R3);
+  mfctr(  R3);
+  mtcrf(  0xff, R15);
+  mtcr(   R15);
+  mtcrf(  0x03, R15);
+  mtcr(   R15);
+  mfcr(   R15);
+
+  // PPC 1, section 2.4.1 Branch Instructions
+  Label lbl1, lbl2, lbl3;
+  bind(lbl1);
+
+  b(pc());
+  b(pc() - 8);
+  b(lbl1);
+  b(lbl2);
+  b(lbl3);
+
+  bl(pc() - 8);
+  bl(lbl1);
+  bl(lbl2);
+
+  bcl(4, 10, pc() - 8);
+  bcl(4, 10, lbl1);
+  bcl(4, 10, lbl2);
+
+  bclr( 4, 6, 0);
+  bclrl(4, 6, 0);
+
+  bind(lbl2);
+
+  bcctr( 4, 6, 0);
+  bcctrl(4, 6, 0);
+
+  blt(CCR0, lbl2);
+  bgt(CCR1, lbl2);
+  beq(CCR2, lbl2);
+  bso(CCR3, lbl2);
+  bge(CCR4, lbl2);
+  ble(CCR5, lbl2);
+  bne(CCR6, lbl2);
+  bns(CCR7, lbl2);
+
+  bltl(CCR0, lbl2);
+  bgtl(CCR1, lbl2);
+  beql(CCR2, lbl2);
+  bsol(CCR3, lbl2);
+  bgel(CCR4, lbl2);
+  blel(CCR5, lbl2);
+  bnel(CCR6, lbl2);
+  bnsl(CCR7, lbl2);
+  blr();
+
+  sync();
+  icbi( R1, R2);
+  dcbst(R2, R3);
+
+  // Floating-point instructions (PPC).
+  // PPC 1, section 4.6.2 Floating-Point Load Instructions
+  lfs( F1, -11, R3);
+  lfsu(F2, 123, R4);
+  lfsx(F3, R5,  R6);
+  lfd( F4, 456, R7);
+  lfdu(F5, 789, R8);
+  lfdx(F6, R10, R11);
+
+  // PPC 1, section 4.6.3 Floating-Point Store Instructions
+  stfs(  F7,  876, R12);
+  stfsu( F8,  543, R13);
+  stfsx( F9,  R14, R15);
+  stfd(  F10, 210, R16);
+  stfdu( F11, 111, R17);
+  stfdx( F12, R18, R19);
+
+  // PPC 1, section 4.6.4 Floating-Point Move Instructions
+  fmr(   F13, F14);
+  fmr_(  F14, F15);
+  fneg(  F16, F17);
+  fneg_( F18, F19);
+  fabs(  F20, F21);
+  fabs_( F22, F23);
+  fnabs( F24, F25);
+  fnabs_(F26, F27);
+
+  // PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic
+  // Instructions
+  fadd(  F28, F29, F30);
+  fadd_( F31, F0,  F1);
+  fadds( F2,  F3,  F4);
+  fadds_(F5,  F6,  F7);
+  fsub(  F8,  F9,  F10);
+  fsub_( F11, F12, F13);
+  fsubs( F14, F15, F16);
+  fsubs_(F17, F18, F19);
+  fmul(  F20, F21, F22);
+  fmul_( F23, F24, F25);
+  fmuls( F26, F27, F28);
+  fmuls_(F29, F30, F31);
+  fdiv(  F0,  F1,  F2);
+  fdiv_( F3,  F4,  F5);
+  fdivs( F6,  F7,  F8);
+  fdivs_(F9,  F10, F11);
+
+  // PPC 1, section 4.6.6 Floating-Point Rounding and Conversion
+  // Instructions
+  frsp(  F12, F13);
+  fctid( F14, F15);
+  fctidz(F16, F17);
+  fctiw( F18, F19);
+  fctiwz(F20, F21);
+  fcfid( F22, F23);
+
+  // PPC 1, section 4.6.7 Floating-Point Compare Instructions
+  fcmpu( CCR7, F24, F25);
+
+  tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
+  code()->decode();
+}
+
+#endif // !PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,1977 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_ASSEMBLER_PPC_HPP
+#define CPU_PPC_VM_ASSEMBLER_PPC_HPP
+
+#include "asm/register.hpp"
+
+// Address is an abstraction used to represent a memory location
+// as used in assembler instructions.
+// PPC instructions grok either baseReg + indexReg or baseReg + disp.
+// So far we do not use this class, as the simplification it offers is small
+// on PPC with its simple addressing modes. Use RegisterOrConstant to
+// represent an offset.
+class Address VALUE_OBJ_CLASS_SPEC {
+};
+
+class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+ private:
+  address          _address;
+  RelocationHolder _rspec;
+
+  RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
+    switch (rtype) {
+    case relocInfo::external_word_type:
+      return external_word_Relocation::spec(addr);
+    case relocInfo::internal_word_type:
+      return internal_word_Relocation::spec(addr);
+    case relocInfo::opt_virtual_call_type:
+      return opt_virtual_call_Relocation::spec();
+    case relocInfo::static_call_type:
+      return static_call_Relocation::spec();
+    case relocInfo::runtime_call_type:
+      return runtime_call_Relocation::spec();
+    case relocInfo::none:
+      return RelocationHolder();
+    default:
+      ShouldNotReachHere();
+      return RelocationHolder();
+    }
+  }
+
+ protected:
+  // creation
+  AddressLiteral() : _address(NULL), _rspec(NULL) {}
+
+ public:
+  AddressLiteral(address addr, RelocationHolder const& rspec)
+    : _address(addr),
+      _rspec(rspec) {}
+
+  AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(oop* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  intptr_t value() const { return (intptr_t) _address; }
+
+  const RelocationHolder& rspec() const { return _rspec; }
+};
+
+// Argument is an abstraction used to represent an outgoing
+// actual argument or an incoming formal parameter, whether
+// it resides in memory or in a register, in a manner consistent
+// with the PPC Application Binary Interface, or ABI. This is
+// often referred to as the native or C calling convention.
+
+class Argument VALUE_OBJ_CLASS_SPEC {
+ private:
+  int _number;  // The number of the argument.
+ public:
+  enum {
+    // Only 8 registers may contain integer parameters.
+    n_register_parameters = 8,
+    // Can have up to 8 floating registers.
+    n_float_register_parameters = 8,
+
+    // PPC C calling conventions.
+    // The first eight arguments are passed in int regs if they are int.
+    n_int_register_parameters_c = 8,
+    // The first thirteen float arguments are passed in float regs.
+    n_float_register_parameters_c = 13,
+    // Only the first 8 parameters are not placed on the stack. Aix disassembly
+    // shows that xlC places all float args after argument 8 on the stack AND
+    // in a register. This is not documented, but we follow this convention, too.
+    n_regs_not_on_stack_c = 8,
+  };
+  // creation
+  Argument(int number) : _number(number) {}
+
+  int  number() const { return _number; }
+
+  // Locating register-based arguments:
+  bool is_register() const { return _number < n_register_parameters; }
+
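+  // E.g., assuming R3_ARG1 denotes R3 (the first integer argument register of
+  // the PPC ABI), Argument(0).as_register() is R3 and Argument(3).as_register() is R6.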
+  Register as_register() const {
+    assert(is_register(), "must be a register argument");
+    return as_Register(number() + R3_ARG1->encoding());
+  }
+};
+
+#if !defined(ABI_ELFv2)
+// A ppc64 function descriptor.
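+// Under the 64-bit ELFv1 and AIX ABIs a function pointer refers to such a
+// descriptor rather than to code: it bundles the entry point, the callee's
+// TOC pointer, and an environment pointer.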
+struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
+ private:
+  address _entry;
+  address _toc;
+  address _env;
+
+ public:
+  inline address entry() const { return _entry; }
+  inline address toc()   const { return _toc; }
+  inline address env()   const { return _env; }
+
+  inline void set_entry(address entry) { _entry = entry; }
+  inline void set_toc(  address toc)   { _toc   = toc; }
+  inline void set_env(  address env)   { _env   = env; }
+
+  inline static ByteSize entry_offset() { return byte_offset_of(FunctionDescriptor, _entry); }
+  inline static ByteSize toc_offset()   { return byte_offset_of(FunctionDescriptor, _toc); }
+  inline static ByteSize env_offset()   { return byte_offset_of(FunctionDescriptor, _env); }
+
+  // Friend functions can be called without loading toc and env.
+  enum {
+    friend_toc = 0xcafe,
+    friend_env = 0xc0de
+  };
+
+  inline bool is_friend_function() const {
+    return (toc() == (address) friend_toc) && (env() == (address) friend_env);
+  }
+
+  // Constructor for stack-allocated instances.
+  FunctionDescriptor() {
+    _entry = (address) 0xbad;
+    _toc   = (address) 0xbad;
+    _env   = (address) 0xbad;
+  }
+};
+#endif
+
+class Assembler : public AbstractAssembler {
+ protected:
+  // Displacement routines
+  static void print_instruction(int inst);
+  static int  patched_branch(int dest_pos, int inst, int inst_pos);
+  static int  branch_destination(int inst, int pos);
+
+  friend class AbstractAssembler;
+
+  // Code patchers need various routines like inv_wdisp()
+  friend class NativeInstruction;
+  friend class NativeGeneralJump;
+  friend class Relocation;
+
+ public:
+
+  enum shifts {
+    XO_21_29_SHIFT = 2,
+    XO_21_30_SHIFT = 1,
+    XO_27_29_SHIFT = 2,
+    XO_30_31_SHIFT = 0,
+    SPR_5_9_SHIFT  = 11u, // SPR_5_9 field in bits 11 -- 15
+    SPR_0_4_SHIFT  = 16u, // SPR_0_4 field in bits 16 -- 20
+    RS_SHIFT       = 21u, // RS field in bits 21 -- 25
+    OPCODE_SHIFT   = 26u, // opcode in bits 26 -- 31
+  };
+
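+  // The opcode constants below place the 6-bit primary opcode in the
+  // instruction's most significant bits (spec bits 0-5, hence "<< OPCODE_SHIFT"
+  // with OPCODE_SHIFT == 26 in LSB-0 terms), and the extended opcode of
+  // X/XO-form instructions in spec bits 21-30 (hence "<< 1").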
+  enum opcdxos_masks {
+    XL_FORM_OPCODE_MASK = (63u << OPCODE_SHIFT) | (1023u << 1),
+    ADDI_OPCODE_MASK    = (63u << OPCODE_SHIFT),
+    ADDIS_OPCODE_MASK   = (63u << OPCODE_SHIFT),
+    BXX_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    BCXX_OPCODE_MASK    = (63u << OPCODE_SHIFT),
+    // trap instructions
+    TDI_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    TWI_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    TD_OPCODE_MASK      = (63u << OPCODE_SHIFT) | (1023u << 1),
+    TW_OPCODE_MASK      = (63u << OPCODE_SHIFT) | (1023u << 1),
+    LD_OPCODE_MASK      = (63u << OPCODE_SHIFT) | (3u << XO_30_31_SHIFT), // DS-FORM
+    STD_OPCODE_MASK     = LD_OPCODE_MASK,
+    STDU_OPCODE_MASK    = STD_OPCODE_MASK,
+    STDX_OPCODE_MASK    = (63u << OPCODE_SHIFT) | (1023u << 1),
+    STDUX_OPCODE_MASK   = STDX_OPCODE_MASK,
+    STW_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    STWU_OPCODE_MASK    = STW_OPCODE_MASK,
+    STWX_OPCODE_MASK    = (63u << OPCODE_SHIFT) | (1023u << 1),
+    STWUX_OPCODE_MASK   = STWX_OPCODE_MASK,
+    MTCTR_OPCODE_MASK   = ~(31u << RS_SHIFT),
+    ORI_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    ORIS_OPCODE_MASK    = (63u << OPCODE_SHIFT),
+    RLDICR_OPCODE_MASK  = (63u << OPCODE_SHIFT) | (7u << XO_27_29_SHIFT)
+  };
+
+  enum opcdxos {
+    ADD_OPCODE    = (31u << OPCODE_SHIFT | 266u << 1),
+    ADDC_OPCODE   = (31u << OPCODE_SHIFT |  10u << 1),
+    ADDI_OPCODE   = (14u << OPCODE_SHIFT),
+    ADDIS_OPCODE  = (15u << OPCODE_SHIFT),
+    ADDIC__OPCODE = (13u << OPCODE_SHIFT),
+    ADDE_OPCODE   = (31u << OPCODE_SHIFT | 138u << 1),
+    SUBF_OPCODE   = (31u << OPCODE_SHIFT |  40u << 1),
+    SUBFC_OPCODE  = (31u << OPCODE_SHIFT |   8u << 1),
+    SUBFE_OPCODE  = (31u << OPCODE_SHIFT | 136u << 1),
+    SUBFIC_OPCODE = (8u  << OPCODE_SHIFT),
+    SUBFZE_OPCODE = (31u << OPCODE_SHIFT | 200u << 1),
+    DIVW_OPCODE   = (31u << OPCODE_SHIFT | 491u << 1),
+    MULLW_OPCODE  = (31u << OPCODE_SHIFT | 235u << 1),
+    MULHW_OPCODE  = (31u << OPCODE_SHIFT |  75u << 1),
+    MULHWU_OPCODE = (31u << OPCODE_SHIFT |  11u << 1),
+    MULLI_OPCODE  = (7u  << OPCODE_SHIFT),
+    AND_OPCODE    = (31u << OPCODE_SHIFT |  28u << 1),
+    ANDI_OPCODE   = (28u << OPCODE_SHIFT),
+    ANDIS_OPCODE  = (29u << OPCODE_SHIFT),
+    ANDC_OPCODE   = (31u << OPCODE_SHIFT |  60u << 1),
+    ORC_OPCODE    = (31u << OPCODE_SHIFT | 412u << 1),
+    OR_OPCODE     = (31u << OPCODE_SHIFT | 444u << 1),
+    ORI_OPCODE    = (24u << OPCODE_SHIFT),
+    ORIS_OPCODE   = (25u << OPCODE_SHIFT),
+    XOR_OPCODE    = (31u << OPCODE_SHIFT | 316u << 1),
+    XORI_OPCODE   = (26u << OPCODE_SHIFT),
+    XORIS_OPCODE  = (27u << OPCODE_SHIFT),
+
+    NEG_OPCODE    = (31u << OPCODE_SHIFT | 104u << 1),
+
+    RLWINM_OPCODE = (21u << OPCODE_SHIFT),
+    CLRRWI_OPCODE = RLWINM_OPCODE,
+    CLRLWI_OPCODE = RLWINM_OPCODE,
+
+    RLWIMI_OPCODE = (20u << OPCODE_SHIFT),
+
+    SLW_OPCODE    = (31u << OPCODE_SHIFT |  24u << 1),
+    SLWI_OPCODE   = RLWINM_OPCODE,
+    SRW_OPCODE    = (31u << OPCODE_SHIFT | 536u << 1),
+    SRWI_OPCODE   = RLWINM_OPCODE,
+    SRAW_OPCODE   = (31u << OPCODE_SHIFT | 792u << 1),
+    SRAWI_OPCODE  = (31u << OPCODE_SHIFT | 824u << 1),
+
+    CMP_OPCODE    = (31u << OPCODE_SHIFT |   0u << 1),
+    CMPI_OPCODE   = (11u << OPCODE_SHIFT),
+    CMPL_OPCODE   = (31u << OPCODE_SHIFT |  32u << 1),
+    CMPLI_OPCODE  = (10u << OPCODE_SHIFT),
+
+    ISEL_OPCODE   = (31u << OPCODE_SHIFT |  15u << 1),
+
+    MTLR_OPCODE   = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT),
+    MFLR_OPCODE   = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT),
+
+    MTCRF_OPCODE  = (31u << OPCODE_SHIFT | 144u << 1),
+    MFCR_OPCODE   = (31u << OPCODE_SHIFT | 19u << 1),
+    MCRF_OPCODE   = (19u << OPCODE_SHIFT | 0u << 1),
+
+    // condition register logic instructions
+    CRAND_OPCODE  = (19u << OPCODE_SHIFT | 257u << 1),
+    CRNAND_OPCODE = (19u << OPCODE_SHIFT | 225u << 1),
+    CROR_OPCODE   = (19u << OPCODE_SHIFT | 449u << 1),
+    CRXOR_OPCODE  = (19u << OPCODE_SHIFT | 193u << 1),
+    CRNOR_OPCODE  = (19u << OPCODE_SHIFT |  33u << 1),
+    CREQV_OPCODE  = (19u << OPCODE_SHIFT | 289u << 1),
+    CRANDC_OPCODE = (19u << OPCODE_SHIFT | 129u << 1),
+    CRORC_OPCODE  = (19u << OPCODE_SHIFT | 417u << 1),
+
+    BCLR_OPCODE   = (19u << OPCODE_SHIFT | 16u << 1),
+    BXX_OPCODE    = (18u << OPCODE_SHIFT),
+    BCXX_OPCODE   = (16u << OPCODE_SHIFT),
+
+    // CTR-related opcodes
+    BCCTR_OPCODE  = (19u << OPCODE_SHIFT | 528u << 1),
+    MTCTR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT),
+    MFCTR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT),
+
+
+    LWZ_OPCODE   = (32u << OPCODE_SHIFT),
+    LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
+    LWZU_OPCODE  = (33u << OPCODE_SHIFT),
+
+    LHA_OPCODE   = (42u << OPCODE_SHIFT),
+    LHAX_OPCODE  = (31u << OPCODE_SHIFT | 343u << 1),
+    LHAU_OPCODE  = (43u << OPCODE_SHIFT),
+
+    LHZ_OPCODE   = (40u << OPCODE_SHIFT),
+    LHZX_OPCODE  = (31u << OPCODE_SHIFT | 279u << 1),
+    LHZU_OPCODE  = (41u << OPCODE_SHIFT),
+
+    LBZ_OPCODE   = (34u << OPCODE_SHIFT),
+    LBZX_OPCODE  = (31u << OPCODE_SHIFT |  87u << 1),
+    LBZU_OPCODE  = (35u << OPCODE_SHIFT),
+
+    STW_OPCODE   = (36u << OPCODE_SHIFT),
+    STWX_OPCODE  = (31u << OPCODE_SHIFT | 151u << 1),
+    STWU_OPCODE  = (37u << OPCODE_SHIFT),
+    STWUX_OPCODE = (31u << OPCODE_SHIFT | 183u << 1),
+
+    STH_OPCODE   = (44u << OPCODE_SHIFT),
+    STHX_OPCODE  = (31u << OPCODE_SHIFT | 407u << 1),
+    STHU_OPCODE  = (45u << OPCODE_SHIFT),
+
+    STB_OPCODE   = (38u << OPCODE_SHIFT),
+    STBX_OPCODE  = (31u << OPCODE_SHIFT | 215u << 1),
+    STBU_OPCODE  = (39u << OPCODE_SHIFT),
+
+    EXTSB_OPCODE = (31u << OPCODE_SHIFT | 954u << 1),
+    EXTSH_OPCODE = (31u << OPCODE_SHIFT | 922u << 1),
+    EXTSW_OPCODE = (31u << OPCODE_SHIFT | 986u << 1),               // X-FORM
+
+    // 32 bit opcode encodings
+
+    LWA_OPCODE    = (58u << OPCODE_SHIFT |   2u << XO_30_31_SHIFT), // DS-FORM
+    LWAX_OPCODE   = (31u << OPCODE_SHIFT | 341u << XO_21_30_SHIFT), // X-FORM
+
+    CNTLZW_OPCODE = (31u << OPCODE_SHIFT |  26u << XO_21_30_SHIFT), // X-FORM
+
+    // 64 bit opcode encodings
+
+    LD_OPCODE     = (58u << OPCODE_SHIFT |   0u << XO_30_31_SHIFT), // DS-FORM
+    LDU_OPCODE    = (58u << OPCODE_SHIFT |   1u << XO_30_31_SHIFT), // DS-FORM
+    LDX_OPCODE    = (31u << OPCODE_SHIFT |  21u << XO_21_30_SHIFT), // X-FORM
+
+    STD_OPCODE    = (62u << OPCODE_SHIFT |   0u << XO_30_31_SHIFT), // DS-FORM
+    STDU_OPCODE   = (62u << OPCODE_SHIFT |   1u << XO_30_31_SHIFT), // DS-FORM
+    STDUX_OPCODE  = (31u << OPCODE_SHIFT | 181u << 1),                  // X-FORM
+    STDX_OPCODE   = (31u << OPCODE_SHIFT | 149u << XO_21_30_SHIFT), // X-FORM
+
+    RLDICR_OPCODE = (30u << OPCODE_SHIFT |   1u << XO_27_29_SHIFT), // MD-FORM
+    RLDICL_OPCODE = (30u << OPCODE_SHIFT |   0u << XO_27_29_SHIFT), // MD-FORM
+    RLDIC_OPCODE  = (30u << OPCODE_SHIFT |   2u << XO_27_29_SHIFT), // MD-FORM
+    RLDIMI_OPCODE = (30u << OPCODE_SHIFT |   3u << XO_27_29_SHIFT), // MD-FORM
+
+    SRADI_OPCODE  = (31u << OPCODE_SHIFT | 413u << XO_21_29_SHIFT), // XS-FORM
+
+    SLD_OPCODE    = (31u << OPCODE_SHIFT |  27u << 1),              // X-FORM
+    SRD_OPCODE    = (31u << OPCODE_SHIFT | 539u << 1),              // X-FORM
+    SRAD_OPCODE   = (31u << OPCODE_SHIFT | 794u << 1),              // X-FORM
+
+    MULLD_OPCODE  = (31u << OPCODE_SHIFT | 233u << 1),              // XO-FORM
+    MULHD_OPCODE  = (31u << OPCODE_SHIFT |  73u << 1),              // XO-FORM
+    MULHDU_OPCODE = (31u << OPCODE_SHIFT |   9u << 1),              // XO-FORM
+    DIVD_OPCODE   = (31u << OPCODE_SHIFT | 489u << 1),              // XO-FORM
+
+    CNTLZD_OPCODE = (31u << OPCODE_SHIFT |  58u << XO_21_30_SHIFT), // X-FORM
+    NAND_OPCODE   = (31u << OPCODE_SHIFT | 476u << XO_21_30_SHIFT), // X-FORM
+    NOR_OPCODE    = (31u << OPCODE_SHIFT | 124u << XO_21_30_SHIFT), // X-FORM
+
+
+    // opcodes only used for floating arithmetic
+    FADD_OPCODE   = (63u << OPCODE_SHIFT |  21u << 1),
+    FADDS_OPCODE  = (59u << OPCODE_SHIFT |  21u << 1),
+    FCMPU_OPCODE  = (63u << OPCODE_SHIFT |  00u << 1),
+    FDIV_OPCODE   = (63u << OPCODE_SHIFT |  18u << 1),
+    FDIVS_OPCODE  = (59u << OPCODE_SHIFT |  18u << 1),
+    FMR_OPCODE    = (63u << OPCODE_SHIFT |  72u << 1),
+    // These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
+    // on Power7.  Do not use.
+    // MFFGPR_OPCODE  = (31u << OPCODE_SHIFT | 607u << 1),
+    // MFTGPR_OPCODE  = (31u << OPCODE_SHIFT | 735u << 1),
+    CMPB_OPCODE    = (31u << OPCODE_SHIFT |  508  << 1),
+    POPCNTB_OPCODE = (31u << OPCODE_SHIFT |  122  << 1),
+    POPCNTW_OPCODE = (31u << OPCODE_SHIFT |  378  << 1),
+    POPCNTD_OPCODE = (31u << OPCODE_SHIFT |  506  << 1),
+    FABS_OPCODE    = (63u << OPCODE_SHIFT |  264u << 1),
+    FNABS_OPCODE   = (63u << OPCODE_SHIFT |  136u << 1),
+    FMUL_OPCODE    = (63u << OPCODE_SHIFT |   25u << 1),
+    FMULS_OPCODE   = (59u << OPCODE_SHIFT |   25u << 1),
+    FNEG_OPCODE    = (63u << OPCODE_SHIFT |   40u << 1),
+    FSUB_OPCODE    = (63u << OPCODE_SHIFT |   20u << 1),
+    FSUBS_OPCODE   = (59u << OPCODE_SHIFT |   20u << 1),
+
+    // PPC64-internal FPU conversion opcodes
+    FCFID_OPCODE   = (63u << OPCODE_SHIFT |  846u << 1),
+    FCFIDS_OPCODE  = (59u << OPCODE_SHIFT |  846u << 1),
+    FCTID_OPCODE   = (63u << OPCODE_SHIFT |  814u << 1),
+    FCTIDZ_OPCODE  = (63u << OPCODE_SHIFT |  815u << 1),
+    FCTIW_OPCODE   = (63u << OPCODE_SHIFT |   14u << 1),
+    FCTIWZ_OPCODE  = (63u << OPCODE_SHIFT |   15u << 1),
+    FRSP_OPCODE    = (63u << OPCODE_SHIFT |   12u << 1),
+
+    // WARNING: Using fmadd results in a non-compliant VM. Some floating-point
+    // TCK tests will fail.
+    FMADD_OPCODE   = (59u << OPCODE_SHIFT |   29u << 1),
+    DMADD_OPCODE   = (63u << OPCODE_SHIFT |   29u << 1),
+    FMSUB_OPCODE   = (59u << OPCODE_SHIFT |   28u << 1),
+    DMSUB_OPCODE   = (63u << OPCODE_SHIFT |   28u << 1),
+    FNMADD_OPCODE  = (59u << OPCODE_SHIFT |   31u << 1),
+    DNMADD_OPCODE  = (63u << OPCODE_SHIFT |   31u << 1),
+    FNMSUB_OPCODE  = (59u << OPCODE_SHIFT |   30u << 1),
+    DNMSUB_OPCODE  = (63u << OPCODE_SHIFT |   30u << 1),
+
+    LFD_OPCODE     = (50u << OPCODE_SHIFT |   00u << 1),
+    LFDU_OPCODE    = (51u << OPCODE_SHIFT |   00u << 1),
+    LFDX_OPCODE    = (31u << OPCODE_SHIFT |  599u << 1),
+    LFS_OPCODE     = (48u << OPCODE_SHIFT |   00u << 1),
+    LFSU_OPCODE    = (49u << OPCODE_SHIFT |   00u << 1),
+    LFSX_OPCODE    = (31u << OPCODE_SHIFT |  535u << 1),
+
+    STFD_OPCODE    = (54u << OPCODE_SHIFT |   00u << 1),
+    STFDU_OPCODE   = (55u << OPCODE_SHIFT |   00u << 1),
+    STFDX_OPCODE   = (31u << OPCODE_SHIFT |  727u << 1),
+    STFS_OPCODE    = (52u << OPCODE_SHIFT |   00u << 1),
+    STFSU_OPCODE   = (53u << OPCODE_SHIFT |   00u << 1),
+    STFSX_OPCODE   = (31u << OPCODE_SHIFT |  663u << 1),
+
+    FSQRT_OPCODE   = (63u << OPCODE_SHIFT |   22u << 1),            // A-FORM
+    FSQRTS_OPCODE  = (59u << OPCODE_SHIFT |   22u << 1),            // A-FORM
+
+    // Vector instruction support for >= Power6
+    // Vector Storage Access
+    LVEBX_OPCODE   = (31u << OPCODE_SHIFT |    7u << 1),
+    LVEHX_OPCODE   = (31u << OPCODE_SHIFT |   39u << 1),
+    LVEWX_OPCODE   = (31u << OPCODE_SHIFT |   71u << 1),
+    LVX_OPCODE     = (31u << OPCODE_SHIFT |  103u << 1),
+    LVXL_OPCODE    = (31u << OPCODE_SHIFT |  359u << 1),
+    STVEBX_OPCODE  = (31u << OPCODE_SHIFT |  135u << 1),
+    STVEHX_OPCODE  = (31u << OPCODE_SHIFT |  167u << 1),
+    STVEWX_OPCODE  = (31u << OPCODE_SHIFT |  199u << 1),
+    STVX_OPCODE    = (31u << OPCODE_SHIFT |  231u << 1),
+    STVXL_OPCODE   = (31u << OPCODE_SHIFT |  487u << 1),
+    LVSL_OPCODE    = (31u << OPCODE_SHIFT |    6u << 1),
+    LVSR_OPCODE    = (31u << OPCODE_SHIFT |   38u << 1),
+
+    // Vector Permute and Formatting
+    VPKPX_OPCODE   = (4u  << OPCODE_SHIFT |  782u     ),
+    VPKSHSS_OPCODE = (4u  << OPCODE_SHIFT |  398u     ),
+    VPKSWSS_OPCODE = (4u  << OPCODE_SHIFT |  462u     ),
+    VPKSHUS_OPCODE = (4u  << OPCODE_SHIFT |  270u     ),
+    VPKSWUS_OPCODE = (4u  << OPCODE_SHIFT |  334u     ),
+    VPKUHUM_OPCODE = (4u  << OPCODE_SHIFT |   14u     ),
+    VPKUWUM_OPCODE = (4u  << OPCODE_SHIFT |   78u     ),
+    VPKUHUS_OPCODE = (4u  << OPCODE_SHIFT |  142u     ),
+    VPKUWUS_OPCODE = (4u  << OPCODE_SHIFT |  206u     ),
+    VUPKHPX_OPCODE = (4u  << OPCODE_SHIFT |  846u     ),
+    VUPKHSB_OPCODE = (4u  << OPCODE_SHIFT |  526u     ),
+    VUPKHSH_OPCODE = (4u  << OPCODE_SHIFT |  590u     ),
+    VUPKLPX_OPCODE = (4u  << OPCODE_SHIFT |  974u     ),
+    VUPKLSB_OPCODE = (4u  << OPCODE_SHIFT |  654u     ),
+    VUPKLSH_OPCODE = (4u  << OPCODE_SHIFT |  718u     ),
+
+    VMRGHB_OPCODE  = (4u  << OPCODE_SHIFT |   12u     ),
+    VMRGHW_OPCODE  = (4u  << OPCODE_SHIFT |  140u     ),
+    VMRGHH_OPCODE  = (4u  << OPCODE_SHIFT |   76u     ),
+    VMRGLB_OPCODE  = (4u  << OPCODE_SHIFT |  268u     ),
+    VMRGLW_OPCODE  = (4u  << OPCODE_SHIFT |  396u     ),
+    VMRGLH_OPCODE  = (4u  << OPCODE_SHIFT |  332u     ),
+
+    VSPLT_OPCODE   = (4u  << OPCODE_SHIFT |  524u     ),
+    VSPLTH_OPCODE  = (4u  << OPCODE_SHIFT |  588u     ),
+    VSPLTW_OPCODE  = (4u  << OPCODE_SHIFT |  652u     ),
+    VSPLTISB_OPCODE= (4u  << OPCODE_SHIFT |  780u     ),
+    VSPLTISH_OPCODE= (4u  << OPCODE_SHIFT |  844u     ),
+    VSPLTISW_OPCODE= (4u  << OPCODE_SHIFT |  908u     ),
+
+    VPERM_OPCODE   = (4u  << OPCODE_SHIFT |   43u     ),
+    VSEL_OPCODE    = (4u  << OPCODE_SHIFT |   42u     ),
+
+    VSL_OPCODE     = (4u  << OPCODE_SHIFT |  452u     ),
+    VSLDOI_OPCODE  = (4u  << OPCODE_SHIFT |   44u     ),
+    VSLO_OPCODE    = (4u  << OPCODE_SHIFT | 1036u     ),
+    VSR_OPCODE     = (4u  << OPCODE_SHIFT |  708u     ),
+    VSRO_OPCODE    = (4u  << OPCODE_SHIFT | 1100u     ),
+
+    // Vector Integer
+    VADDCUW_OPCODE = (4u  << OPCODE_SHIFT |  384u     ),
+    VADDSHS_OPCODE = (4u  << OPCODE_SHIFT |  832u     ),
+    VADDSBS_OPCODE = (4u  << OPCODE_SHIFT |  768u     ),
+    VADDSWS_OPCODE = (4u  << OPCODE_SHIFT |  896u     ),
+    VADDUBM_OPCODE = (4u  << OPCODE_SHIFT |    0u     ),
+    VADDUWM_OPCODE = (4u  << OPCODE_SHIFT |  128u     ),
+    VADDUHM_OPCODE = (4u  << OPCODE_SHIFT |   64u     ),
+    VADDUBS_OPCODE = (4u  << OPCODE_SHIFT |  512u     ),
+    VADDUWS_OPCODE = (4u  << OPCODE_SHIFT |  640u     ),
+    VADDUHS_OPCODE = (4u  << OPCODE_SHIFT |  576u     ),
+    VSUBCUW_OPCODE = (4u  << OPCODE_SHIFT | 1408u     ),
+    VSUBSHS_OPCODE = (4u  << OPCODE_SHIFT | 1856u     ),
+    VSUBSBS_OPCODE = (4u  << OPCODE_SHIFT | 1792u     ),
+    VSUBSWS_OPCODE = (4u  << OPCODE_SHIFT | 1920u     ),
+    VSUBUBM_OPCODE = (4u  << OPCODE_SHIFT | 1024u     ),
+    VSUBUWM_OPCODE = (4u  << OPCODE_SHIFT | 1152u     ),
+    VSUBUHM_OPCODE = (4u  << OPCODE_SHIFT | 1088u     ),
+    VSUBUBS_OPCODE = (4u  << OPCODE_SHIFT | 1536u     ),
+    VSUBUWS_OPCODE = (4u  << OPCODE_SHIFT | 1664u     ),
+    VSUBUHS_OPCODE = (4u  << OPCODE_SHIFT | 1600u     ),
+
+    VMULESB_OPCODE = (4u  << OPCODE_SHIFT |  776u     ),
+    VMULEUB_OPCODE = (4u  << OPCODE_SHIFT |  520u     ),
+    VMULESH_OPCODE = (4u  << OPCODE_SHIFT |  840u     ),
+    VMULEUH_OPCODE = (4u  << OPCODE_SHIFT |  584u     ),
+    VMULOSB_OPCODE = (4u  << OPCODE_SHIFT |  264u     ),
+    VMULOUB_OPCODE = (4u  << OPCODE_SHIFT |    8u     ),
+    VMULOSH_OPCODE = (4u  << OPCODE_SHIFT |  328u     ),
+    VMULOUH_OPCODE = (4u  << OPCODE_SHIFT |   72u     ),
+    VMHADDSHS_OPCODE=(4u  << OPCODE_SHIFT |   32u     ),
+    VMHRADDSHS_OPCODE=(4u << OPCODE_SHIFT |   33u     ),
+    VMLADDUHM_OPCODE=(4u  << OPCODE_SHIFT |   34u     ),
+    VMSUBUHM_OPCODE= (4u  << OPCODE_SHIFT |   36u     ),
+    VMSUMMBM_OPCODE= (4u  << OPCODE_SHIFT |   37u     ),
+    VMSUMSHM_OPCODE= (4u  << OPCODE_SHIFT |   40u     ),
+    VMSUMSHS_OPCODE= (4u  << OPCODE_SHIFT |   41u     ),
+    VMSUMUHM_OPCODE= (4u  << OPCODE_SHIFT |   38u     ),
+    VMSUMUHS_OPCODE= (4u  << OPCODE_SHIFT |   39u     ),
+
+    VSUMSWS_OPCODE = (4u  << OPCODE_SHIFT | 1928u     ),
+    VSUM2SWS_OPCODE= (4u  << OPCODE_SHIFT | 1672u     ),
+    VSUM4SBS_OPCODE= (4u  << OPCODE_SHIFT | 1800u     ),
+    VSUM4UBS_OPCODE= (4u  << OPCODE_SHIFT | 1544u     ),
+    VSUM4SHS_OPCODE= (4u  << OPCODE_SHIFT | 1608u     ),
+
+    VAVGSB_OPCODE  = (4u  << OPCODE_SHIFT | 1282u     ),
+    VAVGSW_OPCODE  = (4u  << OPCODE_SHIFT | 1410u     ),
+    VAVGSH_OPCODE  = (4u  << OPCODE_SHIFT | 1346u     ),
+    VAVGUB_OPCODE  = (4u  << OPCODE_SHIFT | 1026u     ),
+    VAVGUW_OPCODE  = (4u  << OPCODE_SHIFT | 1154u     ),
+    VAVGUH_OPCODE  = (4u  << OPCODE_SHIFT | 1090u     ),
+
+    VMAXSB_OPCODE  = (4u  << OPCODE_SHIFT |  258u     ),
+    VMAXSW_OPCODE  = (4u  << OPCODE_SHIFT |  386u     ),
+    VMAXSH_OPCODE  = (4u  << OPCODE_SHIFT |  322u     ),
+    VMAXUB_OPCODE  = (4u  << OPCODE_SHIFT |    2u     ),
+    VMAXUW_OPCODE  = (4u  << OPCODE_SHIFT |  130u     ),
+    VMAXUH_OPCODE  = (4u  << OPCODE_SHIFT |   66u     ),
+    VMINSB_OPCODE  = (4u  << OPCODE_SHIFT |  770u     ),
+    VMINSW_OPCODE  = (4u  << OPCODE_SHIFT |  898u     ),
+    VMINSH_OPCODE  = (4u  << OPCODE_SHIFT |  834u     ),
+    VMINUB_OPCODE  = (4u  << OPCODE_SHIFT |  514u     ),
+    VMINUW_OPCODE  = (4u  << OPCODE_SHIFT |  642u     ),
+    VMINUH_OPCODE  = (4u  << OPCODE_SHIFT |  578u     ),
+
+    VCMPEQUB_OPCODE= (4u  << OPCODE_SHIFT |    6u     ),
+    VCMPEQUH_OPCODE= (4u  << OPCODE_SHIFT |   70u     ),
+    VCMPEQUW_OPCODE= (4u  << OPCODE_SHIFT |  134u     ),
+    VCMPGTSH_OPCODE= (4u  << OPCODE_SHIFT |  838u     ),
+    VCMPGTSB_OPCODE= (4u  << OPCODE_SHIFT |  774u     ),
+    VCMPGTSW_OPCODE= (4u  << OPCODE_SHIFT |  902u     ),
+    VCMPGTUB_OPCODE= (4u  << OPCODE_SHIFT |  518u     ),
+    VCMPGTUH_OPCODE= (4u  << OPCODE_SHIFT |  582u     ),
+    VCMPGTUW_OPCODE= (4u  << OPCODE_SHIFT |  646u     ),
+
+    VAND_OPCODE    = (4u  << OPCODE_SHIFT | 1028u     ),
+    VANDC_OPCODE   = (4u  << OPCODE_SHIFT | 1092u     ),
+    VNOR_OPCODE    = (4u  << OPCODE_SHIFT | 1284u     ),
+    VOR_OPCODE     = (4u  << OPCODE_SHIFT | 1156u     ),
+    VXOR_OPCODE    = (4u  << OPCODE_SHIFT | 1220u     ),
+    VRLB_OPCODE    = (4u  << OPCODE_SHIFT |    4u     ),
+    VRLW_OPCODE    = (4u  << OPCODE_SHIFT |  132u     ),
+    VRLH_OPCODE    = (4u  << OPCODE_SHIFT |   68u     ),
+    VSLB_OPCODE    = (4u  << OPCODE_SHIFT |  260u     ),
+    VSKW_OPCODE    = (4u  << OPCODE_SHIFT |  388u     ),
+    VSLH_OPCODE    = (4u  << OPCODE_SHIFT |  324u     ),
+    VSRB_OPCODE    = (4u  << OPCODE_SHIFT |  516u     ),
+    VSRW_OPCODE    = (4u  << OPCODE_SHIFT |  644u     ),
+    VSRH_OPCODE    = (4u  << OPCODE_SHIFT |  580u     ),
+    VSRAB_OPCODE   = (4u  << OPCODE_SHIFT |  772u     ),
+    VSRAW_OPCODE   = (4u  << OPCODE_SHIFT |  900u     ),
+    VSRAH_OPCODE   = (4u  << OPCODE_SHIFT |  836u     ),
+
+    // Vector Floating-Point
+    // not implemented yet
+
+    // Vector Status and Control
+    MTVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1604u     ),
+    MFVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1540u     ),
+
+    // Icache and dcache related instructions
+    DCBA_OPCODE    = (31u << OPCODE_SHIFT |  758u << 1),
+    DCBZ_OPCODE    = (31u << OPCODE_SHIFT | 1014u << 1),
+    DCBST_OPCODE   = (31u << OPCODE_SHIFT |   54u << 1),
+    DCBF_OPCODE    = (31u << OPCODE_SHIFT |   86u << 1),
+
+    DCBT_OPCODE    = (31u << OPCODE_SHIFT |  278u << 1),
+    DCBTST_OPCODE  = (31u << OPCODE_SHIFT |  246u << 1),
+    ICBI_OPCODE    = (31u << OPCODE_SHIFT |  982u << 1),
+
+    // Instruction synchronization
+    ISYNC_OPCODE   = (19u << OPCODE_SHIFT |  150u << 1),
+    // Memory barriers
+    SYNC_OPCODE    = (31u << OPCODE_SHIFT |  598u << 1),
+    EIEIO_OPCODE   = (31u << OPCODE_SHIFT |  854u << 1),
+
+    // Trap instructions
+    TDI_OPCODE     = (2u  << OPCODE_SHIFT),
+    TWI_OPCODE     = (3u  << OPCODE_SHIFT),
+    TD_OPCODE      = (31u << OPCODE_SHIFT |   68u << 1),
+    TW_OPCODE      = (31u << OPCODE_SHIFT |    4u << 1),
+
+    // Atomics.
+    LWARX_OPCODE   = (31u << OPCODE_SHIFT |   20u << 1),
+    LDARX_OPCODE   = (31u << OPCODE_SHIFT |   84u << 1),
+    STWCX_OPCODE   = (31u << OPCODE_SHIFT |  150u << 1),
+    STDCX_OPCODE   = (31u << OPCODE_SHIFT |  214u << 1)
+
+  };
+
+  // Trap instructions TO bits
+  enum trap_to_bits {
+    // single bits
+    traptoLessThanSigned      = 1 << 4, // 0, left end
+    traptoGreaterThanSigned   = 1 << 3,
+    traptoEqual               = 1 << 2,
+    traptoLessThanUnsigned    = 1 << 1,
+    traptoGreaterThanUnsigned = 1 << 0, // 4, right end
+
+    // compound ones
+    traptoUnconditional       = (traptoLessThanSigned |
+                                 traptoGreaterThanSigned |
+                                 traptoEqual |
+                                 traptoLessThanUnsigned |
+                                 traptoGreaterThanUnsigned)
+  };
+
+  // Branch hints BH field
+  enum branch_hint_bh {
+    // bclr cases:
+    bhintbhBCLRisReturn            = 0,
+    bhintbhBCLRisNotReturnButSame  = 1,
+    bhintbhBCLRisNotPredictable    = 3,
+
+    // bcctr cases:
+    bhintbhBCCTRisNotReturnButSame = 0,
+    bhintbhBCCTRisNotPredictable   = 3
+  };
+
+  // Branch prediction hints AT field
+  enum branch_hint_at {
+    bhintatNoHint     = 0,  // at=00
+    bhintatIsNotTaken = 2,  // at=10
+    bhintatIsTaken    = 3   // at=11
+  };
+
+  // Branch prediction hints
+  enum branch_hint_concept {
+    // Use the same encoding as branch_hint_at to simplify the code.
+    bhintNoHint       = bhintatNoHint,
+    bhintIsNotTaken   = bhintatIsNotTaken,
+    bhintIsTaken      = bhintatIsTaken
+  };
+
+  // Used in BO field of branch instruction.
+  enum branch_condition {
+    bcondCRbiIs0      =  4, // bo=001at
+    bcondCRbiIs1      = 12, // bo=011at
+    bcondAlways       = 20  // bo=10100
+  };
+
+  // Branch condition with combined prediction hints.
+  enum branch_condition_with_hint {
+    bcondCRbiIs0_bhintNoHint     = bcondCRbiIs0 | bhintatNoHint,
+    bcondCRbiIs0_bhintIsNotTaken = bcondCRbiIs0 | bhintatIsNotTaken,
+    bcondCRbiIs0_bhintIsTaken    = bcondCRbiIs0 | bhintatIsTaken,
+    bcondCRbiIs1_bhintNoHint     = bcondCRbiIs1 | bhintatNoHint,
+    bcondCRbiIs1_bhintIsNotTaken = bcondCRbiIs1 | bhintatIsNotTaken,
+    bcondCRbiIs1_bhintIsTaken    = bcondCRbiIs1 | bhintatIsTaken,
+  };
+
+  // Elemental Memory Barriers (>=Power 8)
+  enum Elemental_Membar_mask_bits {
+    StoreStore = 1 << 0,
+    StoreLoad  = 1 << 1,
+    LoadStore  = 1 << 2,
+    LoadLoad   = 1 << 3
+  };
+
+  // Branch prediction hints.
+  inline static int add_bhint_to_boint(const int bhint, const int boint) {
+    switch (boint) {
+      case bcondCRbiIs0:
+      case bcondCRbiIs1:
+        // branch_hint and branch_hint_at have the same encodings
+        assert(   (int)bhintNoHint     == (int)bhintatNoHint
+               && (int)bhintIsNotTaken == (int)bhintatIsNotTaken
+               && (int)bhintIsTaken    == (int)bhintatIsTaken,
+               "wrong encodings");
+        assert((bhint & 0x03) == bhint, "wrong encodings");
+        return (boint & ~0x03) | bhint;
+      case bcondAlways:
+        // no branch_hint
+        return boint;
+      default:
+        ShouldNotReachHere();
+        return 0;
+    }
+  }
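+  // E.g. add_bhint_to_boint(bhintIsTaken, bcondCRbiIs0) yields
+  // bcondCRbiIs0_bhintIsTaken (0b00111): branch if the CR bit is 0, predicted taken.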
+
+  // Extract bcond from boint.
+  inline static int inv_boint_bcond(const int boint) {
+    int r_bcond = boint & ~0x03;
+    assert(r_bcond == bcondCRbiIs0 ||
+           r_bcond == bcondCRbiIs1 ||
+           r_bcond == bcondAlways,
+           "bad branch condition");
+    return r_bcond;
+  }
+
+  // Extract bhint from boint.
+  inline static int inv_boint_bhint(const int boint) {
+    int r_bhint = boint & 0x03;
+    assert(r_bhint == bhintatNoHint ||
+           r_bhint == bhintatIsNotTaken ||
+           r_bhint == bhintatIsTaken,
+           "bad branch hint");
+    return r_bhint;
+  }
+
+  // Calculate opposite of given bcond.
+  inline static int opposite_bcond(const int bcond) {
+    switch (bcond) {
+      case bcondCRbiIs0:
+        return bcondCRbiIs1;
+      case bcondCRbiIs1:
+        return bcondCRbiIs0;
+      default:
+        ShouldNotReachHere();
+        return 0;
+    }
+  }
+
+  // Calculate opposite of given bhint.
+  inline static int opposite_bhint(const int bhint) {
+    switch (bhint) {
+      case bhintatNoHint:
+        return bhintatNoHint;
+      case bhintatIsNotTaken:
+        return bhintatIsTaken;
+      case bhintatIsTaken:
+        return bhintatIsNotTaken;
+      default:
+        ShouldNotReachHere();
+        return 0;
+    }
+  }
+
+  // PPC branch instructions
+  enum ppcops {
+    b_op    = 18,
+    bc_op   = 16,
+    bcr_op  = 19
+  };
+
+  enum Condition {
+    negative         = 0,
+    less             = 0,
+    positive         = 1,
+    greater          = 1,
+    zero             = 2,
+    equal            = 2,
+    summary_overflow = 3,
+  };
+
+ public:
+  // Helper functions for groups of instructions
+
+  enum Predict { pt = 1, pn = 0 }; // pt = predict taken
+
+  // instruction must start at passed address
+  static int instr_len(unsigned char *instr) { return BytesPerInstWord; }
+
+  // instruction must be left-justified in argument
+  static int instr_len(unsigned long instr)  { return BytesPerInstWord; }
+
+  // longest instructions
+  static int instr_maxlen() { return BytesPerInstWord; }
+
+  // Test if x is within signed immediate range for nbits.
+  static bool is_simm(int x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 32, "out of bounds");
+    const int   min      = -( ((int)1) << nbits-1 );
+    const int   maxplus1 =  ( ((int)1) << nbits-1 );
+    return min <= x && x < maxplus1;
+  }
+
+  static bool is_simm(jlong x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 64, "out of bounds");
+    const jlong min      = -( ((jlong)1) << nbits-1 );
+    const jlong maxplus1 =  ( ((jlong)1) << nbits-1 );
+    return min <= x && x < maxplus1;
+  }
+
+  // Test if x is within unsigned immediate range for nbits
+  static bool is_uimm(int x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 32, "out of bounds");
+    const int   maxplus1 = ( ((int)1) << nbits );
+    return 0 <= x && x < maxplus1;
+  }
+
+  static bool is_uimm(jlong x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 64, "out of bounds");
+    const jlong maxplus1 =  ( ((jlong)1) << nbits );
+    return 0 <= x && x < maxplus1;
+  }
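+  // E.g. is_simm(-0x8000, 16) and is_uimm(0xffff, 16) hold, while
+  // is_simm(0x8000, 16) does not.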
+
+ protected:
+  // helpers
+
+  // X is supposed to fit in a field "nbits" wide
+  // and be sign-extended. Check the range.
+  static void assert_signed_range(intptr_t x, int nbits) {
+    assert(nbits == 32 || (-(1 << nbits-1) <= x && x < (1 << nbits-1)),
+           "value out of range");
+  }
+
+  static void assert_signed_word_disp_range(intptr_t x, int nbits) {
+    assert((x & 3) == 0, "not word aligned");
+    assert_signed_range(x, nbits + 2);
+  }
+
+  static void assert_unsigned_const(int x, int nbits) {
+    assert(juint(x) < juint(1 << nbits), "unsigned constant out of range");
+  }
+
+  static int fmask(juint hi_bit, juint lo_bit) {
+    assert(hi_bit >= lo_bit && hi_bit < 32, "bad bits");
+    return (1 << ( hi_bit-lo_bit + 1 )) - 1;
+  }
+
+  // inverse of u_field
+  static int inv_u_field(int x, int hi_bit, int lo_bit) {
+    juint r = juint(x) >> lo_bit;
+    r &= fmask(hi_bit, lo_bit);
+    return int(r);
+  }
+
+  // signed version: extract from field and sign-extend
+  static int inv_s_field_ppc(int x, int hi_bit, int lo_bit) {
+    x = x << (31-hi_bit);
+    x = x >> (31-hi_bit+lo_bit);
+    return x;
+  }
+
+  static int u_field(int x, int hi_bit, int lo_bit) {
+    assert((x & ~fmask(hi_bit, lo_bit)) == 0, "value out of range");
+    int r = x << lo_bit;
+    assert(inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
+    return r;
+  }
+
+  // Same as u_field for signed values
+  static int s_field(int x, int hi_bit, int lo_bit) {
+    int nbits = hi_bit - lo_bit + 1;
+    assert(nbits == 32 || (-(1 << nbits-1) <= x && x < (1 << nbits-1)),
+      "value out of range");
+    x &= fmask(hi_bit, lo_bit);
+    int r = x << lo_bit;
+    return r;
+  }
+
+  // inv_op for ppc instructions
+  static int inv_op_ppc(int x) { return inv_u_field(x, 31, 26); }
+
+  // Determine target address from li, bd field of branch instruction.
+  static intptr_t inv_li_field(int x) {
+    intptr_t r = inv_s_field_ppc(x, 25, 2);
+    r = (r << 2);
+    return r;
+  }
+  static intptr_t inv_bd_field(int x, intptr_t pos) {
+    intptr_t r = inv_s_field_ppc(x, 15, 2);
+    r = (r << 2) + pos;
+    return r;
+  }
+
+  #define inv_opp_u_field(x, hi_bit, lo_bit) inv_u_field(x, 31-(lo_bit), 31-(hi_bit))
+  #define inv_opp_s_field(x, hi_bit, lo_bit) inv_s_field_ppc(x, 31-(lo_bit), 31-(hi_bit))
+  // Extract instruction fields from instruction words.
+ public:
+  static int inv_ra_field(int x)  { return inv_opp_u_field(x, 15, 11); }
+  static int inv_rb_field(int x)  { return inv_opp_u_field(x, 20, 16); }
+  static int inv_rt_field(int x)  { return inv_opp_u_field(x, 10,  6); }
+  static int inv_rta_field(int x) { return inv_opp_u_field(x, 15, 11); }
+  static int inv_rs_field(int x)  { return inv_opp_u_field(x, 10,  6); }
+  // Ds uses opp_s_field(x, 31, 16), but lowest 2 bits must be 0.
+  // Inv_ds_field uses range (x, 29, 16) but shifts by 2 to ensure that lowest bits are 0.
+  static int inv_ds_field(int x)  { return inv_opp_s_field(x, 29, 16) << 2; }
+  static int inv_d1_field(int x)  { return inv_opp_s_field(x, 31, 16); }
+  static int inv_si_field(int x)  { return inv_opp_s_field(x, 31, 16); }
+  static int inv_to_field(int x)  { return inv_opp_u_field(x, 10, 6);  }
+  static int inv_lk_field(int x)  { return inv_opp_u_field(x, 31, 31); }
+  static int inv_bo_field(int x)  { return inv_opp_u_field(x, 10,  6); }
+  static int inv_bi_field(int x)  { return inv_opp_u_field(x, 15, 11); }
+
+  #define opp_u_field(x, hi_bit, lo_bit) u_field(x, 31-(lo_bit), 31-(hi_bit))
+  #define opp_s_field(x, hi_bit, lo_bit) s_field(x, 31-(lo_bit), 31-(hi_bit))
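+  // These macros, like the inv_opp_* ones above, translate the big-endian bit
+  // numbers used in the PPC books (bit 0 = MSB) into the LSB-0 numbers expected
+  // by u_field/s_field: e.g. opp_u_field(x, 15, 11) places x in spec bits 11-15,
+  // which are bits 16-20 counted from the least significant end.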
+
+  // instruction fields
+  static int aa(       int         x)  { return  opp_u_field(x,             30, 30); }
+  static int ba(       int         x)  { return  opp_u_field(x,             15, 11); }
+  static int bb(       int         x)  { return  opp_u_field(x,             20, 16); }
+  static int bc(       int         x)  { return  opp_u_field(x,             25, 21); }
+  static int bd(       int         x)  { return  opp_s_field(x,             29, 16); }
+  static int bf( ConditionRegister cr) { return  bf(cr->encoding()); }
+  static int bf(       int         x)  { return  opp_u_field(x,              8,  6); }
+  static int bfa(ConditionRegister cr) { return  bfa(cr->encoding()); }
+  static int bfa(      int         x)  { return  opp_u_field(x,             13, 11); }
+  static int bh(       int         x)  { return  opp_u_field(x,             20, 19); }
+  static int bi(       int         x)  { return  opp_u_field(x,             15, 11); }
+  static int bi0(ConditionRegister cr, Condition c) { return (cr->encoding() << 2) | c; }
+  static int bo(       int         x)  { return  opp_u_field(x,             10,  6); }
+  static int bt(       int         x)  { return  opp_u_field(x,             10,  6); }
+  static int d1(       int         x)  { return  opp_s_field(x,             31, 16); }
+  static int ds(       int         x)  { assert((x & 0x3) == 0, "unaligned offset"); return opp_s_field(x, 31, 16); }
+  static int eh(       int         x)  { return  opp_u_field(x,             31, 31); }
+  static int flm(      int         x)  { return  opp_u_field(x,             14,  7); }
+  static int fra(    FloatRegister r)  { return  fra(r->encoding());}
+  static int frb(    FloatRegister r)  { return  frb(r->encoding());}
+  static int frc(    FloatRegister r)  { return  frc(r->encoding());}
+  static int frs(    FloatRegister r)  { return  frs(r->encoding());}
+  static int frt(    FloatRegister r)  { return  frt(r->encoding());}
+  static int fra(      int         x)  { return  opp_u_field(x,             15, 11); }
+  static int frb(      int         x)  { return  opp_u_field(x,             20, 16); }
+  static int frc(      int         x)  { return  opp_u_field(x,             25, 21); }
+  static int frs(      int         x)  { return  opp_u_field(x,             10,  6); }
+  static int frt(      int         x)  { return  opp_u_field(x,             10,  6); }
+  static int fxm(      int         x)  { return  opp_u_field(x,             19, 12); }
+  static int l10(      int         x)  { return  opp_u_field(x,             10, 10); }
+  static int l15(      int         x)  { return  opp_u_field(x,             15, 15); }
+  static int l910(     int         x)  { return  opp_u_field(x,             10,  9); }
+  static int e1215(    int         x)  { return  opp_u_field(x,             15, 12); }
+  static int lev(      int         x)  { return  opp_u_field(x,             26, 20); }
+  static int li(       int         x)  { return  opp_s_field(x,             29,  6); }
+  static int lk(       int         x)  { return  opp_u_field(x,             31, 31); }
+  static int mb2125(   int         x)  { return  opp_u_field(x,             25, 21); }
+  static int me2630(   int         x)  { return  opp_u_field(x,             30, 26); }
+  static int mb2126(   int         x)  { return  opp_u_field(((x & 0x1f) << 1) | ((x & 0x20) >> 5), 26, 21); }
+  static int me2126(   int         x)  { return  mb2126(x); }
+  static int nb(       int         x)  { return  opp_u_field(x,             20, 16); }
+  //static int opcd(   int         x)  { return  opp_u_field(x,              5,  0); } // is contained in our opcodes
+  static int oe(       int         x)  { return  opp_u_field(x,             21, 21); }
+  static int ra(       Register    r)  { return  ra(r->encoding()); }
+  static int ra(       int         x)  { return  opp_u_field(x,             15, 11); }
+  static int rb(       Register    r)  { return  rb(r->encoding()); }
+  static int rb(       int         x)  { return  opp_u_field(x,             20, 16); }
+  static int rc(       int         x)  { return  opp_u_field(x,             31, 31); }
+  static int rs(       Register    r)  { return  rs(r->encoding()); }
+  static int rs(       int         x)  { return  opp_u_field(x,             10,  6); }
+  // We don't want to use R0 in memory accesses, because it reads as the value 0 there.
+  static int ra0mem(   Register    r)  { assert(r != R0, "cannot use register R0 in memory access"); return ra(r); }
+  static int ra0mem(   int         x)  { assert(x != 0,  "cannot use register 0 in memory access");  return ra(x); }
+
+  // register r is target
+  static int rt(       Register    r)  { return rs(r); }
+  static int rt(       int         x)  { return rs(x); }
+  static int rta(      Register    r)  { return ra(r); }
+  static int rta0mem(  Register    r)  { rta(r); return ra0mem(r); }
+
+  static int sh1620(   int         x)  { return  opp_u_field(x,             20, 16); }
+  static int sh30(     int         x)  { return  opp_u_field(x,             30, 30); }
+  static int sh162030( int         x)  { return  sh1620(x & 0x1f) | sh30((x & 0x20) >> 5); }
+  static int si(       int         x)  { return  opp_s_field(x,             31, 16); }
+  static int spr(      int         x)  { return  opp_u_field(x,             20, 11); }
+  static int sr(       int         x)  { return  opp_u_field(x,             15, 12); }
+  static int tbr(      int         x)  { return  opp_u_field(x,             20, 11); }
+  static int th(       int         x)  { return  opp_u_field(x,             10,  7); }
+  static int thct(     int         x)  { assert((x&8) == 0, "must be valid cache specification");  return th(x); }
+  static int thds(     int         x)  { assert((x&8) == 8, "must be valid stream specification"); return th(x); }
+  static int to(       int         x)  { return  opp_u_field(x,             10,  6); }
+  static int u(        int         x)  { return  opp_u_field(x,             19, 16); }
+  static int ui(       int         x)  { return  opp_u_field(x,             31, 16); }
+
+  // Support vector instructions for >= Power6.
+  static int vra(      int         x)  { return  opp_u_field(x,             15, 11); }
+  static int vrb(      int         x)  { return  opp_u_field(x,             20, 16); }
+  static int vrc(      int         x)  { return  opp_u_field(x,             25, 21); }
+  static int vrs(      int         x)  { return  opp_u_field(x,             10,  6); }
+  static int vrt(      int         x)  { return  opp_u_field(x,             10,  6); }
+
+  static int vra(   VectorRegister r)  { return  vra(r->encoding());}
+  static int vrb(   VectorRegister r)  { return  vrb(r->encoding());}
+  static int vrc(   VectorRegister r)  { return  vrc(r->encoding());}
+  static int vrs(   VectorRegister r)  { return  vrs(r->encoding());}
+  static int vrt(   VectorRegister r)  { return  vrt(r->encoding());}
+
+  static int vsplt_uim( int        x)  { return  opp_u_field(x,             15, 12); } // for vsplt* instructions
+  static int vsplti_sim(int        x)  { return  opp_u_field(x,             15, 11); } // for vsplti* instructions
+  static int vsldoi_shb(int        x)  { return  opp_u_field(x,             25, 22); } // for vsldoi instruction
+  static int vcmp_rc(   int        x)  { return  opp_u_field(x,             21, 21); } // for vcmp* instructions
+
+  //static int xo1(     int        x)  { return  opp_u_field(x,             29, 21); }// is contained in our opcodes
+  //static int xo2(     int        x)  { return  opp_u_field(x,             30, 21); }// is contained in our opcodes
+  //static int xo3(     int        x)  { return  opp_u_field(x,             30, 22); }// is contained in our opcodes
+  //static int xo4(     int        x)  { return  opp_u_field(x,             30, 26); }// is contained in our opcodes
+  //static int xo5(     int        x)  { return  opp_u_field(x,             29, 27); }// is contained in our opcodes
+  //static int xo6(     int        x)  { return  opp_u_field(x,             30, 27); }// is contained in our opcodes
+  //static int xo7(     int        x)  { return  opp_u_field(x,             31, 30); }// is contained in our opcodes
+
+ protected:
+  // Compute relative address for branch.
+  static intptr_t disp(intptr_t x, intptr_t off) {
+    int xx = x - off;
+    xx = xx >> 2;
+    return xx;
+  }
+
+ public:
+  // signed immediate, in low bits, nbits long
+  static int simm(int x, int nbits) {
+    assert_signed_range(x, nbits);
+    return x & ((1 << nbits) - 1);
+  }
+
+  // unsigned immediate, in low bits, nbits long
+  static int uimm(int x, int nbits) {
+    assert_unsigned_const(x, nbits);
+    return x & ((1 << nbits) - 1);
+  }
+
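+  // set_imm/get_imm patch/read the second halfword of a 4-byte instruction,
+  // which on a big-endian target holds the 16-bit immediate (si/ui/d) field.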
+  static void set_imm(int* instr, short s) {
+    short* p = ((short *)instr) + 1;
+    *p = s;
+  }
+
+  static int get_imm(address a, int instruction_number) {
+    short imm;
+    short *p =((short *)a)+2*instruction_number+1;
+    imm = *p;
+    return (int)imm;
+  }
+
+  static inline int hi16_signed(  int x) { return (int)(int16_t)(x >> 16); }
+  static inline int lo16_unsigned(int x) { return x & 0xffff; }
+
+ protected:
+
+  // Extract the top 32 bits in a 64 bit word.
+  static int32_t hi32(int64_t x) {
+    int32_t r = int32_t((uint64_t)x >> 32);
+    return r;
+  }
+
+ public:
+
+  static inline unsigned int align_addr(unsigned int addr, unsigned int a) {
+    return ((addr + (a - 1)) & ~(a - 1));
+  }
+
+  static inline bool is_aligned(unsigned int addr, unsigned int a) {
+    return (0 == addr % a);
+  }
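+  // E.g. align_addr(0x1005, 8) == 0x1008, and is_aligned(0x1008, 8) holds.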
+
+  void flush() {
+    AbstractAssembler::flush();
+  }
+
+  inline void emit_int32(int);  // shadows AbstractAssembler::emit_int32
+  inline void emit_data(int);
+  inline void emit_data(int, RelocationHolder const&);
+  inline void emit_data(int, relocInfo::relocType rtype);
+
+  // Emit an address.
+  inline address emit_addr(const address addr = NULL);
+
+#if !defined(ABI_ELFv2)
+  // Emit a function descriptor with the specified entry point, TOC,
+  // and ENV. If the entry point is NULL, the descriptor will point
+  // just past the descriptor.
+  // Use values from friend functions as defaults.
+  inline address emit_fd(address entry = NULL,
+                         address toc = (address) FunctionDescriptor::friend_toc,
+                         address env = (address) FunctionDescriptor::friend_env);
+#endif
+
+  /////////////////////////////////////////////////////////////////////////////////////
+  // PPC instructions
+  /////////////////////////////////////////////////////////////////////////////////////
+
+  // Memory instructions treat r0 as a hard-coded 0, e.g. to simulate loading
+  // immediates. The normal instruction encoders enforce that r0 is not
+  // passed to them. Use either the extended-mnemonic encoders or the special
+  // ra0 versions.
+
+  // Issue an illegal instruction.
+  inline void illtrap();
+  static inline bool is_illtrap(int x);
+
+  // PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
+  inline void addi( Register d, Register a, int si16);
+  inline void addis(Register d, Register a, int si16);
+ private:
+  inline void addi_r0ok( Register d, Register a, int si16);
+  inline void addis_r0ok(Register d, Register a, int si16);
+ public:
+  inline void addic_( Register d, Register a, int si16);
+  inline void subfic( Register d, Register a, int si16);
+  inline void add(    Register d, Register a, Register b);
+  inline void add_(   Register d, Register a, Register b);
+  inline void subf(   Register d, Register a, Register b);  // d = b - a    "Sub_from", as in ppc spec.
+  inline void sub(    Register d, Register a, Register b);  // d = a - b    Swap operands of subf for readability.
+  inline void subf_(  Register d, Register a, Register b);
+  inline void addc(   Register d, Register a, Register b);
+  inline void addc_(  Register d, Register a, Register b);
+  inline void subfc(  Register d, Register a, Register b);
+  inline void subfc_( Register d, Register a, Register b);
+  inline void adde(   Register d, Register a, Register b);
+  inline void adde_(  Register d, Register a, Register b);
+  inline void subfe(  Register d, Register a, Register b);
+  inline void subfe_( Register d, Register a, Register b);
+  inline void neg(    Register d, Register a);
+  inline void neg_(   Register d, Register a);
+  inline void mulli(  Register d, Register a, int si16);
+  inline void mulld(  Register d, Register a, Register b);
+  inline void mulld_( Register d, Register a, Register b);
+  inline void mullw(  Register d, Register a, Register b);
+  inline void mullw_( Register d, Register a, Register b);
+  inline void mulhw(  Register d, Register a, Register b);
+  inline void mulhw_( Register d, Register a, Register b);
+  inline void mulhd(  Register d, Register a, Register b);
+  inline void mulhd_( Register d, Register a, Register b);
+  inline void mulhdu( Register d, Register a, Register b);
+  inline void mulhdu_(Register d, Register a, Register b);
+  inline void divd(   Register d, Register a, Register b);
+  inline void divd_(  Register d, Register a, Register b);
+  inline void divw(   Register d, Register a, Register b);
+  inline void divw_(  Register d, Register a, Register b);
+
+  // extended mnemonics
+  inline void li(   Register d, int si16);
+  inline void lis(  Register d, int si16);
+  inline void addir(Register d, int si16, Register a);
+
+  static bool is_addi(int x) {
+     return ADDI_OPCODE == (x & ADDI_OPCODE_MASK);
+  }
+  static bool is_addis(int x) {
+     return ADDIS_OPCODE == (x & ADDIS_OPCODE_MASK);
+  }
+  static bool is_bxx(int x) {
+     return BXX_OPCODE == (x & BXX_OPCODE_MASK);
+  }
+  static bool is_b(int x) {
+     return BXX_OPCODE == (x & BXX_OPCODE_MASK) && inv_lk_field(x) == 0;
+  }
+  static bool is_bl(int x) {
+     return BXX_OPCODE == (x & BXX_OPCODE_MASK) && inv_lk_field(x) == 1;
+  }
+  static bool is_bcxx(int x) {
+     return BCXX_OPCODE == (x & BCXX_OPCODE_MASK);
+  }
+  static bool is_bxx_or_bcxx(int x) {
+     return is_bxx(x) || is_bcxx(x);
+  }
+  static bool is_bctrl(int x) {
+     return x == 0x4e800421;
+  }
+  static bool is_bctr(int x) {
+     return x == 0x4e800420;
+  }
+  static bool is_bclr(int x) {
+     return BCLR_OPCODE == (x & XL_FORM_OPCODE_MASK);
+  }
+  static bool is_li(int x) {
+     return is_addi(x) && inv_ra_field(x)==0;
+  }
+  static bool is_lis(int x) {
+     return is_addis(x) && inv_ra_field(x)==0;
+  }
+  static bool is_mtctr(int x) {
+     return MTCTR_OPCODE == (x & MTCTR_OPCODE_MASK);
+  }
+  static bool is_ld(int x) {
+     return LD_OPCODE == (x & LD_OPCODE_MASK);
+  }
+  static bool is_std(int x) {
+     return STD_OPCODE == (x & STD_OPCODE_MASK);
+  }
+  static bool is_stdu(int x) {
+     return STDU_OPCODE == (x & STDU_OPCODE_MASK);
+  }
+  static bool is_stdx(int x) {
+     return STDX_OPCODE == (x & STDX_OPCODE_MASK);
+  }
+  static bool is_stdux(int x) {
+     return STDUX_OPCODE == (x & STDUX_OPCODE_MASK);
+  }
+  static bool is_stwx(int x) {
+     return STWX_OPCODE == (x & STWX_OPCODE_MASK);
+  }
+  static bool is_stwux(int x) {
+     return STWUX_OPCODE == (x & STWUX_OPCODE_MASK);
+  }
+  static bool is_stw(int x) {
+     return STW_OPCODE == (x & STW_OPCODE_MASK);
+  }
+  static bool is_stwu(int x) {
+     return STWU_OPCODE == (x & STWU_OPCODE_MASK);
+  }
+  static bool is_ori(int x) {
+     return ORI_OPCODE == (x & ORI_OPCODE_MASK);
+  };
+  static bool is_oris(int x) {
+     return ORIS_OPCODE == (x & ORIS_OPCODE_MASK);
+  };
+  static bool is_rldicr(int x) {
+     return (RLDICR_OPCODE == (x & RLDICR_OPCODE_MASK));
+  };
+  static bool is_nop(int x) {
+    return x == 0x60000000;
+  }
+  // endgroup opcode for Power6
+  static bool is_endgroup(int x) {
+    return is_ori(x) && inv_ra_field(x) == 1 && inv_rs_field(x) == 1 && inv_d1_field(x) == 0;
+  }
+
+
+ private:
+  // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
+  inline void cmpi( ConditionRegister bf, int l, Register a, int si16);
+  inline void cmp(  ConditionRegister bf, int l, Register a, Register b);
+  inline void cmpli(ConditionRegister bf, int l, Register a, int ui16);
+  inline void cmpl( ConditionRegister bf, int l, Register a, Register b);
+
+ public:
+  // extended mnemonics of Compare Instructions
+  inline void cmpwi( ConditionRegister crx, Register a, int si16);
+  inline void cmpdi( ConditionRegister crx, Register a, int si16);
+  inline void cmpw(  ConditionRegister crx, Register a, Register b);
+  inline void cmpd(  ConditionRegister crx, Register a, Register b);
+  inline void cmplwi(ConditionRegister crx, Register a, int ui16);
+  inline void cmpldi(ConditionRegister crx, Register a, int ui16);
+  inline void cmplw( ConditionRegister crx, Register a, Register b);
+  inline void cmpld( ConditionRegister crx, Register a, Register b);
+
+  inline void isel(   Register d, Register a, Register b, int bc);
+  // Convenience version which takes a condition register, a condition code and an invert flag. Omit b to keep the old value.
+  inline void isel(   Register d, ConditionRegister cr, Condition cc, bool inv, Register a, Register b = noreg);
+  // Set d = 0 if (cr.cc) equals 1, otherwise b.
+  inline void isel_0( Register d, ConditionRegister cr, Condition cc, Register b = noreg);
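+  // Usage sketch (illustrative only; the condition-code name `equal' is
+  // assumed from the Condition enum defined earlier in this class):
+  //   cmpd(CCR0, R4, R5);
+  //   isel(  R3, CCR0, equal, false, R6, R7); // R3 = (R4 == R5) ? R6 : R7
+  //   isel_0(R3, CCR0, equal, R7);            // R3 = (R4 == R5) ? 0  : R7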
+
+  // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
+         void andi(   Register a, Register s, int ui16);   // optimized version
+  inline void andi_(  Register a, Register s, int ui16);
+  inline void andis_( Register a, Register s, int ui16);
+  inline void ori(    Register a, Register s, int ui16);
+  inline void oris(   Register a, Register s, int ui16);
+  inline void xori(   Register a, Register s, int ui16);
+  inline void xoris(  Register a, Register s, int ui16);
+  inline void andr(   Register a, Register s, Register b);  // suffixed by 'r' as 'and' is C++ keyword
+  inline void and_(   Register a, Register s, Register b);
+  // Turn or(rx,rx,rx) into a nop and avoid accidentally emitting an
+  // SMT-priority change instruction (see SMT instructions below and the
+  // sketch at the end of this block).
+  inline void or_unchecked(Register a, Register s, Register b);
+  inline void orr(    Register a, Register s, Register b);  // suffixed by 'r' as 'or' is C++ keyword
+  inline void or_(    Register a, Register s, Register b);
+  inline void xorr(   Register a, Register s, Register b);  // suffixed by 'r' as 'xor' is C++ keyword
+  inline void xor_(   Register a, Register s, Register b);
+  inline void nand(   Register a, Register s, Register b);
+  inline void nand_(  Register a, Register s, Register b);
+  inline void nor(    Register a, Register s, Register b);
+  inline void nor_(   Register a, Register s, Register b);
+  inline void andc(   Register a, Register s, Register b);
+  inline void andc_(  Register a, Register s, Register b);
+  inline void orc(    Register a, Register s, Register b);
+  inline void orc_(   Register a, Register s, Register b);
+  inline void extsb(  Register a, Register s);
+  inline void extsh(  Register a, Register s);
+  inline void extsw(  Register a, Register s);
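+  // Sketch (illustrative only): orr() guards against the SMT hint encodings
+  // mentioned above, while or_unchecked() emits the raw encoding:
+  //   orr(R3, R3, R3);           // all operands equal -> emits a plain nop
+  //   orr(R3, R4, R5);           // distinct operands  -> ordinary or
+  //   or_unchecked(R3, R3, R3);  // raw or, may be decoded as an SMT hint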
+
+  // extended mnemonics
+  inline void nop();
+  // NOP for FP and BR units (different versions to allow them to be in one group)
+  inline void fpnop0();
+  inline void fpnop1();
+  inline void brnop0();
+  inline void brnop1();
+  inline void brnop2();
+
+  inline void mr(      Register d, Register s);
+  inline void ori_opt( Register d, int ui16);
+  inline void oris_opt(Register d, int ui16);
+
+  // endgroup opcode for Power6
+  inline void endgroup();
+
+  // count instructions
+  inline void cntlzw(  Register a, Register s);
+  inline void cntlzw_( Register a, Register s);
+  inline void cntlzd(  Register a, Register s);
+  inline void cntlzd_( Register a, Register s);
+
+  // PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
+  inline void sld(     Register a, Register s, Register b);
+  inline void sld_(    Register a, Register s, Register b);
+  inline void slw(     Register a, Register s, Register b);
+  inline void slw_(    Register a, Register s, Register b);
+  inline void srd(     Register a, Register s, Register b);
+  inline void srd_(    Register a, Register s, Register b);
+  inline void srw(     Register a, Register s, Register b);
+  inline void srw_(    Register a, Register s, Register b);
+  inline void srad(    Register a, Register s, Register b);
+  inline void srad_(   Register a, Register s, Register b);
+  inline void sraw(    Register a, Register s, Register b);
+  inline void sraw_(   Register a, Register s, Register b);
+  inline void sradi(   Register a, Register s, int sh6);
+  inline void sradi_(  Register a, Register s, int sh6);
+  inline void srawi(   Register a, Register s, int sh5);
+  inline void srawi_(  Register a, Register s, int sh5);
+
+  // extended mnemonics for Shift Instructions
+  inline void sldi(    Register a, Register s, int sh6);
+  inline void sldi_(   Register a, Register s, int sh6);
+  inline void slwi(    Register a, Register s, int sh5);
+  inline void slwi_(   Register a, Register s, int sh5);
+  inline void srdi(    Register a, Register s, int sh6);
+  inline void srdi_(   Register a, Register s, int sh6);
+  inline void srwi(    Register a, Register s, int sh5);
+  inline void srwi_(   Register a, Register s, int sh5);
+
+  inline void clrrdi(  Register a, Register s, int ui6);
+  inline void clrrdi_( Register a, Register s, int ui6);
+  inline void clrldi(  Register a, Register s, int ui6);
+  inline void clrldi_( Register a, Register s, int ui6);
+  inline void clrlsldi(Register a, Register s, int clrl6, int shl6);
+  inline void clrlsldi_(Register a, Register s, int clrl6, int shl6);
+  inline void extrdi(  Register a, Register s, int n, int b);
+  // testbit with condition register
+  inline void testbitdi(ConditionRegister cr, Register a, Register s, int ui6);
+
+  // rotate instructions
+  inline void rotldi(  Register a, Register s, int n);
+  inline void rotrdi(  Register a, Register s, int n);
+  inline void rotlwi(  Register a, Register s, int n);
+  inline void rotrwi(  Register a, Register s, int n);
+
+  // Rotate Instructions
+  inline void rldic(   Register a, Register s, int sh6, int mb6);
+  inline void rldic_(  Register a, Register s, int sh6, int mb6);
+  inline void rldicr(  Register a, Register s, int sh6, int mb6);
+  inline void rldicr_( Register a, Register s, int sh6, int mb6);
+  inline void rldicl(  Register a, Register s, int sh6, int mb6);
+  inline void rldicl_( Register a, Register s, int sh6, int mb6);
+  inline void rlwinm(  Register a, Register s, int sh5, int mb5, int me5);
+  inline void rlwinm_( Register a, Register s, int sh5, int mb5, int me5);
+  inline void rldimi(  Register a, Register s, int sh6, int mb6);
+  inline void rldimi_( Register a, Register s, int sh6, int mb6);
+  inline void rlwimi(  Register a, Register s, int sh5, int mb5, int me5);
+  inline void insrdi(  Register a, Register s, int n,   int b);
+  inline void insrwi(  Register a, Register s, int n,   int b);
+
+  // PPC 1, section 3.3.2 Fixed-Point Load Instructions
+  // 4 bytes
+  inline void lwzx( Register d, Register s1, Register s2);
+  inline void lwz(  Register d, int si16,    Register s1);
+  inline void lwzu( Register d, int si16,    Register s1);
+
+  // 4 bytes
+  inline void lwax( Register d, Register s1, Register s2);
+  inline void lwa(  Register d, int si16,    Register s1);
+
+  // 2 bytes
+  inline void lhzx( Register d, Register s1, Register s2);
+  inline void lhz(  Register d, int si16,    Register s1);
+  inline void lhzu( Register d, int si16,    Register s1);
+
+  // 2 bytes
+  inline void lhax( Register d, Register s1, Register s2);
+  inline void lha(  Register d, int si16,    Register s1);
+  inline void lhau( Register d, int si16,    Register s1);
+
+  // 1 byte
+  inline void lbzx( Register d, Register s1, Register s2);
+  inline void lbz(  Register d, int si16,    Register s1);
+  inline void lbzu( Register d, int si16,    Register s1);
+
+  // 8 bytes
+  inline void ldx(  Register d, Register s1, Register s2);
+  inline void ld(   Register d, int si16,    Register s1);
+  inline void ldu(  Register d, int si16,    Register s1);
+
+  //  PPC 1, section 3.3.3 Fixed-Point Store Instructions
+  inline void stwx( Register d, Register s1, Register s2);
+  inline void stw(  Register d, int si16,    Register s1);
+  inline void stwu( Register d, int si16,    Register s1);
+
+  inline void sthx( Register d, Register s1, Register s2);
+  inline void sth(  Register d, int si16,    Register s1);
+  inline void sthu( Register d, int si16,    Register s1);
+
+  inline void stbx( Register d, Register s1, Register s2);
+  inline void stb(  Register d, int si16,    Register s1);
+  inline void stbu( Register d, int si16,    Register s1);
+
+  inline void stdx( Register d, Register s1, Register s2);
+  inline void std(  Register d, int si16,    Register s1);
+  inline void stdu( Register d, int si16,    Register s1);
+  inline void stdux(Register s, Register a,  Register b);
+
+  // PPC 1, section 3.3.13 Move To/From System Register Instructions
+  inline void mtlr( Register s1);
+  inline void mflr( Register d);
+  inline void mtctr(Register s1);
+  inline void mfctr(Register d);
+  inline void mtcrf(int fxm, Register s);
+  inline void mfcr( Register d);
+  inline void mcrf( ConditionRegister crd, ConditionRegister cra);
+  inline void mtcr( Register s);
+
+  // PPC 1, section 2.4.1 Branch Instructions
+  inline void b(  address a, relocInfo::relocType rt = relocInfo::none);
+  inline void b(  Label& L);
+  inline void bl( address a, relocInfo::relocType rt = relocInfo::none);
+  inline void bl( Label& L);
+  inline void bc( int boint, int biint, address a, relocInfo::relocType rt = relocInfo::none);
+  inline void bc( int boint, int biint, Label& L);
+  inline void bcl(int boint, int biint, address a, relocInfo::relocType rt = relocInfo::none);
+  inline void bcl(int boint, int biint, Label& L);
+
+  inline void bclr(  int boint, int biint, int bhint, relocInfo::relocType rt = relocInfo::none);
+  inline void bclrl( int boint, int biint, int bhint, relocInfo::relocType rt = relocInfo::none);
+  inline void bcctr( int boint, int biint, int bhint = bhintbhBCCTRisNotReturnButSame,
+                         relocInfo::relocType rt = relocInfo::none);
+  inline void bcctrl(int boint, int biint, int bhint = bhintbhBCLRisReturn,
+                         relocInfo::relocType rt = relocInfo::none);
+
+  // helper functions for b, bcxx
+  inline bool is_within_range_of_b(address a, address pc);
+  inline bool is_within_range_of_bcxx(address a, address pc);
+
+  // get the destination of a bxx branch (b, bl, ba, bla)
+  static inline address  bxx_destination(address baddr);
+  static inline address  bxx_destination(int instr, address pc);
+  static inline intptr_t bxx_destination_offset(int instr, intptr_t bxx_pos);
+
+  // extended mnemonics for branch instructions
+  inline void blt(ConditionRegister crx, Label& L);
+  inline void bgt(ConditionRegister crx, Label& L);
+  inline void beq(ConditionRegister crx, Label& L);
+  inline void bso(ConditionRegister crx, Label& L);
+  inline void bge(ConditionRegister crx, Label& L);
+  inline void ble(ConditionRegister crx, Label& L);
+  inline void bne(ConditionRegister crx, Label& L);
+  inline void bns(ConditionRegister crx, Label& L);
+
+  // Branch instructions with static prediction hints.
+  inline void blt_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bgt_predict_taken(    ConditionRegister crx, Label& L);
+  inline void beq_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bso_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bge_predict_taken(    ConditionRegister crx, Label& L);
+  inline void ble_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bne_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bns_predict_taken(    ConditionRegister crx, Label& L);
+  inline void blt_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bgt_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void beq_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bso_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bge_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void ble_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bne_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bns_predict_not_taken(ConditionRegister crx, Label& L);
+
+  // for use in conjunction with testbitdi:
+  inline void btrue( ConditionRegister crx, Label& L);
+  inline void bfalse(ConditionRegister crx, Label& L);
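+  // Usage sketch (illustrative only; R11 is assumed to serve as a
+  // scratch/result register, L_bit_set is a placeholder label):
+  //   testbitdi(CCR0, R11, R3, 7); // condition register reflects bit 7 of R3
+  //   btrue(CCR0, L_bit_set);      // branch if the tested bit is set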
+
+  inline void bltl(ConditionRegister crx, Label& L);
+  inline void bgtl(ConditionRegister crx, Label& L);
+  inline void beql(ConditionRegister crx, Label& L);
+  inline void bsol(ConditionRegister crx, Label& L);
+  inline void bgel(ConditionRegister crx, Label& L);
+  inline void blel(ConditionRegister crx, Label& L);
+  inline void bnel(ConditionRegister crx, Label& L);
+  inline void bnsl(ConditionRegister crx, Label& L);
+
+  // extended mnemonics for Branch Instructions via LR
+  // We use `blr' for returns.
+  inline void blr(relocInfo::relocType rt = relocInfo::none);
+
+  // extended mnemonics for Branch Instructions with CTR
+  // bdnz means `decrement CTR and jump to L if CTR is not zero'
+  inline void bdnz(Label& L);
+  // Decrement and branch if result is zero.
+  inline void bdz(Label& L);
+  // We use `bctr[l]' for jumps/calls in function descriptor glue
+  // code, e.g. calls to runtime functions (see the call sketch after
+  // this block).
+  inline void bctr( relocInfo::relocType rt = relocInfo::none);
+  inline void bctrl(relocInfo::relocType rt = relocInfo::none);
+  // conditional jumps/branches via CTR
+  inline void beqctr( ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
+  inline void beqctrl(ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
+  inline void bnectr( ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
+  inline void bnectrl(ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
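+  // Call sketch (illustrative only): an indirect call through CTR is
+  // composed from the primitives above:
+  //   mtctr(R12);  // R12 holds the target entry point
+  //   bctrl();     // branch to CTR and set LR (a call)
+  //   ...
+  //   blr();       // return via LR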
+
+  // condition register logic instructions
+  inline void crand( int d, int s1, int s2);
+  inline void crnand(int d, int s1, int s2);
+  inline void cror(  int d, int s1, int s2);
+  inline void crxor( int d, int s1, int s2);
+  inline void crnor( int d, int s1, int s2);
+  inline void creqv( int d, int s1, int s2);
+  inline void crandc(int d, int s1, int s2);
+  inline void crorc( int d, int s1, int s2);
+
+  // icache and dcache related instructions
+  inline void icbi(  Register s1, Register s2);
+  //inline void dcba(Register s1, Register s2); // Instruction for embedded processor only.
+  inline void dcbz(  Register s1, Register s2);
+  inline void dcbst( Register s1, Register s2);
+  inline void dcbf(  Register s1, Register s2);
+
+  enum ct_cache_specification {
+    ct_primary_cache   = 0,
+    ct_secondary_cache = 2
+  };
+  // dcache read hint
+  inline void dcbt(    Register s1, Register s2);
+  inline void dcbtct(  Register s1, Register s2, int ct);
+  inline void dcbtds(  Register s1, Register s2, int ds);
+  // dcache write hint
+  inline void dcbtst(  Register s1, Register s2);
+  inline void dcbtstct(Register s1, Register s2, int ct);
+
+  //  machine barrier instructions:
+  //
+  //  - sync    two-way memory barrier, aka fence
+  //  - lwsync  orders  Store|Store,
+  //                     Load|Store,
+  //                     Load|Load,
+  //            but not Store|Load
+  //  - eieio   orders memory accesses for device memory (only)
+  //  - isync   invalidates speculatively executed instructions
+  //            From the Power ISA 2.06 documentation:
+  //             "[...] an isync instruction prevents the execution of
+  //            instructions following the isync until instructions
+  //            preceding the isync have completed, [...]"
+  //            From IBM's AIX assembler reference:
+  //             "The isync [...] instructions causes the processor to
+  //            refetch any instructions that might have been fetched
+  //            prior to the isync instruction. The instruction isync
+  //            causes the processor to wait for all previous instructions
+  //            to complete. Then any instructions already fetched are
+  //            discarded and instruction processing continues in the
+  //            environment established by the previous instructions."
+  //
+  //  semantic barrier instructions:
+  //  (as defined in orderAccess.hpp)
+  //
+  //  - release  orders Store|Store,       (maps to lwsync)
+  //                     Load|Store
+  //  - acquire  orders  Load|Store,       (maps to lwsync)
+  //                     Load|Load
+  //  - fence    orders Store|Store,       (maps to sync)
+  //                     Load|Store,
+  //                     Load|Load,
+  //                    Store|Load
+  //
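+  //  Sketch (illustrative only) of how the semantic barriers are typically
+  //  composed from the machine barriers declared below:
+  //    lwsync();        // release
+  //    stw(R3, 0, R4);  // publishing store (R3/R4 arbitrary here)
+  //    ...
+  //    lwz(R3, 0, R4);  // load
+  //    lwsync();        // acquire
+  //    ...
+  //    sync();          // full fence, additionally orders Store|Load
+  //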
+ private:
+  inline void sync(int l);
+ public:
+  inline void sync();
+  inline void lwsync();
+  inline void ptesync();
+  inline void eieio();
+  inline void isync();
+  inline void elemental_membar(int e); // Elemental Memory Barriers (>=Power 8)
+
+  // atomics
+  inline void lwarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
+  inline void ldarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
+  inline bool lxarx_hint_exclusive_access();
+  inline void lwarx(  Register d, Register a, Register b, bool hint_exclusive_access = false);
+  inline void ldarx(  Register d, Register a, Register b, bool hint_exclusive_access = false);
+  inline void stwcx_( Register s, Register a, Register b);
+  inline void stdcx_( Register s, Register a, Register b);
+
+  // Instructions for adjusting thread priority for simultaneous
+  // multithreading (SMT) on Power5.
+ private:
+  inline void smt_prio_very_low();
+  inline void smt_prio_medium_high();
+  inline void smt_prio_high();
+
+ public:
+  inline void smt_prio_low();
+  inline void smt_prio_medium_low();
+  inline void smt_prio_medium();
+
+  // trap instructions
+  inline void twi_0(Register a); // for load with acquire semantics use load+twi_0+isync (trap can't occur)
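+  // Acquire sketch (illustrative only), following the comment above:
+  //   lwz(R3, 0, R4);  // the load to be ordered
+  //   twi_0(R3);       // trap that can never fire, creates a data dependency
+  //   isync();         // discard speculatively fetched instructions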
+  // NOT FOR DIRECT USE!!
+ protected:
+  inline void tdi_unchecked(int tobits, Register a, int si16);
+  inline void twi_unchecked(int tobits, Register a, int si16);
+  inline void tdi(          int tobits, Register a, int si16);   // asserts UseSIGTRAP
+  inline void twi(          int tobits, Register a, int si16);   // asserts UseSIGTRAP
+  inline void td(           int tobits, Register a, Register b); // asserts UseSIGTRAP
+  inline void tw(           int tobits, Register a, Register b); // asserts UseSIGTRAP
+
+  static bool is_tdi(int x, int tobits, int ra, int si16) {
+     return (TDI_OPCODE == (x & TDI_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (si16 == inv_si_field(x));
+  }
+
+  static bool is_twi(int x, int tobits, int ra, int si16) {
+     return (TWI_OPCODE == (x & TWI_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (si16 == inv_si_field(x));
+  }
+
+  static bool is_twi(int x, int tobits, int ra) {
+     return (TWI_OPCODE == (x & TWI_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x));
+  }
+
+  static bool is_td(int x, int tobits, int ra, int rb) {
+     return (TD_OPCODE == (x & TD_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (rb == -1/*any reg*/ || rb == inv_rb_field(x));
+  }
+
+  static bool is_tw(int x, int tobits, int ra, int rb) {
+     return (TW_OPCODE == (x & TW_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (rb == -1/*any reg*/ || rb == inv_rb_field(x));
+  }
+
+ public:
+  // PPC floating point instructions
+  // PPC 1, section 4.6.2 Floating-Point Load Instructions
+  inline void lfs(  FloatRegister d, int si16,   Register a);
+  inline void lfsu( FloatRegister d, int si16,   Register a);
+  inline void lfsx( FloatRegister d, Register a, Register b);
+  inline void lfd(  FloatRegister d, int si16,   Register a);
+  inline void lfdu( FloatRegister d, int si16,   Register a);
+  inline void lfdx( FloatRegister d, Register a, Register b);
+
+  // PPC 1, section 4.6.3 Floating-Point Store Instructions
+  inline void stfs(  FloatRegister s, int si16,   Register a);
+  inline void stfsu( FloatRegister s, int si16,   Register a);
+  inline void stfsx( FloatRegister s, Register a, Register b);
+  inline void stfd(  FloatRegister s, int si16,   Register a);
+  inline void stfdu( FloatRegister s, int si16,   Register a);
+  inline void stfdx( FloatRegister s, Register a, Register b);
+
+  // PPC 1, section 4.6.4 Floating-Point Move Instructions
+  inline void fmr(  FloatRegister d, FloatRegister b);
+  inline void fmr_( FloatRegister d, FloatRegister b);
+
+  //  inline void mffgpr( FloatRegister d, Register b);
+  //  inline void mftgpr( Register d, FloatRegister b);
+  inline void cmpb(   Register a, Register s, Register b);
+  inline void popcntb(Register a, Register s);
+  inline void popcntw(Register a, Register s);
+  inline void popcntd(Register a, Register s);
+
+  inline void fneg(  FloatRegister d, FloatRegister b);
+  inline void fneg_( FloatRegister d, FloatRegister b);
+  inline void fabs(  FloatRegister d, FloatRegister b);
+  inline void fabs_( FloatRegister d, FloatRegister b);
+  inline void fnabs( FloatRegister d, FloatRegister b);
+  inline void fnabs_(FloatRegister d, FloatRegister b);
+
+  // PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic Instructions
+  inline void fadd(  FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fadd_( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fadds( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fadds_(FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsub(  FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsub_( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsubs( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsubs_(FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fmul(  FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fmul_( FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fmuls( FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fmuls_(FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fdiv(  FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fdiv_( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fdivs( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fdivs_(FloatRegister d, FloatRegister a, FloatRegister b);
+
+  // PPC 1, section 4.6.6 Floating-Point Rounding and Conversion Instructions
+  inline void frsp(  FloatRegister d, FloatRegister b);
+  inline void fctid( FloatRegister d, FloatRegister b);
+  inline void fctidz(FloatRegister d, FloatRegister b);
+  inline void fctiw( FloatRegister d, FloatRegister b);
+  inline void fctiwz(FloatRegister d, FloatRegister b);
+  inline void fcfid( FloatRegister d, FloatRegister b);
+  inline void fcfids(FloatRegister d, FloatRegister b);
+
+  // PPC 1, section 4.6.7 Floating-Point Compare Instructions
+  inline void fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b);
+
+  inline void fsqrt( FloatRegister d, FloatRegister b);
+  inline void fsqrts(FloatRegister d, FloatRegister b);
+
+  // Vector instructions for >= Power6.
+  inline void lvebx(    VectorRegister d, Register s1, Register s2);
+  inline void lvehx(    VectorRegister d, Register s1, Register s2);
+  inline void lvewx(    VectorRegister d, Register s1, Register s2);
+  inline void lvx(      VectorRegister d, Register s1, Register s2);
+  inline void lvxl(     VectorRegister d, Register s1, Register s2);
+  inline void stvebx(   VectorRegister d, Register s1, Register s2);
+  inline void stvehx(   VectorRegister d, Register s1, Register s2);
+  inline void stvewx(   VectorRegister d, Register s1, Register s2);
+  inline void stvx(     VectorRegister d, Register s1, Register s2);
+  inline void stvxl(    VectorRegister d, Register s1, Register s2);
+  inline void lvsl(     VectorRegister d, Register s1, Register s2);
+  inline void lvsr(     VectorRegister d, Register s1, Register s2);
+  inline void vpkpx(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkshss(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkswss(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkshus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkswus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuhum(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuwum(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuhus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuwus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vupkhpx(  VectorRegister d, VectorRegister b);
+  inline void vupkhsb(  VectorRegister d, VectorRegister b);
+  inline void vupkhsh(  VectorRegister d, VectorRegister b);
+  inline void vupklpx(  VectorRegister d, VectorRegister b);
+  inline void vupklsb(  VectorRegister d, VectorRegister b);
+  inline void vupklsh(  VectorRegister d, VectorRegister b);
+  inline void vmrghb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrghw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrghh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrglb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrglw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrglh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsplt(    VectorRegister d, int ui4,          VectorRegister b);
+  inline void vsplth(   VectorRegister d, int ui3,          VectorRegister b);
+  inline void vspltw(   VectorRegister d, int ui2,          VectorRegister b);
+  inline void vspltisb( VectorRegister d, int si5);
+  inline void vspltish( VectorRegister d, int si5);
+  inline void vspltisw( VectorRegister d, int si5);
+  inline void vperm(    VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vsel(     VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vsl(      VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsldoi(   VectorRegister d, VectorRegister a, VectorRegister b, int si4);
+  inline void vslo(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsr(      VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsro(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddcuw(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddshs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddsbs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddsws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddubm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduwm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduhm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddubs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduhs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubcuw(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubshs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubsbs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubsws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsububm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuwm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuhm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsububs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuhs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulesb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmuleub(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulesh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmuleuh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulosb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmuloub(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulosh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulouh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmhaddshs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmhraddshs(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmladduhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsubuhm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsummbm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumshm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumshs( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumuhm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumuhs( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vsumsws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum2sws( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum4sbs( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum4ubs( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum4shs( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgsb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgsw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgsh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgub(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavguw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavguh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxsb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxsw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxsh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxub(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxuw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxuh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminsb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminsw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminsh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminub(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminuw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminuh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequb( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequh( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequw( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsh( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsb( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsw( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtub( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuh( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuw( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequb_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequh_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequw_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsh_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsb_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsw_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtub_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuh_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuw_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vand(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vandc(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vnor(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vor(      VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vxor(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vrlb(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vrlw(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vrlh(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vslb(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vskw(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vslh(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrb(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrw(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrh(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrab(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsraw(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrah(    VectorRegister d, VectorRegister a, VectorRegister b);
+  // Vector Floating-Point not implemented yet
+  inline void mtvscr(   VectorRegister b);
+  inline void mfvscr(   VectorRegister d);
+
+  // The following encoders implicitly use r0 as the second (base) operand.
+  // These instructions read r0 as '0'.
+  inline void lwzx( Register d, Register s2);
+  inline void lwz(  Register d, int si16);
+  inline void lwax( Register d, Register s2);
+  inline void lwa(  Register d, int si16);
+  inline void lhzx( Register d, Register s2);
+  inline void lhz(  Register d, int si16);
+  inline void lhax( Register d, Register s2);
+  inline void lha(  Register d, int si16);
+  inline void lbzx( Register d, Register s2);
+  inline void lbz(  Register d, int si16);
+  inline void ldx(  Register d, Register s2);
+  inline void ld(   Register d, int si16);
+  inline void stwx( Register d, Register s2);
+  inline void stw(  Register d, int si16);
+  inline void sthx( Register d, Register s2);
+  inline void sth(  Register d, int si16);
+  inline void stbx( Register d, Register s2);
+  inline void stb(  Register d, int si16);
+  inline void stdx( Register d, Register s2);
+  inline void std(  Register d, int si16);
+
+  // PPC 2, section 3.2.1 Instruction Cache Instructions
+  inline void icbi(    Register s2);
+  // PPC 2, section 3.2.2 Data Cache Instructions
+  //inline void dcba(   Register s2); // Instruction for embedded processor only.
+  inline void dcbz(    Register s2);
+  inline void dcbst(   Register s2);
+  inline void dcbf(    Register s2);
+  // dcache read hint
+  inline void dcbt(    Register s2);
+  inline void dcbtct(  Register s2, int ct);
+  inline void dcbtds(  Register s2, int ds);
+  // dcache write hint
+  inline void dcbtst(  Register s2);
+  inline void dcbtstct(Register s2, int ct);
+
+  // Atomics: use ra0mem to disallow R0 as base.
+  inline void lwarx_unchecked(Register d, Register b, int eh1);
+  inline void ldarx_unchecked(Register d, Register b, int eh1);
+  inline void lwarx( Register d, Register b, bool hint_exclusive_access);
+  inline void ldarx( Register d, Register b, bool hint_exclusive_access);
+  inline void stwcx_(Register s, Register b);
+  inline void stdcx_(Register s, Register b);
+  inline void lfs(   FloatRegister d, int si16);
+  inline void lfsx(  FloatRegister d, Register b);
+  inline void lfd(   FloatRegister d, int si16);
+  inline void lfdx(  FloatRegister d, Register b);
+  inline void stfs(  FloatRegister s, int si16);
+  inline void stfsx( FloatRegister s, Register b);
+  inline void stfd(  FloatRegister s, int si16);
+  inline void stfdx( FloatRegister s, Register b);
+  inline void lvebx( VectorRegister d, Register s2);
+  inline void lvehx( VectorRegister d, Register s2);
+  inline void lvewx( VectorRegister d, Register s2);
+  inline void lvx(   VectorRegister d, Register s2);
+  inline void lvxl(  VectorRegister d, Register s2);
+  inline void stvebx(VectorRegister d, Register s2);
+  inline void stvehx(VectorRegister d, Register s2);
+  inline void stvewx(VectorRegister d, Register s2);
+  inline void stvx(  VectorRegister d, Register s2);
+  inline void stvxl( VectorRegister d, Register s2);
+  inline void lvsl(  VectorRegister d, Register s2);
+  inline void lvsr(  VectorRegister d, Register s2);
+
+  // RegisterOrConstant versions.
+  // These emitters choose between the versions using two registers and
+  // those with register and immediate, depending on the content of roc.
+  // If the constant is not encodable as immediate, instructions to
+  // load the constant are emitted beforehand. Store instructions need a
+  // tmp reg if the constant is not encodable as immediate.
+  // The emitted size is unpredictable (see the usage sketch after these declarations).
+  void ld(  Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lwa( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lwz( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lha( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lhz( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lbz( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void std( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void stw( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void sth( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void stb( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void add( Register d, RegisterOrConstant roc, Register s1);
+  void subf(Register d, RegisterOrConstant roc, Register s1);
+  void cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1);
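+  // Usage sketch (illustrative only; `large_disp' stands for an offset that
+  // does not fit into a 16-bit immediate, constructor forms are assumed from
+  // the shared RegisterOrConstant class):
+  //   ld(R3, RegisterOrConstant((intptr_t)8), R4);       // small offset  -> plain ld
+  //   ld(R3, RegisterOrConstant(R5), R4);                // register case -> indexed form
+  //   std(R3, RegisterOrConstant(large_disp), R4, R11);  // R11 used as tmp for the offset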
+
+
+  // Emit several instructions to load a 64-bit constant. This issues a fixed
+  // instruction pattern so that the constant can be patched later on (a
+  // typical sequence is sketched after these declarations).
+  enum {
+    load_const_size = 5 * BytesPerInstWord
+  };
+         void load_const(Register d, long a,            Register tmp = noreg);
+  inline void load_const(Register d, void* a,           Register tmp = noreg);
+  inline void load_const(Register d, Label& L,          Register tmp = noreg);
+  inline void load_const(Register d, AddressLiteral& a, Register tmp = noreg);
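+  // Pattern sketch (illustrative only): the usual patchable 5-instruction
+  // sequence for a 64-bit constant, matching load_const_size above:
+  //   lis( d, <hi16 of upper half>);
+  //   ori( d, d, <lo16 of upper half>);
+  //   sldi(d, d, 32);
+  //   oris(d, d, <hi16 of lower half>);
+  //   ori( d, d, <lo16 of lower half>);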
+
+  // Load a 64-bit constant, optimized, not identifiable.
+  // Tmp can be used to increase ILP. Set return_simm16_rest = true to get a
+  // 16-bit immediate offset. This is useful if the offset can be encoded in
+  // a succeeding instruction.
+         int load_const_optimized(Register d, long a,  Register tmp = noreg, bool return_simm16_rest = false);
+  inline int load_const_optimized(Register d, void* a, Register tmp = noreg, bool return_simm16_rest = false) {
+    return load_const_optimized(d, (long)(unsigned long)a, tmp, return_simm16_rest);
+  }
+
+  // Creation
+  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
+#ifdef CHECK_DELAY
+    delay_state = no_delay;
+#endif
+  }
+
+  // Testing
+#ifndef PRODUCT
+  void test_asm();
+#endif
+};
+
+
+#endif // CPU_PPC_VM_ASSEMBLER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Wed Apr 02 09:59:18 2014 -0700
@@ -0,0 +1,823 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2014 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
+#define CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
+
+#include "asm/assembler.inline.hpp"
+#include "asm/codeBuffer.hpp"
+#include "code/codeCache.hpp"
+
+inline void Assembler::emit_int32(int x) {
+  AbstractAssembler::emit_int32(x);
+}
+
+inline void Assembler::emit_data(int x) {
+  emit_int32(x);
+}
+
+inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
+  relocate(rtype);
+  emit_int32(x);
+}
+
+inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
+  relocate(rspec);
+  emit_int32(x);
+}
+
+// Emit an address
+inline address Assembler::emit_addr(const address addr) {
+  address start = pc();
+  emit_address(addr);
+  return start;
+}
+
+#if !defined(ABI_ELFv2)
+// Emit a function descriptor with the specified entry point, TOC, and
+// ENV. If the entry point is NULL, the descriptor's entry will point
+// just past the descriptor itself.
+inline address Assembler::emit_fd(address entry, address toc, address env) {
+  FunctionDescriptor* fd = (FunctionDescriptor*)pc();
+
+  assert(sizeof(FunctionDescriptor) == 3*sizeof(address), "function descriptor size");
+
+  (void)emit_addr();
+  (void)emit_addr();
+  (void)emit_addr();
+
+  fd->set_entry(entry == NULL ? pc() : entry);
+  fd->set_toc(toc);
+  fd->set_env(env);
+
+  return (address)fd;
+}
+#endif
+
+// Issue an illegal instruction. 0 is guaranteed to be an illegal instruction.
+inline void Assembler::illtrap() { Assembler::emit_int32(0); }
+inline bool Assembler::is_illtrap(int x) { return x == 0; }
+
+// PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
+inline void Assembler::addi(   Register d, Register a, int si16)   { assert(a != R0, "r0 not allowed"); addi_r0ok( d, a, si16); }
+inline void Assembler::addis(  Register d, Register a, int si16)   { assert(a != R0, "r0 not allowed"); addis_r0ok(d, a, si16); }
+inline void Assembler::addi_r0ok(Register d,Register a,int si16)   { emit_int32(ADDI_OPCODE   | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::addis_r0ok(Register d,Register a,int si16)  { emit_int32(ADDIS_OPCODE  | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::addic_( Register d, Register a, int si16)   { emit_int32(ADDIC__OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::subfic( Register d, Register a, int si16)   { emit_int32(SUBFIC_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::add(    Register d, Register a, Register b) { emit_int32(ADD_OPCODE    | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::add_(   Register d, Register a, Register b) { emit_int32(ADD_OPCODE    | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::subf(   Register d, Register a, Register b) { emit_int32(SUBF_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::sub(    Register d, Register a, Register b) { subf(d, b, a); }
+inline void Assembler::subf_(  Register d, Register a, Register b) { emit_int32(SUBF_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::addc(   Register d, Register a, Register b) { emit_int32(ADDC_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::addc_(  Register d, Register a, Register b) { emit_int32(ADDC_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::subfc(  Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::subfc_( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::adde(   Register d, Register a, Register b) { emit_int32(ADDE_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::adde_(  Register d, Register a, Register b) { emit_int32(ADDE_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::subfe(  Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::subfe_( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::neg(    Register d, Register a)             { emit_int32(NEG_OPCODE    | rt(d) | ra(a) | oe(0) | rc(0)); }
+inline void Assembler::neg_(   Register d, Register a)             { emit_int32(NEG_OPCODE    | rt(d) | ra(a) | oe(0) | rc(1)); }
+inline void Assembler::mulli(  Register d, Register a, int si16)   { emit_int32(MULLI_OPCODE  | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::mulld(  Register d, Register a, Register b) { emit_int32(MULLD_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::mulld_( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::mullw(  Register d, Register a, Register b) { emit_int32(MULLW_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::mullw_( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::mulhw(  Register d, Register a, Register b) { emit_int32(MULHW_OPCODE  | rt(d) | ra(a) | rb(b) | rc(0)); }
+inline void Assembler::mulhw_( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE  | rt(d) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::mulhd(  Register d, Register a, Register b) { emit_int32(MULHD_OPCODE  | rt(d) | ra(a) | rb(b) | rc(0)); }
+inline void Assembler::mulhd_( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE  | rt(d) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::mulhdu( Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
+inline void Assembler::mulhdu_(Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::divd(   Register d, Register a, Register b) { emit_int32(DIVD_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::divd_(  Register d, Register a, Register b) { emit_int32(DIVD_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::divw(   Register d, Register a, Register b) { emit_int32(DIVW_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::divw_(  Register d, Register a, Register b) { emit_int32(DIVW_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+
+// extended mnemonics
+inline void Assembler::li(   Register d, int si16)             { Assembler::addi_r0ok( d, R0, si16); }
+inline void Assembler::lis(  Register d, int si16)             { Assembler::addis_r0ok(d, R0, si16); }
+inline void Assembler::addir(Register d, int si16, Register a) { Assembler::addi(d, a, si16); }
+
+// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
+inline void Assembler::cmpi(  ConditionRegister f, int l, Register a, int si16)   { emit_int32( CMPI_OPCODE  | bf(f) | l10(l) | ra(a) | simm(si16,16)); }
+inline void Assembler::cmp(   ConditionRegister f, int l, Register a, Register b) { emit_int32( CMP_OPCODE   | bf(f) | l10(l) | ra(a) | rb(b)); }
+inline void Assembler::cmpli( ConditionRegister f, int l, Register a, int ui16)   { emit_int32( CMPLI_OPCODE | bf(f) | l10(l) | ra(a) | uimm(ui16,16)); }
+inline void Assembler::cmpl(  ConditionRegister f, int l, Register a, Register b) { emit_int32( CMPL_OPCODE  | bf(f) | l10(l) | ra(a) | rb(b)); }
+
+// extended mnemonics of Compare Instructions
+inline void Assembler::cmpwi( ConditionRegister crx, Register a, int si16)   { Assembler::cmpi( crx, 0, a, si16); }
+inline void Assembler::cmpdi( ConditionRegister crx, Register a, int si16)   { Assembler::cmpi( crx, 1, a, si16); }
+inline void Assembler::cmpw(  ConditionRegister crx, Register a, Register b) { Assembler::cmp(  crx, 0, a, b); }
+inline void Assembler::cmpd(  ConditionRegister crx, Register a, Register b) { Assembler::cmp(  crx, 1, a, b); }
+inline void Assembler::cmplwi(ConditionRegister crx, Register a, int ui16)   { Assembler::cmpli(crx, 0, a, ui16); }
+inline void Assembler::cmpldi(ConditionRegister crx, Register a, int ui16)   { Assembler::cmpli(crx, 1, a, ui16); }
+inline void Assembler::cmplw( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 0, a, b); }
+inline void Assembler::cmpld( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 1, a, b); }
+
+inline void Assembler::isel(Register d, Register a, Register b, int c) { guarantee(VM_Version::has_isel(), "opcode not supported on this hardware");
+                                                                         emit_int32(ISEL_OPCODE    | rt(d)  | ra(a) | rb(b) | bc(c)); }
+
+// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
+inline void Assembler::andi_(   Register a, Register s, int ui16)      { emit_int32(ANDI_OPCODE    | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::andis_(  Register a, Register s, int ui16)      { emit_int32(ANDIS_OPCODE   | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::ori(     Register a, Register s, int ui16)      { emit_int32(ORI_OPCODE     | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::oris(    Register a, Register s, int ui16)      { emit_int32(ORIS_OPCODE    | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::xori(    Register a, Register s, int ui16)      { emit_int32(XORI_OPCODE    | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::xoris(   Register a, Register s, int ui16)      { emit_int32(XORIS_OPCODE   | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::andr(    Register a, Register s, Register b)    { emit_int32(AND_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::and_(    Register a, Register s, Register b)    { emit_int32(AND_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+
+inline void Assembler::or_unchecked(Register a, Register s, Register b){ emit_int32(OR_OPCODE      | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::orr(     Register a, Register s, Register b)    { if (a==s && s==b) { Assembler::nop(); } else { Assembler::or_unchecked(a,s,b); } }
+inline void Assembler::or_(     Register a, Register s, Register b)    { emit_int32(OR_OPCODE      | rta(a) | rs(s) | rb(b) | rc(1)); }
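+// Note: orr() turns "or rx,rx,rx" into a plain nop because those register-
+// identical forms are reserved as SMT priority hints (see smt_prio_* below);
+// or_unchecked() emits them verbatim.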
+inline void Assembler::xorr(    Register a, Register s, Register b)    { emit_int32(XOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::xor_(    Register a, Register s, Register b)    { emit_int32(XOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::nand(    Register a, Register s, Register b)    { emit_int32(NAND_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::nand_(   Register a, Register s, Register b)    { emit_int32(NAND_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::nor(     Register a, Register s, Register b)    { emit_int32(NOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::nor_(    Register a, Register s, Register b)    { emit_int32(NOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::andc(    Register a, Register s, Register b)    { emit_int32(ANDC_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::andc_(   Register a, Register s, Register b)    { emit_int32(ANDC_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::orc(     Register a, Register s, Register b)    { emit_int32(ORC_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::orc_(    Register a, Register s, Register b)    { emit_int32(ORC_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::extsb(   Register a, Register s)                { emit_int32(EXTSB_OPCODE   | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::extsh(   Register a, Register s)                { emit_int32(EXTSH_OPCODE   | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::extsw(   Register a, Register s)                { emit_int32(EXTSW_OPCODE   | rta(a) | rs(s) | rc(0)); }
+
+// extended mnemonics
+inline void Assembler::nop()                              { Assembler::ori(R0, R0, 0); }
+// NOP for FP and BR units (different versions to allow them to be in one group)
+inline void Assembler::fpnop0()                           { Assembler::fmr(F30, F30); }
+inline void Assembler::fpnop1()                           { Assembler::fmr(F31, F31); }
+inline void Assembler::brnop0()                           { Assembler::mcrf(CCR2, CCR2); }
+inline void Assembler::brnop1()                           { Assembler::mcrf(CCR3, CCR3); }
+inline void Assembler::brnop2()                           { Assembler::mcrf(CCR4, CCR4); }
+
+inline void Assembler::mr(      Register d, Register s)   { Assembler::orr(d, s, s); }
+inline void Assembler::ori_opt( Register d, int ui16)     { if (ui16!=0) Assembler::ori( d, d, ui16); }
+inline void Assembler::oris_opt(Register d, int ui16)     { if (ui16!=0) Assembler::oris(d, d, ui16); }
+
+inline void Assembler::endgroup()                         { Assembler::ori(R1, R1, 0); }
+
+// count instructions
+inline void Assembler::cntlzw(  Register a, Register s)              { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::cntlzw_( Register a, Register s)              { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(1)); }
+inline void Assembler::cntlzd(  Register a, Register s)              { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::cntlzd_( Register a, Register s)              { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(1)); }
+
+// PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
+inline void Assembler::sld(     Register a, Register s, Register b)  { emit_int32(SLD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::sld_(    Register a, Register s, Register b)  { emit_int32(SLD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::slw(     Register a, Register s, Register b)  { emit_int32(SLW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::slw_(    Register a, Register s, Register b)  { emit_int32(SLW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::srd(     Register a, Register s, Register b)  { emit_int32(SRD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::srd_(    Register a, Register s, Register b)  { emit_int32(SRD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::srw(     Register a, Register s, Register b)  { emit_int32(SRW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::srw_(    Register a, Register s, Register b)  { emit_int32(SRW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::srad(    Register a, Register s, Register b)  { emit_int32(SRAD_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::srad_(   Register a, Register s, Register b)  { emit_int32(SRAD_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::sraw(    Register a, Register s, Register b)  { emit_int32(SRAW_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::sraw_(   Register a, Register s, Register b)  { emit_int32(SRAW_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::sradi(   Register a, Register s, int sh6)     { emit_int32(SRADI_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | rc(0)); }
+inline void Assembler::sradi_(  Register a, Register s, int sh6)     { emit_int32(SRADI_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | rc(1)); }
+inline void Assembler::srawi(   Register a, Register s, int sh5)     { emit_int32(SRAWI_OPCODE  | rta(a) | rs(s) | sh1620(sh5) | rc(0)); }
+inline void Assembler::srawi_(  Register a, Register s, int sh5)     { emit_int32(SRAWI_OPCODE  | rta(a) | rs(s) | sh1620(sh5) | rc(1)); }
+
+// extended mnemonics for Shift Instructions
+inline void Assembler::sldi(    Register a, Register s, int sh6)     { Assembler::rldicr(a, s, sh6, 63-sh6); }
+inline void Assembler::sldi_(   Register a, Register s, int sh6)     { Assembler::rldicr_(a, s, sh6, 63-sh6); }
+inline void Assembler::slwi(    Register a, Register s, int sh5)     { Assembler::rlwinm(a, s, sh5, 0, 31-sh5); }
+inline void Assembler::slwi_(   Register a, Register s, int sh5)     { Assembler::rlwinm_(a, s, sh5, 0, 31-sh5); }
+inline void Assembler::srdi(    Register a, Register s, int sh6)     { Assembler::rldicl(a, s, 64-sh6, sh6); }
+inline void Assembler::srdi_(   Register a, Register s, int sh6)     { Assembler::rldicl_(a, s, 64-sh6, sh6); }
+inline void Assembler::srwi(    Register a, Register s, int sh5)     { Assembler::rlwinm(a, s, 32-sh5, sh5, 31); }
+inline void Assembler::srwi_(   Register a, Register s, int sh5)     { Assembler::rlwinm_(a, s, 32-sh5, sh5, 31); }
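+
+// Worked example (illustrative only): sldi(d, s, 3) multiplies by 8 and is
+// encoded as rldicr(d, s, 3, 60), i.e. rotate left by 3 and keep bits 0..60,
+// which clears the three low-order bits shifted in.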
+
+inline void Assembler::clrrdi(  Register a, Register s, int ui6)     { Assembler::rldicr(a, s, 0, 63-ui6); }
+inline void Assembler::clrrdi_( Register a, Register s, int ui6)     { Assembler::rldicr_(a, s, 0, 63-ui6); }
+inline void Assembler::clrldi(  Register a, Register s, int ui6)     { Assembler::rldicl(a, s, 0, ui6); }
+inline void Assembler::clrldi_( Register a, Register s, int ui6)     { Assembler::rldicl_(a, s, 0, ui6); }
+inline void Assembler::clrlsldi( Register a, Register s, int clrl6, int shl6) { Assembler::rldic( a, s, shl6, clrl6-shl6); }
+inline void Assembler::clrlsldi_(Register a, Register s, int clrl6, int shl6) { Assembler::rldic_(a, s, shl6, clrl6-shl6); }
+inline void Assembler::extrdi(  Register a, Register s, int n, int b){ Assembler::rldicl(a, s, b+n, 64-n); }
+// testbit with condition register.
+inline void Assembler::testbitdi(ConditionRegister cr, Register a, Register s, int ui6) {
+  if (cr == CCR0) {
+    Assembler::rldicr_(a, s, 63-ui6, 0);
+  } else {
+    Assembler::rldicr(a, s, 63-ui6, 0);
+    Assembler::cmpdi(cr, a, 0);
+  }
+}
+
+// rotate instructions
+inline void Assembler::rotldi( Register a, Register s, int n) { Assembler::rldicl(a, s, n, 0); }
+inline void Assembler::rotrdi( Register a, Register s, int n) { Assembler::rldicl(a, s, 64-n, 0); }
+inline void Assembler::rotlwi( Register a, Register s, int n) { Assembler::rlwinm(a, s, n, 0, 31); }
+inline void Assembler::rotrwi( Register a, Register s, int n) { Assembler::rlwinm(a, s, 32-n, 0, 31); }
+
+inline void Assembler::rldic(   Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIC_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
+inline void Assembler::rldic_(  Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIC_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
+inline void Assembler::rldicr(  Register a, Register s, int sh6, int mb6)         { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
+inline void Assembler::rldicr_( Register a, Register s, int sh6, int mb6)         { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
+inline void Assembler::rldicl(  Register a, Register s, int sh6, int me6)         { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(0)); }
+inline void Assembler::rldicl_( Register a, Register s, int sh6, int me6)         { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(1)); }
+inline void Assembler::rlwinm(  Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
+inline void Assembler::rlwinm_( Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(1)); }
+inline void Assembler::rldimi(  Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
+inline void Assembler::rlwimi(  Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWIMI_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
+inline void Assembler::rldimi_( Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
+inline void Assembler::insrdi(  Register a, Register s, int n,   int b)           { Assembler::rldimi(a, s, 64-(b+n), b); }
+inline void Assembler::insrwi(  Register a, Register s, int n,   int b)           { Assembler::rlwimi(a, s, 32-(b+n), b, b+n-1); }
+
+// PPC 1, section 3.3.2 Fixed-Point Load Instructions
+inline void Assembler::lwzx( Register d, Register s1, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lwz(  Register d, int si16,    Register s1) { emit_int32(LWZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lwzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LWZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lwa(  Register d, int si16,    Register s1) { emit_int32(LWA_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
+
+inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lhz(  Register d, int si16,    Register s1) { emit_int32(LHZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lhzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lha(  Register d, int si16,    Register s1) { emit_int32(LHA_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lhau( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::lbzx( Register d, Register s1, Register s2) { emit_int32(LBZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lbz(  Register d, int si16,    Register s1) { emit_int32(LBZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lbzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LBZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::ld(   Register d, int si16,    Register s1) { emit_int32(LD_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
+inline void Assembler::ldx(  Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::ldu(  Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
+
+//  PPC 1, section 3.3.3 Fixed-Point Store Instructions
+inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stw(  Register d, int si16,    Register s1) { emit_int32(STW_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::stwu( Register d, int si16,    Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+
+inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::sth(  Register d, int si16,    Register s1) { emit_int32(STH_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::sthu( Register d, int si16,    Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+
+inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stb(  Register d, int si16,    Register s1) { emit_int32(STB_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::stbu( Register d, int si16,    Register s1) { emit_int32(STBU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+
+inline void Assembler::std(  Register d, int si16,    Register s1) { emit_int32(STD_OPCODE  | rs(d) | ds(si16)   | ra0mem(s1));}
+inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stdu( Register d, int si16,    Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16)   | rta0mem(s1));}
+inline void Assembler::stdux(Register s, Register a,  Register b)  { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
+
+// PPC 1, section 3.3.13 Move To/From System Register Instructions
+inline void Assembler::mtlr( Register s1)         { emit_int32(MTLR_OPCODE  | rs(s1)); }
+inline void Assembler::mflr( Register d )         { emit_int32(MFLR_OPCODE  | rt(d)); }
+inline void Assembler::mtctr(Register s1)         { emit_int32(MTCTR_OPCODE | rs(s1)); }
+inline void Assembler::mfctr(Register d )         { emit_int32(MFCTR_OPCODE | rt(d)); }
+inline void Assembler::mtcrf(int afxm, Register s){ emit_int32(MTCRF_OPCODE | fxm(afxm) | rs(s)); }
+inline void Assembler::mfcr( Register d )         { emit_int32(MFCR_OPCODE  | rt(d)); }
+inline void Assembler::mcrf( ConditionRegister crd, ConditionRegister cra)
+                                                      { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
+inline void Assembler::mtcr( Register s)          { Assembler::mtcrf(0xff, s); }
+
+// SAP JVM 2006-02-13 PPC branch instruction.
+// PPC 1, section 2.4.1 Branch Instructions
+inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
+inline void Assembler::b( Label& L)                           { b( target(L)); }
+inline void Assembler::bl(address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(1), rt); }
+inline void Assembler::bl(Label& L)                           { bl(target(L)); }
+inline void Assembler::bc( int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0) | lk(0), rt); }
+inline void Assembler::bc( int boint, int biint, Label& L)                           { bc(boint, biint, target(L)); }
+inline void Assembler::bcl(int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0)|lk(1)); }
+inline void Assembler::bcl(int boint, int biint, Label& L)                           { bcl(boint, biint, target(L)); }
+
+inline void Assembler::bclr(  int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
+inline void Assembler::bclrl( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
+inline void Assembler::bcctr( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
+inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
+
+// helper function for b
+inline bool Assembler::is_within_range_of_b(address a, address pc) {
+  // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+  if ((((uint64_t)a) & 0x3) != 0) return false;
+
+  const int range = 1 << (29-6); // li field is from bit 6 to bit 29.
+  int value = disp(intptr_t(a), intptr_t(pc));
+  bool result = -range <= value && value < range-1;
+#ifdef ASSERT
+  if (result) li(value); // Assert that value is in correct range.
+#endif
+  return result;
+}
+
+// helper functions for bcxx.
+inline bool Assembler::is_within_range_of_bcxx(address a, address pc) {
+  // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+  if ((((uint64_t)a) & 0x3) != 0) return false;
+
+  const int range = 1 << (29-16); // bd field is from bit 16 to bit 29.
+  int value = disp(intptr_t(a), intptr_t(pc));
+  bool result = -range <= value && value < range-1;
+#ifdef ASSERT
+  if (result) bd(value); // Assert that value is in correct range.
+#endif
+  return result;
+}
+
+// Get the destination of a bxx branch (b, bl, ba, bla).
+address  Assembler::bxx_destination(address baddr) { return bxx_destination(*(int*)baddr, baddr); }
+address  Assembler::bxx_destination(int instr, address pc) { return (address)bxx_destination_offset(instr, (intptr_t)pc); }
+intptr_t Assembler::bxx_destination_offset(int instr, intptr_t bxx_pos) {
+  intptr_t displ = inv_li_field(instr);
+  return bxx_pos + displ;
+}
+
+// Extended mnemonics for Branch Instructions
+inline void Assembler::blt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, less), L); }
+inline void Assembler::bgt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, greater), L); }
+inline void Assembler::beq(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, equal), L); }
+inline void Assembler::bso(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
+inline void Assembler::bge(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, less), L); }
+inline void Assembler::ble(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, greater), L); }
+inline void Assembler::bne(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, equal), L); }
+inline void Assembler::bns(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
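+
+// Illustrative usage sketch (register and label names are placeholders):
+//   cmpwi(CCR0, Rvalue, 0);   // 32-bit compare of Rvalue against 0
+//   blt(CCR0, L_negative);    // branch if Rvalue < 0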
+
+// Branch instructions with static prediction hints.
+inline void Assembler::blt_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, less), L); }
+inline void Assembler::bgt_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, greater), L); }
+inline void Assembler::beq_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, equal), L); }
+inline void Assembler::bso_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, summary_overflow), L); }
+inline void Assembler::bge_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, less), L); }
+inline void Assembler::ble_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, greater), L); }
+inline void Assembler::bne_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, equal), L); }
+inline void Assembler::bns_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, summary_overflow), L); }
+inline void Assembler::blt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, less), L); }
+inline void Assembler::bgt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, greater), L); }
+inline void Assembler::beq_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, equal), L); }
+inline void Assembler::bso_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
+inline void Assembler::bge_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, less), L); }
+inline void Assembler::ble_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, greater), L); }
+inline void Assembler::bne_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, equal), L); }
+inline void Assembler::bns_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
+
+// For use in conjunction with testbitdi:
+inline void Assembler::btrue( ConditionRegister crx, Label& L) { Assembler::bne(crx, L); }
+inline void Assembler::bfalse(ConditionRegister crx, Label& L) { Assembler::beq(crx, L); }
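+
+// Illustrative usage sketch (placeholder names): branch if bit ui6, counted
+// from the least significant bit, of Rsrc is set:
+//   testbitdi(CCR0, Rtmp, Rsrc, ui6);  // Rtmp receives the isolated bit
+//   btrue(CCR0, L_bit_set);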
+
+inline void Assembler::bltl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, less), L); }
+inline void Assembler::bgtl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, greater), L); }
+inline void Assembler::beql(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, equal), L); }
+inline void Assembler::bsol(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
+inline void Assembler::bgel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, less), L); }
+inline void Assembler::blel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, greater), L); }
+inline void Assembler::bnel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, equal), L); }
+inline void Assembler::bnsl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
+
+// Extended mnemonics for Branch Instructions via LR.
+// We use `blr' for returns.
+inline void Assembler::blr(relocInfo::relocType rt) { Assembler::bclr(bcondAlways, 0, bhintbhBCLRisReturn, rt); }
+
+// Extended mnemonics for Branch Instructions with CTR.
+// Bdnz means `decrement CTR and jump to L if CTR is not zero'.
+inline void Assembler::bdnz(Label& L) { Assembler::bc(16, 0, L); }
+// Decrement and branch if result is zero.
+inline void Assembler::bdz(Label& L)  { Assembler::bc(18, 0, L); }
+// We use `bctr[l]' for jumps/calls in function descriptor glue
+// code, e.g. for calls to runtime functions.
+inline void Assembler::bctr( relocInfo::relocType rt) { Assembler::bcctr(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::bctrl(relocInfo::relocType rt) { Assembler::bcctrl(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
+// Conditional jumps/branches via CTR.
+inline void Assembler::beqctr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::beqctrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::bnectr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::bnectrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
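+
+// Illustrative usage sketch of a counted loop (placeholder names; bind() is
+// provided by the shared assembler base class):
+//   mtctr(Rcount);   // number of iterations
+//   bind(L_loop);
+//   ...              // loop body
+//   bdnz(L_loop);    // decrement CTR, branch while CTR != 0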
+
+// condition register logic instructions
+inline void Assembler::crand( int d, int s1, int s2) { emit_int32(CRAND_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crnand(int d, int s1, int s2) { emit_int32(CRNAND_OPCODE | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::cror(  int d, int s1, int s2) { emit_int32(CROR_OPCODE   | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crxor( int d, int s1, int s2) { emit_int32(CRXOR_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crnor( int d, int s1, int s2) { emit_int32(CRNOR_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::creqv( int d, int s1, int s2) { emit_int32(CREQV_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crandc(int d, int s1, int s2) { emit_int32(CRANDC_OPCODE | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crorc( int d, int s1, int s2) { emit_int32(CRORC_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+
+// Conditional move (>= Power7)
+inline void Assembler::isel(Register d, ConditionRegister cr, Condition cc, bool inv, Register a, Register b) {
+  if (b == noreg) {
+    b = d; // Can be omitted if old value should be kept in "else" case.
+  }
+  Register first = a;
+  Register second = b;
+  if (inv) {
+    first = b;
+    second = a; // exchange
+  }
+  assert(first != R0, "r0 not allowed");
+  isel(d, first, second, bi0(cr, cc));
+}
+inline void Assembler::isel_0(Register d, ConditionRegister cr, Condition cc, Register b) {
+  if (b == noreg) {
+    b = d; // Can be omitted if old value should be kept in "else" case.
+  }
+  isel(d, R0, b, bi0(cr, cc));
+}
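+
+// Illustrative usage sketch (placeholder names): "Rd = (Ra == Rb) ? Rx : Ry"
+// on Power7 and newer:
+//   cmpd(CCR0, Ra, Rb);
+//   isel(Rd, CCR0, equal, false, Rx, Ry);  // Rx (the 'then' value) must not be R0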
+
+// PPC 2, section 3.2.1 Instruction Cache Instructions
+inline void Assembler::icbi(    Register s1, Register s2)         { emit_int32( ICBI_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+// PPC 2, section 3.2.2 Data Cache Instructions
+//inline void Assembler::dcba(  Register s1, Register s2)         { emit_int32( DCBA_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbz(    Register s1, Register s2)         { emit_int32( DCBZ_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbst(   Register s1, Register s2)         { emit_int32( DCBST_OPCODE  | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbf(    Register s1, Register s2)         { emit_int32( DCBF_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+// dcache read hint
+inline void Assembler::dcbt(    Register s1, Register s2)         { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbtct(  Register s1, Register s2, int ct) { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2) | thct(ct)); }
+inline void Assembler::dcbtds(  Register s1, Register s2, int ds) { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2) | thds(ds)); }
+// dcache write hint
+inline void Assembler::dcbtst(  Register s1, Register s2)         { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbtstct(Register s1, Register s2, int ct) { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2) | thct(ct)); }
+
+// machine barrier instructions:
+inline void Assembler::sync(int a) { emit_int32( SYNC_OPCODE | l910(a)); }
+inline void Assembler::sync()      { Assembler::sync(0); }
+inline void Assembler::lwsync()    { Assembler::sync(1); }
+inline void Assembler::ptesync()   { Assembler::sync(2); }
+inline void Assembler::eieio()     { emit_int32( EIEIO_OPCODE); }
+inline void Assembler::isync()     { emit_int32( ISYNC_OPCODE); }
+inline void Assembler::elemental_membar(int e) { assert(0 < e && e < 16, "invalid encoding"); emit_int32( SYNC_OPCODE | e1215(e)); }
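+
+// Note on usage: sync() (hwsync) is the full two-way barrier; lwsync() orders
+// everything except store->load and is the usual acquire/release barrier for
+// cacheable memory; isync() is context-synchronizing and is typically paired
+// with a dependent branch on the acquire side.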
+
+// atomics
+// Use ra0mem to disallow R0 as base.
+inline void Assembler::lwarx_unchecked(Register d, Register a, Register b, int eh1)           { emit_int32( LWARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
+inline void Assembler::ldarx_unchecked(Register d, Register a, Register b, int eh1)           { emit_int32( LDARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
+inline bool Assembler::lxarx_hint_exclusive_access()                                          { return VM_Version::has_lxarxeh(); }
+inline void Assembler::lwarx( Register d, Register a, Register b, bool hint_exclusive_access) { lwarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::ldarx( Register d, Register a, Register b, bool hint_exclusive_access) { ldarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::stwcx_(Register s, Register a, Register b)                             { emit_int32( STWCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
+inline void Assembler::stdcx_(Register s, Register a, Register b)                             { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
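+
+// Illustrative usage sketch (placeholder names): the canonical load-reserve /
+// store-conditional retry loop for an atomic 32-bit add:
+//   bind(L_retry);
+//   lwarx(Rold, Rbase, Roffset, false);  // load word and reserve (no hint)
+//   add(Rnew, Rold, Rdelta);             // add() is defined earlier in this file
+//   stwcx_(Rnew, Rbase, Roffset);        // store conditionally, sets CCR0.eq on success
+//   bne(CCR0, L_retry);                  // reservation lost: retry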
+
+// Instructions for adjusting thread priority
+// for simultaneous multithreading (SMT) on POWER5.
+inline void Assembler::smt_prio_very_low()    { Assembler::or_unchecked(R31, R31, R31); }
+inline void Assembler::smt_prio_low()         { Assembler::or_unchecked(R1,  R1,  R1); }
+inline void Assembler::smt_prio_medium_low()  { Assembler::or_unchecked(R6,  R6,  R6); }
+inline void Assembler::smt_prio_medium()      { Assembler::or_unchecked(R2,  R2,  R2); }
+inline void Assembler::smt_prio_medium_high() { Assembler::or_unchecked(R5,  R5,  R5); }
+inline void Assembler::smt_prio_high()        { Assembler::or_unchecked(R3,  R3,  R3); }
+
+inline void Assembler::twi_0(Register a)      { twi_unchecked(0, a, 0);}
+
+// trap instructions
+inline void Assembler::tdi_unchecked(int tobits, Register a, int si16){                                     emit_int32( TDI_OPCODE | to(tobits) | ra(a) | si(si16)); }
+inline void Assembler::twi_unchecked(int tobits, Register a, int si16){                                     emit_int32( TWI_OPCODE | to(tobits) | ra(a) | si(si16)); }
+inline void Assembler::tdi(int tobits, Register a, int si16)          { assert(UseSIGTRAP, "precondition"); tdi_unchecked(tobits, a, si16);                      }
+inline void Assembler::twi(int tobits, Register a, int si16)          { assert(UseSIGTRAP, "precondition"); twi_unchecked(tobits, a, si16);                      }
+inline void Assembler::td( int tobits, Register a, Register b)        { assert(UseSIGTRAP, "precondition"); emit_int32( TD_OPCODE  | to(tobits) | ra(a) | rb(b)); }
+inline void Assembler::tw( int tobits, Register a, Register b)        { assert(UseSIGTRAP, "precondition"); emit_int32( TW_OPCODE  | to(tobits) | ra(a) | rb(b)); }
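+
+// Illustrative usage sketch (placeholder names): with UseSIGTRAP, "trap if
+// Rval == 0" is encoded with the architecturally defined TO field:
+//   twi(4, Rval, 0);   // TO = 0b00100, trap-if-equal
+// The resulting SIGTRAP is then dealt with in the VM's signal handler (hence
+// the UseSIGTRAP precondition asserted above).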
+
+// FLOATING POINT instructions ppc.
+// PPC 1, section 4.6.2 Floating-Point Load Instructions
+// Use ra0mem instead of ra in some instructions below.
+inline void Assembler::lfs( FloatRegister d, int si16, Register a)   { emit_int32( LFS_OPCODE  | frt(d) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::lfsu(FloatRegister d, int si16, Register a)   { emit_int32( LFSU_OPCODE | frt(d) | ra(a)     | simm(si16,16)); }
+inline void Assembler::lfsx(FloatRegister d, Register a, Register b) { emit_int32( LFSX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }
+inline void Assembler::lfd( FloatRegister d, int si16, Register a)   { emit_int32( LFD_OPCODE  | frt(d) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::lfdu(FloatRegister d, int si16, Register a)   { emit_int32( LFDU_OPCODE | frt(d) | ra(a)     | simm(si16,16)); }
+inline void Assembler::lfdx(FloatRegister d, Register a, Register b) { emit_int32( LFDX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }
+
+// PPC 1, section 4.6.3 Floating-Point Store Instructions
+// Use ra0mem instead of ra in some instructions below.
+inline void Assembler::stfs( FloatRegister s, int si16, Register a)  { emit_int32( STFS_OPCODE  | frs(s) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::stfsu(FloatRegister s, int si16, Register a)  { emit_int32( STFSU_OPCODE | frs(s) | ra(a)     | simm(si16,16)); }
+inline void Assembler::stfsx(FloatRegister s, Register a, Register b){ emit_int32( STFSX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
+inline void Assembler::stfd( FloatRegister s, int si16, Register a)  { emit_int32( STFD_OPCODE  | frs(s) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::stfdu(FloatRegister s, int si16, Register a)  { emit_int32( STFDU_OPCODE | frs(s) | ra(a)     | simm(si16,16)); }
+inline void Assembler::stfdx(FloatRegister s, Register a, Register b){ emit_int32( STFDX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
+
+// PPC 1, section 4.6.4 Floating-Point Move Instructions
+inline void Assembler::fmr( FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fmr_(FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(1)); }
+
+// These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
+// on Power7.  Do not use.
+//inline void Assembler::mffgpr( FloatRegister d, Register b)   { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
+//inline void Assembler::mftgpr( Register d, FloatRegister b)   { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }
+// cmpb and popcntb: also used to detect the PPC Power version.
+inline void Assembler::cmpb(   Register a, Register s, Register b) { guarantee(VM_Version::has_cmpb(), "opcode not supported on this hardware");
+                                                                     emit_int32( CMPB_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::popcntb(Register a, Register s)             { guarantee(VM_Version::has_popcntb(), "opcode not supported on this hardware");
+                                                                     emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); }
+inline void Assembler::popcntw(Register a, Register s)             { guarantee(VM_Version::has_popcntw(), "opcode not supported on this hardware");
+                                                                     emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); }
+inline void Assembler::popcntd(Register a, Register s)             { emit_int32( POPCNTD_OPCODE | rta(a) | rs(s)); }
+
+inline void Assembler::fneg(  FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fneg_( FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE  | frt(d) | frb(b) | rc(1)); }
+inline void Assembler::fabs(  FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fabs_( FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE  | frt(d) | frb(b) | rc(1)); }
+inline void Assembler::fnabs( FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fnabs_(FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(1)); }
+
+// PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic Instructions
+inline void Assembler::fadd(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fadd_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fadds( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fadds_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fsub(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fsub_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fsubs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fsubs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fmul(  FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE  | frt(d) | fra(a) | frc(c) | rc(0)); }
+inline void Assembler::fmul_( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE  | frt(d) | fra(a) | frc(c) | rc(1)); }
+inline void Assembler::fmuls( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(0)); }
+inline void Assembler::fmuls_(FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(1)); }
+inline void Assembler::fdiv(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fdiv_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fdivs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fdivs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
+
+// PPC 1, section 4.6.6 Floating-Point Rounding and Conversion Instructions
+inline void Assembler::frsp(  FloatRegister d, FloatRegister b) { emit_int32( FRSP_OPCODE   | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctid( FloatRegister d, FloatRegister b) { emit_int32( FCTID_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctidz(FloatRegister d, FloatRegister b) { emit_int32( FCTIDZ_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctiw( FloatRegister d, FloatRegister b) { emit_int32( FCTIW_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctiwz(FloatRegister d, FloatRegister b) { emit_int32( FCTIWZ_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fcfid( FloatRegister d, FloatRegister b) { emit_int32( FCFID_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fcfids(), "opcode not supported on this hardware");
+                                                                  emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
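+
+// Illustrative usage sketch (placeholder names): without a direct GPR->FPR
+// move, a 64-bit integer in Rsrc is typically converted to double via memory:
+//   std(Rsrc, offset, R1);   // spill to a stack slot at R1+offset
+//   lfd(Ftmp, offset, R1);   // reload into an FPR
+//   fcfid(Fdst, Ftmp);       // convert the 64-bit integer to double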
+
+// PPC 1, section 4.6.7 Floating-Point Compare Instructions
+inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b) { emit_int32( FCMPU_OPCODE | bf(crx) | fra(a) | frb(b)); }
+
+// PPC 1, section 5.2.1 Floating-Point Arithmetic Instructions
+inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrt(), "opcode not supported on this hardware");
+                                                                  emit_int32( FSQRT_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { guarantee(VM_Version::has_fsqrts(), "opcode not supported on this hardware");
+                                                                  emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
+
+// Vector instructions for >= Power6.
+inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvehx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEHX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvewx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEWX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvx(   VectorRegister d, Register s1, Register s2) { emit_int32( LVX_OPCODE    | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvxl(  VectorRegister d, Register s1, Register s2) { emit_int32( LVXL_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvebx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvehx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvewx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvx(  VectorRegister d, Register s1, Register s2) { emit_int32( STVX_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvxl( VectorRegister d, Register s1, Register s2) { emit_int32( STVXL_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvsl(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSL_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvsr(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
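+
+// Note on usage: lvx/stvx access the 16-byte aligned quadword containing the
+// effective address (the low four address bits are ignored); lvsl/lvsr produce
+// the permute control vectors conventionally combined with vperm to assemble
+// unaligned data from two aligned loads.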
+
+inline void Assembler::vpkpx(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKPX_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkshss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkswss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkshus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkswus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuhum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuwum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuhus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuwus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vupkhpx( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHPX_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupkhsb( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHSB_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupkhsh( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHSH_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupklpx( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLPX_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupklsb( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLSB_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupklsh( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLSH_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vmrghb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrghw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrghh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrglb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrglw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrglh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsplt(   VectorRegister d, int ui4,          VectorRegister b) { emit_int32( VSPLT_OPCODE   | vrt(d) | vsplt_uim(uimm(ui4,4)) | vrb(b)); }
+inline void Assembler::vsplth(  VectorRegister d, int ui3,          VectorRegister b) { emit_int32( VSPLTH_OPCODE  | vrt(d) | vsplt_uim(uimm(ui3,3)) | vrb(b)); }
+inline void Assembler::vspltw(  VectorRegister d, int ui2,          VectorRegister b) { emit_int32( VSPLTW_OPCODE  | vrt(d) | vsplt_uim(uimm(ui2,2)) | vrb(b)); }
+inline void Assembler::vspltisb(VectorRegister d, int si5)                            { emit_int32( VSPLTISB_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
+inline void Assembler::vspltish(VectorRegister d, int si5)                            { emit_int32( VSPLTISH_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
+inline void Assembler::vspltisw(VectorRegister d, int si5)                            { emit_int32( VSPLTISW_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
+inline void Assembler::vperm(   VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VPERM_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+inline void Assembler::vsel(    VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VSEL_OPCODE  | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+inline void Assembler::vsl(     VectorRegister d, VectorRegister a, VectorRegister b)                  { emit_int32( VSL_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsldoi(  VectorRegister d, VectorRegister a, VectorRegister b, int si4)         { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(simm(si4,4))); }
+inline void Assembler::vslo(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLO_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsr(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsro(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRO_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddubm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddubs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsububm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsububs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulesb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmuleub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulesh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmuleuh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulosb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmuloub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulosh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulouh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmhaddshs(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMHADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmhraddshs(VectorRegister d,VectorRegister a,VectorRegister b, VectorRegister c) { emit_int32( VMHRADDSHS_OPCODE| vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmladduhm(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMLADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsubuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUBUHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsummbm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMMBM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumshm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumshs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHS_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumuhs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHS_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vsumsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUMSWS_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum2sws(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM2SWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum4sbs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum4ubs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4UBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum4shs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavguw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavguh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxuw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxuh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminuw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminuh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vcmpequb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpequh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpequw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtsh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtsb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtsw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtub(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtuh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtuw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
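+// Record forms below (trailing '_', Rc=1): in addition to the result mask, CR field 6
+// is updated: its LT bit is set if the relation holds for all elements, its EQ bit if
+// it holds for none, so the outcome can be tested with a conditional branch.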
+inline void Assembler::vcmpequb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpequh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpequw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtsh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtsb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtsw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtub_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtuh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtuw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
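+// Usage sketch (illustrative only; register and label names are hypothetical):
+// branch when every byte element of VR1 equals the corresponding element of VR2.
+//   vcmpequb_(VR0, VR1, VR2);   // Rc=1: CR6 summarizes the comparison
+//   blt(CCR6, all_equal);       // CR6.lt is set iff all elements compared equal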
+inline void Assembler::vand(    VectorRegister d, VectorRegister a, VectorRegister b) { guarantee(VM_Version::has_vand(), "opcode not supported on this hardware");
+                                                                                        emit_int32( VAND_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vandc(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vnor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vor(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vxor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
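+// Vector rotate left and shifts (left, right, right-algebraic) per element.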
+inline void Assembler::vrlb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vrlw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLW_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vrlh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLH_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vslb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLB_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vskw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSKW_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vslh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLH_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRB_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRW_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRH_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrab(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAB_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsraw(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAW_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrah(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAH_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
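+// Move to/from the Vector Status and Control Register (VSCR).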
+inline void Assembler::mtvscr(  VectorRegister b)                                     { emit_int32( MTVSCR_OPCODE   | vrb(b)); }
+inline void Assembler::mfvscr(  VectorRegister d)                                     { emit_int32( MFVSCR_OPCODE   | vrt(d)); }
+
+// "ra0" versions: the RA field is implicitly 0 and contributes zero to the
+// effective-address calculation, so EA is just (RB) for the indexed forms or the
+// sign-extended displacement for the D/DS forms.
+inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lwa(  Register d, int si16   ) { emit_int32( LWA_OPCODE  | rt(d) | ds(si16));}
+inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lhz(  Register d, int si16   ) { emit_int32( LHZ_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lha(  Register d, int si16   ) { emit_int32( LHA_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lbz(  Register d, int si16   ) { emit_int32( LBZ_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::ld(   Register d, int si16   ) { emit_int32( LD_OPCODE   | rt(d) | ds(si16));}
+inline void Assembler::ldx(  Register d, Register s2) { emit_int32( LDX_OPCODE  | rt(d) | rb(s2));}
+inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::stw(  Register d, int si16   ) { emit_int32( STW_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::sth(  Register d, int si16   ) { emit_int32( STH_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::stb(  Register d, int si16   ) { emit_int32( STB_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::std(  Register d, int si16   ) { emit_int32( STD_OPCODE  | rs(d) | ds(si16));}
+inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
+
+// "ra0" versions of the cache management instructions.
+inline void Assembler::icbi(    Register s2)          { emit_int32( ICBI_OPCODE   | rb(s2)           ); }
+//inline void Assembler::dcba(  Register s2)          { emit_int32( DCBA_OPCODE   | rb(s2)           ); }
+inline void Assembler::dcbz(    Register s2)          { emit_int32( DCBZ_OPCODE   | rb(s2)           ); }
+inline void Assembler::dcbst(   Register s2)          { emit_int32( DCBST_OPCODE  | rb(s2)           ); }
+inline void Assembler::dcbf(    Register s2)          { emit_int32( DCBF_OPCODE   | rb(s2)           ); }
+inline void Assembler::dcbt(    Register s2)          { emit_int32( DCBT_OPCODE   | rb(s2)           ); }
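+// dcbt/dcbtst variants with an explicit TH (touch hint) field: the 'ct' forms encode
+// the cache-touch hints, the 'ds' forms the data-stream hints.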
+inline void Assembler::dcbtct(  Register s2, int ct)  { emit_int32( DCBT_OPCODE   | rb(s2) | thct(ct)); }
+inline void Assembler::dcbtds(  Register s2, int ds)  { emit_int32( DCBT_OPCODE   | rb(s2) | thds(ds)); }
+inline void Assembler::dcbtst(  Register s2)          { emit_int32( DCBTST_OPCODE | rb(s2)           ); }
+inline void Assembler::dcbtstct(Register s2, int ct)  { emit_int32( DCBTST_OPCODE | rb(s2) | thct(ct)); }
+
+// "ra0" versions of the load-and-reserve / store-conditional instructions.
+inline void Assembler::lwarx_unchecked(Register d, Register b, int eh1)          { emit_int32( LWARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
+inline void Assembler::ldarx_unchecked(Register d, Register b, int eh1)          { emit_int32( LDARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
+inline void Assembler::lwarx( Register d, Register b, bool hint_exclusive_access){ lwarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::ldarx( Register d, Register b, bool hint_exclusive_access){ ldarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::stwcx_(Register s, Register b)                            { emit_int32( STWCX_OPCODE | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::stdcx_(Register s, Register b)                            { emit_int32( STDCX_OPCODE | rs(s) | rb(b) | rc(1)); }
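+// Usage sketch (illustrative only; register and label names are hypothetical):
+// a typical load-reserve / store-conditional retry loop built from the emitters above.
+//   Label retry, done;
+//   bind(retry);
+//   lwarx(Rtmp, Raddr, /*hint_exclusive_access=*/false); // load word and reserve (Raddr)
+//   cmpw(CCR0, Rtmp, Rexpected);                         // still the expected value?
+//   bne(CCR0, done);                                     // no: give up
+//   stwcx_(Rnew, Raddr);                                  // conditional store; CR0.eq set on success
+//   bne(CCR0, retry);                                     // reservation lost: retry
+//   bind(done);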
+
+// "ra0" versions of the floating-point loads.
+inline void Assembler::lfs( FloatRegister d, int si16)   { emit_int32( LFS_OPCODE  | frt(d) | simm(si16,16)); }
+inline void Assembler::lfsx(FloatRegister d, Register b) { emit_int32( LFSX_OPCODE | frt(d) | rb(b)); }
+inline void Assembler::lfd( FloatRegister d, int si16)   { emit_int32( LFD_OPCODE  | frt(d) | simm(si16,16)); }
+inline void Assembler::lfdx(FloatRegister d, Register b) { emit_int32( LFDX_OPCODE | frt(d) | rb(b)); }
+
+// "ra0" versions of the floating-point stores.
+inline void Assembler::stfs( FloatRegister s, int si16)   { emit_int32( STFS_OPCODE  | frs(s) | simm(si16, 16)); }
+inline void Assembler::stfsx(FloatRegister s, Register b) { emit_int32( STFSX_OPCODE | frs(s) | rb(b)); }
+inline void Assembler::stfd( FloatRegister s, int si16)   { emit_int32( STFD_OPCODE  | frs(s) | simm(si16, 16)); }
+inline void Assembler::stfdx(FloatRegister s, Register b) { emit_int32( STFDX_OPCODE | frs(s) | rb(b)); }
+
+// "ra0" versions of the vector (AltiVec) loads and stores.
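+// Note: lvx/stvx (and their LRU variants lvxl/stvxl) ignore the low-order four bits
+// of the effective address, i.e. accesses are implicitly 16-byte aligned.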
+inline void Assembler::lvebx( VectorRegister d, Register s2) { emit_int32( LVEBX_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler::lvehx( VectorRegister d, Register s2) { emit_int32( LVEHX_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler::lvewx( VectorRegister d, Register s2) { emit_int32( LVEWX_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler::lvx(   VectorRegister d, Register s2) { emit_int32( LVX_OPCODE    | vrt(d) | rb(s2)); }
+inline void Assembler::lvxl(  VectorRegister d, Register s2) { emit_int32( LVXL_OPCODE   | vrt(d) | rb(s2)); }
+inline void Assembler::stvebx(VectorRegister d, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | rb(s2)); }
+inline void Assembler::stvehx(VectorRegister d, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | rb(s2)); }
+inline void Assembler::stvewx(VectorRegister d, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | rb(s2)); }
+inline void Assembler::stvx(  VectorRegister d, Register s2) { emit_int32( STVX_OPCODE   | vrt(d) | rb(s2)); }
+inline void Assembler::stvxl( VectorRegister d, Register s2) { emit_int32( STVXL_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler