changeset 21720:d915361cc3a1

moved asm, bytecode and asm.test code back to com.oracle.graal name space (JBS:GRAAL-53)
author Doug Simon <doug.simon@oracle.com>
date Thu, 04 Jun 2015 13:35:47 +0200
parents cbe8cc0f79ce
children 1a4c3fdee936
files graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/BitOpsTest.java graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/IncrementDecrementMacroTest.java graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/SimpleAssemblerTest.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Address.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64AsmOptions.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAddress.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAssembler.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCInstructionCounter.java graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCMacroAssembler.java graal/com.oracle.graal.asm.test/src/com/oracle/graal/asm/test/AssemblerTest.java graal/com.oracle.graal.asm/overview.html graal/com.oracle.graal.asm/src/com/oracle/graal/asm/AsmOptions.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Assembler.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Buffer.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Label.java graal/com.oracle.graal.asm/src/com/oracle/graal/asm/NumUtil.java graal/com.oracle.graal.bytecode/overview.html graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeLookupSwitch.java graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeStream.java graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeSwitch.java graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeTableSwitch.java graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/Bytecodes.java graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/Bytes.java 
graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/CheckGraalInvariants.java graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/StaticInterfaceFieldTest.java graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/DataPatchInConstantsTest.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64DeoptimizeOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBinaryConsumer.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallEpilogueOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallPrologueOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableAddressOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableShiftOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCounterOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotDeoptimizeCallerOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEnterUnpackFramesStackFrameOp.java 
graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotJumpToExceptionHandlerInCallerOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveCurrentStackFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveDeoptimizedStackFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveUnpackFramesStackFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPatchReturnAddressOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPushInterpreterFrameOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotReturnOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotSafepointOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotUnwindOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64IndirectCallOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64PrefetchOp.java graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64TailcallOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCDeoptimizeOp.java 
graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotBackend.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallEpilogueOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCRuntimeCallPrologueOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotCounterOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotDeoptimizeCallerOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotEnterUnpackFramesStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerInCallerOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotJumpToExceptionHandlerOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveCurrentStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveDeoptimizedStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotLeaveUnpackFramesStackFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotMove.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPatchReturnAddressOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotPushInterpreterFrameOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotReturnOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotSafepointOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotSpotUnwindOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectStaticCallOp.java 
graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCHotspotDirectVirtualCallOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCIndirectCallOp.java graal/com.oracle.graal.hotspot.sparc/src/com/oracle/graal/hotspot/sparc/SPARCPrefetchOp.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/CompileTheWorld.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotCounterOp.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/HotSpotInstructionProfiling.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/nodes/DimensionsNode.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/arraycopy/UnsafeArrayCopySnippets.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/DeoptimizationStub.java graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/UncommonTrapStub.java graal/com.oracle.graal.java/src/com/oracle/graal/java/BciBlockMapping.java graal/com.oracle.graal.java/src/com/oracle/graal/java/BytecodeDisassembler.java graal/com.oracle.graal.java/src/com/oracle/graal/java/BytecodeParser.java graal/com.oracle.graal.java/src/com/oracle/graal/java/FrameStateBuilder.java graal/com.oracle.graal.java/src/com/oracle/graal/java/LocalLiveness.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64AddressValue.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Arithmetic.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ArrayEqualsOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Binary.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BinaryConsumer.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64BreakpointOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ByteSwapOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64CCall.java 
graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Call.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ClearRegisterOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ControlFlow.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64FrameMap.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64LIRInstruction.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MathIntrinsicOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Move.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64MulDivOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64RestoreRegistersOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SaveRegistersOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ShiftOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64SignExtendOp.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64Unary.java graal/com.oracle.graal.lir.amd64/src/com/oracle/graal/lir/amd64/AMD64ZapRegistersOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCAddressValue.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArithmetic.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCArrayEqualsOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBitManipulationOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCBreakpointOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCByteSwapOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCall.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCCompare.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCControlFlow.java 
graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCDelayedControlTransfer.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCFrameMap.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCJumpOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCLIRInstruction.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMathIntrinsicOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCMove.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCSaveRegistersOp.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTailDelayedLIRInstruction.java graal/com.oracle.graal.lir.sparc/src/com/oracle/graal/lir/sparc/SPARCTestOp.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/LabelRef.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/StandardOp.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/SwitchStrategy.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilder.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/asm/CompilationResultBuilderFactory.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/framemap/FrameMap.java graal/com.oracle.graal.lir/src/com/oracle/graal/lir/gen/LIRGenerator.java graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/FrameState.java graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/DefaultJavaLoweringProvider.java graal/com.oracle.graal.test/src/com/oracle/graal/test/GraalTest.java graal/com.oracle.graal.truffle.hotspot.amd64/src/com/oracle/graal/truffle/hotspot/amd64/AMD64OptimizedCallTargetInstrumentationFactory.java graal/com.oracle.graal.truffle.hotspot.sparc/src/com/oracle/graal/truffle/hotspot/sparc/SPARCOptimizedCallTargetInstumentationFactory.java graal/com.oracle.graal.truffle.hotspot/src/com/oracle/graal/truffle/hotspot/OptimizedCallTargetInstrumentation.java 
graal/com.oracle.jvmci.asm.amd64.test/src/com/oracle/jvmci/asm/amd64/test/BitOpsTest.java graal/com.oracle.jvmci.asm.amd64.test/src/com/oracle/jvmci/asm/amd64/test/IncrementDecrementMacroTest.java graal/com.oracle.jvmci.asm.amd64.test/src/com/oracle/jvmci/asm/amd64/test/SimpleAssemblerTest.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64Address.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64AsmOptions.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64Assembler.java graal/com.oracle.jvmci.asm.amd64/src/com/oracle/jvmci/asm/amd64/AMD64MacroAssembler.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCAddress.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCAssembler.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCInstructionCounter.java graal/com.oracle.jvmci.asm.sparc/src/com/oracle/jvmci/asm/sparc/SPARCMacroAssembler.java graal/com.oracle.jvmci.asm.test/src/com/oracle/jvmci/asm/test/AssemblerTest.java graal/com.oracle.jvmci.asm/overview.html graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/AsmOptions.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Assembler.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Buffer.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/Label.java graal/com.oracle.jvmci.asm/src/com/oracle/jvmci/asm/NumUtil.java graal/com.oracle.jvmci.bytecode/overview.html graal/com.oracle.jvmci.bytecode/src/com/oracle/jvmci/bytecode/BytecodeLookupSwitch.java graal/com.oracle.jvmci.bytecode/src/com/oracle/jvmci/bytecode/BytecodeStream.java graal/com.oracle.jvmci.bytecode/src/com/oracle/jvmci/bytecode/BytecodeSwitch.java graal/com.oracle.jvmci.bytecode/src/com/oracle/jvmci/bytecode/BytecodeTableSwitch.java graal/com.oracle.jvmci.bytecode/src/com/oracle/jvmci/bytecode/Bytecodes.java graal/com.oracle.jvmci.bytecode/src/com/oracle/jvmci/bytecode/Bytes.java 
graal/com.oracle.jvmci.hotspot/src/com/oracle/jvmci/hotspot/HotSpotConstantPool.java graal/com.oracle.jvmci.test/src/com/oracle/jvmci/test/TestBase.java make/defs.make mx/mx_graal.py mx/suite.py
diffstat 179 files changed, 9082 insertions(+), 9086 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/BitOpsTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.graal.asm.amd64.test;
+
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.test.*;
+import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.amd64.AMD64.*;
+import com.oracle.jvmci.code.RegisterConfig;
+import com.oracle.jvmci.code.TargetDescription;
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.CallingConvention;
+import com.oracle.jvmci.code.CompilationResult;
+import com.oracle.jvmci.meta.Kind;
+
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.jvmci.code.ValueUtil.*;
+import static com.oracle.jvmci.common.UnsafeAccess.*;
+import static org.junit.Assume.*;
+
+import java.lang.reflect.*;
+import java.util.*;
+
+import org.junit.*;
+
+public class BitOpsTest extends AssemblerTest {
+    private static boolean lzcntSupported;
+    private static boolean tzcntSupported;
+
+    @Before
+    public void checkAMD64() {
+        assumeTrue("skipping AMD64 specific test", codeCache.getTarget().arch instanceof AMD64);
+        EnumSet<CPUFeature> features = ((AMD64) codeCache.getTarget().arch).getFeatures();
+        lzcntSupported = features.contains(CPUFeature.LZCNT);
+        tzcntSupported = features.contains(CPUFeature.BMI1);
+    }
+
+    @Test
+    public void lzcntlTest() {
+        if (lzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    Register arg = asRegister(cc.getArgument(0));
+                    LZCNT.emit(asm, DWORD, ret, arg);
+                    asm.ret(0);
+                    return asm.close(true);
+                }
+            };
+            assertReturn("intStub", test, 31, 1);
+        }
+    }
+
+    @Test
+    public void lzcntlMemTest() {
+        if (lzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    try {
+                        Field f = IntField.class.getDeclaredField("x");
+                        AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
+                        LZCNT.emit(asm, DWORD, ret, arg);
+                        asm.ret(0);
+                        return asm.close(true);
+                    } catch (Exception e) {
+                        throw new RuntimeException("exception while trying to generate field access:", e);
+                    }
+                }
+            };
+            assertReturn("intFieldStub", test, 31, new IntField(1));
+        }
+    }
+
+    @Test
+    public void lzcntqTest() {
+        if (lzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    Register arg = asRegister(cc.getArgument(0));
+                    LZCNT.emit(asm, QWORD, ret, arg);
+                    asm.ret(0);
+                    return asm.close(true);
+                }
+            };
+            assertReturn("longStub", test, 63, 1L);
+        }
+    }
+
+    @Test
+    public void lzcntqMemTest() {
+        if (lzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    try {
+                        Field f = LongField.class.getDeclaredField("x");
+                        AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
+                        LZCNT.emit(asm, QWORD, ret, arg);
+                        asm.ret(0);
+                        return asm.close(true);
+                    } catch (Exception e) {
+                        throw new RuntimeException("exception while trying to generate field access:", e);
+                    }
+                }
+            };
+            assertReturn("longFieldStub", test, 63, new LongField(1));
+        }
+    }
+
+    @Test
+    public void tzcntlTest() {
+        if (tzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    Register arg = asRegister(cc.getArgument(0));
+                    TZCNT.emit(asm, DWORD, ret, arg);
+                    asm.ret(0);
+                    return asm.close(true);
+                }
+            };
+            assertReturn("intStub", test, 31, 0x8000_0000);
+        }
+    }
+
+    @Test
+    public void tzcntlMemTest() {
+        if (tzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    try {
+                        Field f = IntField.class.getDeclaredField("x");
+                        AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
+                        TZCNT.emit(asm, DWORD, ret, arg);
+                        asm.ret(0);
+                        return asm.close(true);
+                    } catch (Exception e) {
+                        throw new RuntimeException("exception while trying to generate field access:", e);
+                    }
+                }
+            };
+            assertReturn("intFieldStub", test, 31, new IntField(0x8000_0000));
+        }
+    }
+
+    @Test
+    public void tzcntqTest() {
+        if (tzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    Register arg = asRegister(cc.getArgument(0));
+                    TZCNT.emit(asm, QWORD, ret, arg);
+                    asm.ret(0);
+                    return asm.close(true);
+                }
+            };
+            assertReturn("longStub", test, 63, 0x8000_0000_0000_0000L);
+        }
+    }
+
+    @Test
+    public void tzcntqMemTest() {
+        if (tzcntSupported) {
+            CodeGenTest test = new CodeGenTest() {
+
+                @Override
+                public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                    AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                    Register ret = registerConfig.getReturnRegister(Kind.Int);
+                    try {
+                        Field f = LongField.class.getDeclaredField("x");
+                        AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
+                        TZCNT.emit(asm, QWORD, ret, arg);
+                        asm.ret(0);
+                        return asm.close(true);
+                    } catch (Exception e) {
+                        throw new RuntimeException("exception while trying to generate field access:", e);
+                    }
+                }
+            };
+            assertReturn("longFieldStub", test, 63, new LongField(0x8000_0000_0000_0000L));
+        }
+    }
+
+    @SuppressWarnings("unused")
+    public static int intStub(int arg) {
+        return 0;
+    }
+
+    @SuppressWarnings("unused")
+    public static int longStub(long arg) {
+        return 0;
+    }
+
+    public static class IntField {
+        public int x;
+
+        IntField(int x) {
+            this.x = x;
+        }
+    }
+
+    public static class LongField {
+        public long x;
+
+        LongField(long x) {
+            this.x = x;
+        }
+    }
+
+    @SuppressWarnings("unused")
+    public static int intFieldStub(IntField arg) {
+        return 0;
+    }
+
+    @SuppressWarnings("unused")
+    public static int longFieldStub(LongField arg) {
+        return 0;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/IncrementDecrementMacroTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.amd64.test;
+
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.test.*;
+import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.code.RegisterConfig;
+import com.oracle.jvmci.code.TargetDescription;
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.CallingConvention;
+import com.oracle.jvmci.code.CompilationResult;
+import com.oracle.jvmci.meta.Kind;
+
+import static com.oracle.jvmci.code.ValueUtil.*;
+import static com.oracle.jvmci.common.UnsafeAccess.*;
+import static org.junit.Assume.*;
+
+import java.lang.reflect.*;
+
+import org.junit.*;
+
+public class IncrementDecrementMacroTest extends AssemblerTest {
+
+    @Before
+    public void checkAMD64() {
+        assumeTrue("skipping AMD64 specific test", codeCache.getTarget().arch instanceof AMD64);
+    }
+
+    public static class LongField {
+        public long x;
+
+        LongField(long x) {
+            this.x = x;
+        }
+    }
+
+    private static class IncrementCodeGenTest implements CodeGenTest {
+        final int value;
+
+        public IncrementCodeGenTest(int value) {
+            this.value = value;
+        }
+
+        @Override
+        public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+            AMD64MacroAssembler asm = new AMD64MacroAssembler(target, registerConfig);
+            Register ret = registerConfig.getReturnRegister(Kind.Int);
+            try {
+                Field f = LongField.class.getDeclaredField("x");
+                AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
+                asm.incrementq(arg, value);
+                asm.movq(ret, arg);
+                asm.ret(0);
+                return asm.close(true);
+            } catch (Exception e) {
+                throw new RuntimeException("exception while trying to generate field access:", e);
+            }
+        }
+    }
+
+    private void assertIncrement(long initValue, int increment) {
+        assertReturn("longFieldStubIncrement", new IncrementCodeGenTest(increment), initValue + increment, new LongField(initValue));
+    }
+
+    private void assertIncrements(int increment) {
+        assertIncrement(0x4242_4242_4242_4242L, increment);
+    }
+
+    @SuppressWarnings("unused")
+    public static long longFieldStubIncrement(LongField arg) {
+        return 0;
+    }
+
+    private static class DecrementCodeGenTest implements CodeGenTest {
+        final int value;
+
+        public DecrementCodeGenTest(int value) {
+            this.value = value;
+        }
+
+        @Override
+        public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+            AMD64MacroAssembler asm = new AMD64MacroAssembler(target, registerConfig);
+            Register ret = registerConfig.getReturnRegister(Kind.Int);
+            try {
+                Field f = LongField.class.getDeclaredField("x");
+                AMD64Address arg = new AMD64Address(asRegister(cc.getArgument(0)), (int) unsafe.objectFieldOffset(f));
+                asm.decrementq(arg, value);
+                asm.movq(ret, arg);
+                asm.ret(0);
+                return asm.close(true);
+            } catch (Exception e) {
+                throw new RuntimeException("exception while trying to generate field access:", e);
+            }
+        }
+    }
+
+    private void assertDecrement(long initValue, int increment) {
+        assertReturn("longFieldStubDecrement", new DecrementCodeGenTest(increment), initValue - increment, new LongField(initValue));
+    }
+
+    private void assertDecrements(int increment) {
+        assertDecrement(0x4242_4242_4242_4242L, increment);
+    }
+
+    @SuppressWarnings("unused")
+    public static long longFieldStubDecrement(LongField arg) {
+        return 0;
+    }
+
+    @Test
+    public void incrementMemTest0() {
+        int increment = 0;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void incrementMemTest1() {
+        int increment = 1;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void incrementMemTest2() {
+        int increment = 2;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void incrementMemTest3() {
+        int increment = Integer.MAX_VALUE;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void incrementMemTest4() {
+        int increment = Integer.MIN_VALUE;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void incrementMemTest5() {
+        int increment = -1;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void incrementMemTest6() {
+        int increment = -2;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void incrementMemTest7() {
+        int increment = -0x1000_0000;
+        assertIncrements(increment);
+    }
+
+    @Test
+    public void decrementMemTest0() {
+        int decrement = 0;
+        assertDecrements(decrement);
+    }
+
+    @Test
+    public void decrementMemTest1() {
+        int decrement = 1;
+        assertDecrements(decrement);
+    }
+
+    @Test
+    public void decrementMemTest2() {
+        int decrement = 2;
+        assertDecrements(decrement);
+    }
+
+    @Test
+    public void decrementMemTest3() {
+        int decrement = Integer.MAX_VALUE;
+        assertDecrements(decrement);
+    }
+
+    @Test
+    public void decrementMemTest4() {
+        int decrement = Integer.MIN_VALUE;
+        assertDecrements(decrement);
+    }
+
+    @Test
+    public void decrementMemTest5() {
+        int decrement = -1;
+        assertDecrements(decrement);
+    }
+
+    @Test
+    public void decrementMemTest6() {
+        int decrement = -2;
+        assertDecrements(decrement);
+    }
+
+    @Test
+    public void decrementMemTest7() {
+        int decrement = -0x1000_0000;
+        assertDecrements(decrement);
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.amd64.test/src/com/oracle/graal/asm/amd64/test/SimpleAssemblerTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.amd64.test;
+
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.test.*;
+import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.code.RegisterConfig;
+import com.oracle.jvmci.code.TargetDescription;
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.CallingConvention;
+import com.oracle.jvmci.code.CompilationResult;
+import com.oracle.jvmci.meta.JavaConstant;
+import com.oracle.jvmci.meta.Kind;
+
+import static org.junit.Assume.*;
+
+import java.nio.*;
+
+import org.junit.*;
+
+import com.oracle.jvmci.code.CompilationResult.DataSectionReference;
+import com.oracle.jvmci.code.DataSection.Data;
+import com.oracle.jvmci.code.DataSection.DataBuilder;
+
+public class SimpleAssemblerTest extends AssemblerTest {
+
+    @Before
+    public void checkAMD64() {
+        assumeTrue("skipping AMD64 specific test", codeCache.getTarget().arch instanceof AMD64);
+    }
+
+    @Test
+    public void intTest() {
+        CodeGenTest test = new CodeGenTest() {
+
+            @Override
+            public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                AMD64Assembler asm = new AMD64Assembler(target, registerConfig);
+                Register ret = registerConfig.getReturnRegister(Kind.Int);
+                asm.movl(ret, 8472);
+                asm.ret(0);
+                return asm.close(true);
+            }
+        };
+        assertReturn("intStub", test, 8472);
+    }
+
+    @Test
+    public void doubleTest() {
+        CodeGenTest test = new CodeGenTest() {
+
+            @Override
+            public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                AMD64MacroAssembler asm = new AMD64MacroAssembler(target, registerConfig);
+                Register ret = registerConfig.getReturnRegister(Kind.Double);
+                Data data = new Data(8, 8, DataBuilder.serializable(JavaConstant.forDouble(84.72)));
+                DataSectionReference ref = compResult.getDataSection().insertData(data);
+                compResult.recordDataPatch(asm.position(), ref);
+                asm.movdbl(ret, asm.getPlaceholder());
+                asm.ret(0);
+                return asm.close(true);
+            }
+        };
+        assertReturn("doubleStub", test, 84.72);
+    }
+
+    @Test
+    public void rawDoubleTest() {
+        CodeGenTest test = new CodeGenTest() {
+
+            @Override
+            public byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc) {
+                AMD64MacroAssembler asm = new AMD64MacroAssembler(target, registerConfig);
+                Register ret = registerConfig.getReturnRegister(Kind.Double);
+
+                byte[] rawBytes = new byte[8];
+                ByteBuffer.wrap(rawBytes).order(ByteOrder.nativeOrder()).putDouble(84.72);
+                Data data = new Data(8, 8, DataBuilder.raw(rawBytes));
+                DataSectionReference ref = compResult.getDataSection().insertData(data);
+                compResult.recordDataPatch(asm.position(), ref);
+                asm.movdbl(ret, asm.getPlaceholder());
+                asm.ret(0);
+                return asm.close(true);
+            }
+        };
+        assertReturn("doubleStub", test, 84.72);
+    }
+
+    public static int intStub() {
+        return 0;
+    }
+
+    public static double doubleStub() {
+        return 0.0;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Address.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.amd64;
+
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.AbstractAddress;
+
+/**
+ * Represents an address in target machine memory, specified via some combination of a base
+ * register, an index register, a displacement and a scale. Note that the base and index registers
+ * may be a variable that will get a register assigned later by the register allocator.
+ */
+public final class AMD64Address extends AbstractAddress {
+
+    private final Register base;
+    private final Register index;
+    private final Scale scale;
+    private final int displacement;
+
+    /**
+     * Creates an {@link AMD64Address} with given base register, no scaling and no displacement.
+     *
+     * @param base the base register
+     */
+    public AMD64Address(Register base) {
+        this(base, Register.None, Scale.Times1, 0);
+    }
+
+    /**
+     * Creates an {@link AMD64Address} with given base register, no scaling and a given
+     * displacement.
+     *
+     * @param base the base register
+     * @param displacement the displacement
+     */
+    public AMD64Address(Register base, int displacement) {
+        this(base, Register.None, Scale.Times1, displacement);
+    }
+
+    /**
+     * Creates an {@link AMD64Address} with given base and index registers, scaling and
+     * displacement. This is the most general constructor.
+     *
+     * @param base the base register
+     * @param index the index register
+     * @param scale the scaling factor
+     * @param displacement the displacement
+     */
+    public AMD64Address(Register base, Register index, Scale scale, int displacement) {
+        this.base = base;
+        this.index = index;
+        this.scale = scale;
+        this.displacement = displacement;
+
+        assert scale != null;
+    }
+
+    /**
+     * A scaling factor used in the SIB addressing mode.
+     */
+    public enum Scale {
+        Times1(1, 0),
+        Times2(2, 1),
+        Times4(4, 2),
+        Times8(8, 3);
+
+        private Scale(int value, int log2) {
+            this.value = value;
+            this.log2 = log2;
+        }
+
+        /**
+         * The value (or multiplier) of this scale.
+         */
+        public final int value;
+
+        /**
+         * The {@linkplain #value value} of this scale log 2.
+         */
+        public final int log2;
+
+        public static Scale fromInt(int scale) {
+            switch (scale) {
+                case 1:
+                    return Times1;
+                case 2:
+                    return Times2;
+                case 4:
+                    return Times4;
+                case 8:
+                    return Times8;
+                default:
+                    return null;
+            }
+        }
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder s = new StringBuilder();
+        s.append("[");
+        String sep = "";
+        if (!getBase().equals(Register.None)) {
+            s.append(getBase());
+            sep = " + ";
+        }
+        if (!getIndex().equals(Register.None)) {
+            s.append(sep).append(getIndex()).append(" * ").append(getScale().value);
+            sep = " + ";
+        }
+        if (getDisplacement() < 0) {
+            s.append(" - ").append(-getDisplacement());
+        } else if (getDisplacement() > 0) {
+            s.append(sep).append(getDisplacement());
+        }
+        s.append("]");
+        return s.toString();
+    }
+
+    /**
+     * @return Base register that defines the start of the address computation. If not present, is
+     *         denoted by {@link Register#None}.
+     */
+    public Register getBase() {
+        return base;
+    }
+
+    /**
+     * @return Index register, the value of which (possibly scaled by {@link #getScale}) is added to
+     *         {@link #getBase}. If not present, is denoted by {@link Register#None}.
+     */
+    public Register getIndex() {
+        return index;
+    }
+
+    /**
+     * @return Scaling factor for indexing, dependent on target operand size.
+     */
+    public Scale getScale() {
+        return scale;
+    }
+
+    /**
+     * @return Optional additive displacement.
+     */
+    public int getDisplacement() {
+        return displacement;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64AsmOptions.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.amd64;
+
+public class AMD64AsmOptions {
+    public static final boolean UseNormalNop = false;
+    public static final boolean UseAddressNop = true;
+    public static final boolean UseIncDec = true;
+    public static final boolean UseXmmLoadAndClearUpper = true;
+    public static final boolean UseXmmRegToRegMoveAll = true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64Assembler.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,2445 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.amd64;
+
+import com.oracle.graal.asm.*;
+import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.amd64.AMD64.*;
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.TargetDescription;
+import com.oracle.jvmci.code.RegisterConfig;
+
+import static com.oracle.graal.asm.NumUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64AsmOptions.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.jvmci.amd64.AMD64.*;
+import static com.oracle.jvmci.code.MemoryBarriers.*;
+
+import com.oracle.jvmci.code.Register.RegisterCategory;
+
+/**
+ * This class implements an assembler that can encode most X86 instructions.
+ */
+public class AMD64Assembler extends Assembler {
+
+    private static final int MinEncodingNeedsRex = 8;
+
+    /**
+     * A sentinel value used as a place holder in an instruction stream for an address that will be
+     * patched.
+     */
+    private static final AMD64Address Placeholder = new AMD64Address(rip);
+
+    /**
+     * The x86 condition codes used for conditional jumps/moves.
+     */
+    public enum ConditionFlag {
+        Zero(0x4, "|zero|"),
+        NotZero(0x5, "|nzero|"),
+        Equal(0x4, "="),
+        NotEqual(0x5, "!="),
+        Less(0xc, "<"),
+        LessEqual(0xe, "<="),
+        Greater(0xf, ">"),
+        GreaterEqual(0xd, ">="),
+        Below(0x2, "|<|"),
+        BelowEqual(0x6, "|<=|"),
+        Above(0x7, "|>|"),
+        AboveEqual(0x3, "|>=|"),
+        Overflow(0x0, "|of|"),
+        NoOverflow(0x1, "|nof|"),
+        CarrySet(0x2, "|carry|"),
+        CarryClear(0x3, "|ncarry|"),
+        Negative(0x8, "|neg|"),
+        Positive(0x9, "|pos|"),
+        Parity(0xa, "|par|"),
+        NoParity(0xb, "|npar|");
+
+        private final int value;
+        private final String operator;
+
+        private ConditionFlag(int value, String operator) {
+            this.value = value;
+            this.operator = operator;
+        }
+
+        public ConditionFlag negate() {
+            switch (this) {
+                case Zero:
+                    return NotZero;
+                case NotZero:
+                    return Zero;
+                case Equal:
+                    return NotEqual;
+                case NotEqual:
+                    return Equal;
+                case Less:
+                    return GreaterEqual;
+                case LessEqual:
+                    return Greater;
+                case Greater:
+                    return LessEqual;
+                case GreaterEqual:
+                    return Less;
+                case Below:
+                    return AboveEqual;
+                case BelowEqual:
+                    return Above;
+                case Above:
+                    return BelowEqual;
+                case AboveEqual:
+                    return Below;
+                case Overflow:
+                    return NoOverflow;
+                case NoOverflow:
+                    return Overflow;
+                case CarrySet:
+                    return CarryClear;
+                case CarryClear:
+                    return CarrySet;
+                case Negative:
+                    return Positive;
+                case Positive:
+                    return Negative;
+                case Parity:
+                    return NoParity;
+                case NoParity:
+                    return Parity;
+            }
+            throw new IllegalArgumentException();
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        @Override
+        public String toString() {
+            return operator;
+        }
+    }
+
+    /**
+     * Constants for X86 prefix bytes.
+     */
+    private static class Prefix {
+
+        private static final int REX = 0x40;
+        private static final int REXB = 0x41;
+        private static final int REXX = 0x42;
+        private static final int REXXB = 0x43;
+        private static final int REXR = 0x44;
+        private static final int REXRB = 0x45;
+        private static final int REXRX = 0x46;
+        private static final int REXRXB = 0x47;
+        private static final int REXW = 0x48;
+        private static final int REXWB = 0x49;
+        private static final int REXWX = 0x4A;
+        private static final int REXWXB = 0x4B;
+        private static final int REXWR = 0x4C;
+        private static final int REXWRB = 0x4D;
+        private static final int REXWRX = 0x4E;
+        private static final int REXWRXB = 0x4F;
+    }
+
+    /**
+     * The x86 operand sizes.
+     */
+    public static enum OperandSize {
+        BYTE(1) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                assert imm == (byte) imm;
+                asm.emitByte(imm);
+            }
+        },
+
+        WORD(2, 0x66) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                assert imm == (short) imm;
+                asm.emitShort(imm);
+            }
+        },
+
+        DWORD(4) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                asm.emitInt(imm);
+            }
+        },
+
+        QWORD(8) {
+            @Override
+            protected void emitImmediate(AMD64Assembler asm, int imm) {
+                asm.emitInt(imm);
+            }
+        },
+
+        SS(4, 0xF3, true),
+
+        SD(8, 0xF2, true),
+
+        PS(16, true),
+
+        PD(16, 0x66, true);
+
+        private final int sizePrefix;
+
+        private final int bytes;
+        private final boolean xmm;
+
+        private OperandSize(int bytes) {
+            this(bytes, 0);
+        }
+
+        private OperandSize(int bytes, int sizePrefix) {
+            this(bytes, sizePrefix, false);
+        }
+
+        private OperandSize(int bytes, boolean xmm) {
+            this(bytes, 0, xmm);
+        }
+
+        private OperandSize(int bytes, int sizePrefix, boolean xmm) {
+            this.sizePrefix = sizePrefix;
+            this.bytes = bytes;
+            this.xmm = xmm;
+        }
+
+        public int getBytes() {
+            return bytes;
+        }
+
+        public boolean isXmmType() {
+            return xmm;
+        }
+
+        /**
+         * Emit an immediate of this size. Note that immediate {@link #QWORD} operands are encoded
+         * as sign-extended 32-bit values.
+         *
+         * @param asm
+         * @param imm
+         */
+        protected void emitImmediate(AMD64Assembler asm, int imm) {
+            assert false;
+        }
+    }
+
+    /**
+     * Operand size and register type constraints.
+     */
+    private static enum OpAssertion {
+        ByteAssertion(CPU, CPU, BYTE),
+        IntegerAssertion(CPU, CPU, WORD, DWORD, QWORD),
+        No16BitAssertion(CPU, CPU, DWORD, QWORD),
+        QwordOnlyAssertion(CPU, CPU, QWORD),
+        FloatingAssertion(XMM, XMM, SS, SD, PS, PD),
+        PackedFloatingAssertion(XMM, XMM, PS, PD),
+        SingleAssertion(XMM, XMM, SS),
+        DoubleAssertion(XMM, XMM, SD),
+        IntToFloatingAssertion(XMM, CPU, DWORD, QWORD),
+        FloatingToIntAssertion(CPU, XMM, DWORD, QWORD);
+
+        private final RegisterCategory resultCategory;
+        private final RegisterCategory inputCategory;
+        private final OperandSize[] allowedSizes;
+
+        private OpAssertion(RegisterCategory resultCategory, RegisterCategory inputCategory, OperandSize... allowedSizes) {
+            this.resultCategory = resultCategory;
+            this.inputCategory = inputCategory;
+            this.allowedSizes = allowedSizes;
+        }
+
+        protected boolean checkOperands(AMD64Op op, OperandSize size, Register resultReg, Register inputReg) {
+            assert resultReg == null || resultCategory.equals(resultReg.getRegisterCategory()) : "invalid result register " + resultReg + " used in " + op;
+            assert inputReg == null || inputCategory.equals(inputReg.getRegisterCategory()) : "invalid input register " + inputReg + " used in " + op;
+
+            for (OperandSize s : allowedSizes) {
+                if (size == s) {
+                    return true;
+                }
+            }
+
+            assert false : "invalid operand size " + size + " used in " + op;
+            return false;
+        }
+    }
+
+    /**
+     * The register to which {@link Register#Frame} and {@link Register#CallerFrame} are bound.
+     */
+    public final Register frameRegister;
+
+    /**
+     * Constructs an assembler for the AMD64 architecture.
+     *
+     * @param registerConfig the register configuration used to bind {@link Register#Frame} and
+     *            {@link Register#CallerFrame} to physical registers. This value can be null if this
+     *            assembler instance will not be used to assemble instructions using these logical
+     *            registers.
+     */
+    public AMD64Assembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target);
+        this.frameRegister = registerConfig == null ? null : registerConfig.getFrameRegister();
+    }
+
+    private boolean supports(CPUFeature feature) {
+        return ((AMD64) target.arch).getFeatures().contains(feature);
+    }
+
+    private static int encode(Register r) {
+        assert r.encoding < 16 && r.encoding >= 0 : "encoding out of range: " + r.encoding;
+        return r.encoding & 0x7;
+    }
+
+    /**
+     * Get RXB bits for register-register instruction. In that encoding, ModRM.rm contains a
+     * register index. The R bit extends the ModRM.reg field and the B bit extends the ModRM.rm
+     * field. The X bit must be 0.
+     */
+    protected static int getRXB(Register reg, Register rm) {
+        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
+        rxb |= (rm == null ? 0 : rm.encoding & 0x08) >> 3;
+        return rxb;
+    }
+
+    /**
+     * Get RXB bits for register-memory instruction. The R bit extends the ModRM.reg field. There
+     * are two cases for the memory operand:<br>
+     * ModRM.rm contains the base register: In that case, B extends the ModRM.rm field and X = 0.<br>
+     * There is an SIB byte: In that case, X extends SIB.index and B extends SIB.base.
+     */
+    protected static int getRXB(Register reg, AMD64Address rm) {
+        int rxb = (reg == null ? 0 : reg.encoding & 0x08) >> 1;
+        if (!rm.getIndex().equals(Register.None)) {
+            rxb |= (rm.getIndex().encoding & 0x08) >> 2;
+        }
+        if (!rm.getBase().equals(Register.None)) {
+            rxb |= (rm.getBase().encoding & 0x08) >> 3;
+        }
+        return rxb;
+    }
+
+    /**
+     * Emit the ModR/M byte for one register operand and an opcode extension in the R field.
+     * <p>
+     * Format: [ 11 reg r/m ]
+     */
+    protected void emitModRM(int reg, Register rm) {
+        assert (reg & 0x07) == reg;
+        emitByte(0xC0 | (reg << 3) | (rm.encoding & 0x07));
+    }
+
+    /**
+     * Emit the ModR/M byte for two register operands.
+     * <p>
+     * Format: [ 11 reg r/m ]
+     */
+    protected void emitModRM(Register reg, Register rm) {
+        int regBits = reg.encoding & 0x07;
+        emitModRM(regBits, rm);
+    }
+
+    /**
+     * Emits the ModR/M byte and optionally the SIB byte for one register and one memory operand.
+     */
+    protected void emitOperandHelper(Register reg, AMD64Address addr) {
+        assert !reg.equals(Register.None);
+        // Only the low 3 encoding bits go into ModRM; the high bit travels in the REX prefix.
+        emitOperandHelper(encode(reg), addr);
+    }
+
+    /**
+     * Emits the ModR/M byte and optionally the SIB byte for one memory operand and an opcode
+     * extension in the R field.
+     * <p>
+     * The addressing form is selected from the base/index/scale/displacement of {@code addr}:
+     * RIP-relative, [base], [base+disp], [base+index*scale+disp], [index*scale+disp] or an
+     * absolute [disp32]. rsp/r12 as base and rbp/r13 with zero displacement need the documented
+     * special encodings (SIB escape resp. forced disp8).
+     */
+    protected void emitOperandHelper(int reg, AMD64Address addr) {
+        assert (reg & 0x07) == reg;
+        int regenc = reg << 3;
+
+        Register base = addr.getBase();
+        Register index = addr.getIndex();
+
+        AMD64Address.Scale scale = addr.getScale();
+        int disp = addr.getDisplacement();
+
+        if (base.equals(Register.Frame)) {
+            assert frameRegister != null : "cannot use register " + Register.Frame + " in assembler with null register configuration";
+            base = frameRegister;
+        }
+
+        if (base.equals(AMD64.rip)) { // also matches Placeholder
+            // [00 000 101] disp32
+            assert index.equals(Register.None) : "cannot use RIP relative addressing with index register";
+            emitByte(0x05 | regenc);
+            emitInt(disp);
+        } else if (base.isValid()) {
+            // base is known to be valid in this branch, so its encoding is always used.
+            int baseenc = encode(base);
+            if (index.isValid()) {
+                int indexenc = encode(index) << 3;
+                // [base + indexscale + disp]
+                if (disp == 0 && !base.equals(rbp) && !base.equals(r13)) {
+                    // [base + indexscale]
+                    // [00 reg 100][ss index base]
+                    assert !index.equals(rsp) : "illegal addressing mode";
+                    emitByte(0x04 | regenc);
+                    emitByte(scale.log2 << 6 | indexenc | baseenc);
+                } else if (isByte(disp)) {
+                    // [base + indexscale + imm8]
+                    // [01 reg 100][ss index base] imm8
+                    assert !index.equals(rsp) : "illegal addressing mode";
+                    emitByte(0x44 | regenc);
+                    emitByte(scale.log2 << 6 | indexenc | baseenc);
+                    emitByte(disp & 0xFF);
+                } else {
+                    // [base + indexscale + disp32]
+                    // [10 reg 100][ss index base] disp32
+                    assert !index.equals(rsp) : "illegal addressing mode";
+                    emitByte(0x84 | regenc);
+                    emitByte(scale.log2 << 6 | indexenc | baseenc);
+                    emitInt(disp);
+                }
+            } else if (base.equals(rsp) || base.equals(r12)) {
+                // rsp/r12 as base always require an SIB byte (their ModRM.rm encoding, 100,
+                // is the SIB escape).
+                // [rsp + disp]
+                if (disp == 0) {
+                    // [rsp]
+                    // [00 reg 100][00 100 100]
+                    emitByte(0x04 | regenc);
+                    emitByte(0x24);
+                } else if (isByte(disp)) {
+                    // [rsp + imm8]
+                    // [01 reg 100][00 100 100] disp8
+                    emitByte(0x44 | regenc);
+                    emitByte(0x24);
+                    emitByte(disp & 0xFF);
+                } else {
+                    // [rsp + imm32]
+                    // [10 reg 100][00 100 100] disp32
+                    emitByte(0x84 | regenc);
+                    emitByte(0x24);
+                    emitInt(disp);
+                }
+            } else {
+                // [base + disp]
+                assert !base.equals(rsp) && !base.equals(r12) : "illegal addressing mode";
+                if (disp == 0 && !base.equals(rbp) && !base.equals(r13)) {
+                    // rbp/r13 with mod=00 would mean RIP-relative, hence excluded above.
+                    // [base]
+                    // [00 reg base]
+                    emitByte(0x00 | regenc | baseenc);
+                } else if (isByte(disp)) {
+                    // [base + disp8]
+                    // [01 reg base] disp8
+                    emitByte(0x40 | regenc | baseenc);
+                    emitByte(disp & 0xFF);
+                } else {
+                    // [base + disp32]
+                    // [10 reg base] disp32
+                    emitByte(0x80 | regenc | baseenc);
+                    emitInt(disp);
+                }
+            }
+        } else {
+            if (index.isValid()) {
+                int indexenc = encode(index) << 3;
+                // [indexscale + disp]
+                // [00 reg 100][ss index 101] disp32
+                assert !index.equals(rsp) : "illegal addressing mode";
+                emitByte(0x04 | regenc);
+                emitByte(scale.log2 << 6 | indexenc | 0x05);
+                emitInt(disp);
+            } else {
+                // [disp] ABSOLUTE
+                // [00 reg 100][00 100 101] disp32
+                emitByte(0x04 | regenc);
+                emitByte(0x25);
+                emitInt(disp);
+            }
+        }
+    }
+
+    /**
+     * Base class for AMD64 opcodes.
+     */
+    public static class AMD64Op {
+
+        // Opcode map escapes. The three-byte escapes are stored byte-swapped so they can be
+        // written with emitShort.
+        protected static final int P_0F = 0x0F;
+        protected static final int P_0F38 = 0x380F;
+        protected static final int P_0F3A = 0x3A0F;
+
+        private final String opcode;
+
+        // prefix1: mandatory prefix emitted first (0 if none); prefix2: opcode map escape
+        // (0 if none); op: the opcode byte itself.
+        private final int prefix1;
+        private final int prefix2;
+        private final int op;
+
+        // If an operand is a byte register, encodings >= 4 force a REX prefix even when no
+        // REX payload bit is set (see emitOpcode).
+        private final boolean dstIsByte;
+        private final boolean srcIsByte;
+
+        private final OpAssertion assertion;
+        private final CPUFeature feature;
+
+        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            // ByteAssertion implies both operands are byte registers.
+            this(opcode, prefix1, prefix2, op, assertion == OpAssertion.ByteAssertion, assertion == OpAssertion.ByteAssertion, assertion, feature);
+        }
+
+        protected AMD64Op(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
+            this.opcode = opcode;
+            this.prefix1 = prefix1;
+            this.prefix2 = prefix2;
+            this.op = op;
+
+            this.dstIsByte = dstIsByte;
+            this.srcIsByte = srcIsByte;
+
+            this.assertion = assertion;
+            this.feature = feature;
+        }
+
+        /**
+         * Emits prefix1, the operand size prefix, an optional REX prefix, prefix2 and the opcode
+         * byte, in that order.
+         */
+        protected final void emitOpcode(AMD64Assembler asm, OperandSize size, int rxb, int dstEnc, int srcEnc) {
+            if (prefix1 != 0) {
+                asm.emitByte(prefix1);
+            }
+            if (size.sizePrefix != 0) {
+                asm.emitByte(size.sizePrefix);
+            }
+            int rexPrefix = 0x40 | rxb;
+            if (size == QWORD) {
+                // REX.W selects the 64 bit operand size.
+                rexPrefix |= 0x08;
+            }
+            // Emit REX when it carries a payload bit, or when a byte register with encoding >= 4
+            // needs it to be addressable as a byte operand.
+            if (rexPrefix != 0x40 || (dstIsByte && dstEnc >= 4) || (srcIsByte && srcEnc >= 4)) {
+                asm.emitByte(rexPrefix);
+            }
+            if (prefix2 > 0xFF) {
+                asm.emitShort(prefix2);
+            } else if (prefix2 > 0) {
+                asm.emitByte(prefix2);
+            }
+            asm.emitByte(op);
+        }
+
+        // Always returns true; intended to be called inside an assert so the checks are
+        // compiled out when assertions are disabled.
+        protected final boolean verify(AMD64Assembler asm, OperandSize size, Register resultReg, Register inputReg) {
+            assert feature == null || asm.supports(feature) : String.format("unsupported feature %s required for %s", feature, opcode);
+            assert assertion.checkOperands(this, size, resultReg, inputReg);
+            return true;
+        }
+
+        @Override
+        public String toString() {
+            return opcode;
+        }
+    }
+
+    /**
+     * Base class for AMD64 opcodes with immediate operands.
+     */
+    public static class AMD64ImmOp extends AMD64Op {
+
+        // True if the immediate is always encoded as a single (sign-extended) byte, regardless
+        // of the operand size.
+        private final boolean immIsByte;
+
+        protected AMD64ImmOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, assertion, null);
+            this.immIsByte = immIsByte;
+        }
+
+        protected final void emitImmediate(AMD64Assembler asm, OperandSize size, int imm) {
+            if (immIsByte) {
+                assert imm == (byte) imm;
+                asm.emitByte(imm);
+            } else {
+                // Otherwise the immediate width is determined by the operand size.
+                size.emitImmediate(asm, imm);
+            }
+        }
+    }
+
+    /**
+     * Opcode with operand order of either RM or MR.
+     */
+    public abstract static class AMD64RROp extends AMD64Op {
+
+        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        protected AMD64RROp(String opcode, int prefix1, int prefix2, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, dstIsByte, srcIsByte, assertion, feature);
+        }
+
+        // Subclasses fix the operand order (RM or MR) when emitting the register-register form.
+        public abstract void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src);
+    }
+
+    /**
+     * Opcode with operand order of RM.
+     */
+    public static class AMD64RMOp extends AMD64RROp {
+        // @formatter:off
+        public static final AMD64RMOp IMUL   = new AMD64RMOp("IMUL",         P_0F, 0xAF);
+        public static final AMD64RMOp BSF    = new AMD64RMOp("BSF",          P_0F, 0xBC);
+        public static final AMD64RMOp BSR    = new AMD64RMOp("BSR",          P_0F, 0xBD);
+        public static final AMD64RMOp POPCNT = new AMD64RMOp("POPCNT", 0xF3, P_0F, 0xB8, CPUFeature.POPCNT);
+        public static final AMD64RMOp TZCNT  = new AMD64RMOp("TZCNT",  0xF3, P_0F, 0xBC, CPUFeature.BMI1);
+        public static final AMD64RMOp LZCNT  = new AMD64RMOp("LZCNT",  0xF3, P_0F, 0xBD, CPUFeature.LZCNT);
+        public static final AMD64RMOp MOVZXB = new AMD64RMOp("MOVZXB",       P_0F, 0xB6, false, true, OpAssertion.IntegerAssertion);
+        public static final AMD64RMOp MOVZX  = new AMD64RMOp("MOVZX",        P_0F, 0xB7, OpAssertion.No16BitAssertion);
+        public static final AMD64RMOp MOVSXB = new AMD64RMOp("MOVSXB",       P_0F, 0xBE, false, true, OpAssertion.IntegerAssertion);
+        public static final AMD64RMOp MOVSX  = new AMD64RMOp("MOVSX",        P_0F, 0xBF, OpAssertion.No16BitAssertion);
+        public static final AMD64RMOp MOVSXD = new AMD64RMOp("MOVSXD",             0x63, OpAssertion.QwordOnlyAssertion);
+        public static final AMD64RMOp MOVB   = new AMD64RMOp("MOVB",               0x8A, OpAssertion.ByteAssertion);
+        public static final AMD64RMOp MOV    = new AMD64RMOp("MOV",                0x8B);
+
+        // MOVD/MOVQ and MOVSS/MOVSD are the same opcode, just with different operand size prefix
+        public static final AMD64RMOp MOVD   = new AMD64RMOp("MOVD",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64RMOp MOVQ   = new AMD64RMOp("MOVQ",   0x66, P_0F, 0x6E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64RMOp MOVSS  = new AMD64RMOp("MOVSS",        P_0F, 0x10, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+        public static final AMD64RMOp MOVSD  = new AMD64RMOp("MOVSD",        P_0F, 0x10, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+
+        // TEST is documented as MR operation, but it's symmetric, and using it as RM operation is more convenient.
+        public static final AMD64RMOp TESTB  = new AMD64RMOp("TEST",               0x84, OpAssertion.ByteAssertion);
+        public static final AMD64RMOp TEST   = new AMD64RMOp("TEST",               0x85);
+        // @formatter:on
+
+        // The constructor chain supplies defaults: no prefixes, IntegerAssertion, no required
+        // CPU feature.
+        protected AMD64RMOp(String opcode, int op) {
+            this(opcode, 0, op);
+        }
+
+        protected AMD64RMOp(String opcode, int op, OpAssertion assertion) {
+            this(opcode, 0, op, assertion);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op) {
+            this(opcode, 0, prefix, op, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, 0, prefix, op, assertion, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, OpAssertion assertion, CPUFeature feature) {
+            this(opcode, 0, prefix, op, assertion, feature);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix, int op, boolean dstIsByte, boolean srcIsByte, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, dstIsByte, srcIsByte, assertion, null);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, CPUFeature feature) {
+            this(opcode, prefix1, prefix2, op, OpAssertion.IntegerAssertion, feature);
+        }
+
+        protected AMD64RMOp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        @Override
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
+            assert verify(asm, size, dst, src);
+            // RM order: dst is the ModRM.reg operand, src is the ModRM.rm operand.
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
+            asm.emitModRM(dst, src);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
+            asm.emitOperandHelper(dst, src);
+        }
+    }
+
+    /**
+     * Opcode with operand order of MR.
+     */
+    public static class AMD64MROp extends AMD64RROp {
+        // @formatter:off
+        public static final AMD64MROp MOVB   = new AMD64MROp("MOVB",               0x88, OpAssertion.ByteAssertion);
+        public static final AMD64MROp MOV    = new AMD64MROp("MOV",                0x89);
+
+        // MOVD and MOVQ are the same opcode, just with different operand size prefix
+        // Note that as MR opcodes, they have reverse operand order, so the IntToFloatingAssertion must be used.
+        public static final AMD64MROp MOVD   = new AMD64MROp("MOVD",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+        public static final AMD64MROp MOVQ   = new AMD64MROp("MOVQ",   0x66, P_0F, 0x7E, OpAssertion.IntToFloatingAssertion, CPUFeature.SSE2);
+
+        // MOVSS and MOVSD are the same opcode, just with different operand size prefix
+        public static final AMD64MROp MOVSS  = new AMD64MROp("MOVSS",        P_0F, 0x11, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+        public static final AMD64MROp MOVSD  = new AMD64MROp("MOVSD",        P_0F, 0x11, OpAssertion.FloatingAssertion, CPUFeature.SSE);
+        // @formatter:on
+
+        // The constructor chain supplies defaults: no prefixes, IntegerAssertion, no required
+        // CPU feature.
+        protected AMD64MROp(String opcode, int op) {
+            this(opcode, 0, op);
+        }
+
+        protected AMD64MROp(String opcode, int op, OpAssertion assertion) {
+            this(opcode, 0, op, assertion);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op) {
+            this(opcode, prefix, op, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, prefix, op, assertion, null);
+        }
+
+        protected AMD64MROp(String opcode, int prefix, int op, OpAssertion assertion, CPUFeature feature) {
+            this(opcode, 0, prefix, op, assertion, feature);
+        }
+
+        protected AMD64MROp(String opcode, int prefix1, int prefix2, int op, OpAssertion assertion, CPUFeature feature) {
+            super(opcode, prefix1, prefix2, op, assertion, feature);
+        }
+
+        @Override
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src) {
+            assert verify(asm, size, src, dst);
+            // MR order: src is the ModRM.reg operand, dst is the ModRM.rm operand.
+            emitOpcode(asm, size, getRXB(src, dst), src.encoding, dst.encoding);
+            asm.emitModRM(src, dst);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, Register src) {
+            assert verify(asm, size, null, src);
+            emitOpcode(asm, size, getRXB(src, dst), src.encoding, 0);
+            asm.emitOperandHelper(src, dst);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of M.
+     */
+    public static class AMD64MOp extends AMD64Op {
+        // @formatter:off
+        public static final AMD64MOp NOT  = new AMD64MOp("NOT",  0xF7, 2);
+        public static final AMD64MOp NEG  = new AMD64MOp("NEG",  0xF7, 3);
+        public static final AMD64MOp MUL  = new AMD64MOp("MUL",  0xF7, 4);
+        public static final AMD64MOp IMUL = new AMD64MOp("IMUL", 0xF7, 5);
+        public static final AMD64MOp DIV  = new AMD64MOp("DIV",  0xF7, 6);
+        public static final AMD64MOp IDIV = new AMD64MOp("IDIV", 0xF7, 7);
+        public static final AMD64MOp INC  = new AMD64MOp("INC",  0xFF, 0);
+        public static final AMD64MOp DEC  = new AMD64MOp("DEC",  0xFF, 1);
+        // @formatter:on
+
+        // Opcode extension placed in the ModRM.reg field, which is free because the
+        // instruction has only one explicit operand.
+        private final int ext;
+
+        protected AMD64MOp(String opcode, int op, int ext) {
+            this(opcode, 0, op, ext);
+        }
+
+        protected AMD64MOp(String opcode, int prefix, int op, int ext) {
+            this(opcode, prefix, op, ext, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MOp(String opcode, int prefix, int op, int ext, OpAssertion assertion) {
+            super(opcode, 0, prefix, op, assertion, null);
+            this.ext = ext;
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
+            asm.emitModRM(ext, dst);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst) {
+            assert verify(asm, size, null, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
+            asm.emitOperandHelper(ext, dst);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of MI.
+     */
+    public static class AMD64MIOp extends AMD64ImmOp {
+        // @formatter:off
+        public static final AMD64MIOp MOVB = new AMD64MIOp("MOVB", true,  0xC6, 0, OpAssertion.ByteAssertion);
+        public static final AMD64MIOp MOV  = new AMD64MIOp("MOV",  false, 0xC7, 0);
+        public static final AMD64MIOp TEST = new AMD64MIOp("TEST", false, 0xF7, 0);
+        // @formatter:on
+
+        // Opcode extension placed in the ModRM.reg field, which is free because the explicit
+        // operands are the r/m operand and the immediate.
+        private final int ext;
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext) {
+            this(opcode, immIsByte, op, ext, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int op, int ext, OpAssertion assertion) {
+            this(opcode, immIsByte, 0, op, ext, assertion);
+        }
+
+        protected AMD64MIOp(String opcode, boolean immIsByte, int prefix, int op, int ext, OpAssertion assertion) {
+            super(opcode, immIsByte, prefix, op, assertion);
+            this.ext = ext;
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, int imm) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, dst.encoding);
+            asm.emitModRM(ext, dst);
+            emitImmediate(asm, size, imm);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, AMD64Address dst, int imm) {
+            assert verify(asm, size, null, null);
+            emitOpcode(asm, size, getRXB(null, dst), 0, 0);
+            asm.emitOperandHelper(ext, dst);
+            emitImmediate(asm, size, imm);
+        }
+    }
+
+    /**
+     * Opcodes with operand order of RMI.
+     */
+    public static class AMD64RMIOp extends AMD64ImmOp {
+        // @formatter:off
+        // IMUL_SX is the sign-extended imm8 form of IMUL.
+        public static final AMD64RMIOp IMUL    = new AMD64RMIOp("IMUL", false, 0x69);
+        public static final AMD64RMIOp IMUL_SX = new AMD64RMIOp("IMUL", true,  0x6B);
+        // @formatter:on
+
+        protected AMD64RMIOp(String opcode, boolean immIsByte, int op) {
+            this(opcode, immIsByte, 0, op, OpAssertion.IntegerAssertion);
+        }
+
+        protected AMD64RMIOp(String opcode, boolean immIsByte, int prefix, int op, OpAssertion assertion) {
+            super(opcode, immIsByte, prefix, op, assertion);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, Register src, int imm) {
+            assert verify(asm, size, dst, src);
+            // RM order: dst is the ModRM.reg operand, src is the ModRM.rm operand.
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, src.encoding);
+            asm.emitModRM(dst, src);
+            emitImmediate(asm, size, imm);
+        }
+
+        public final void emit(AMD64Assembler asm, OperandSize size, Register dst, AMD64Address src, int imm) {
+            assert verify(asm, size, dst, null);
+            emitOpcode(asm, size, getRXB(dst, src), dst.encoding, 0);
+            asm.emitOperandHelper(dst, src);
+            emitImmediate(asm, size, imm);
+        }
+    }
+
+    public static class SSEOp extends AMD64RMOp {
+        // @formatter:off
+        public static final SSEOp CVTSI2SS  = new SSEOp("CVTSI2SS",  0xF3, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
+        public static final SSEOp CVTSI2SD  = new SSEOp("CVTSI2SS",  0xF2, P_0F, 0x2A, OpAssertion.IntToFloatingAssertion);
+        public static final SSEOp CVTTSS2SI = new SSEOp("CVTTSS2SI", 0xF3, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
+        public static final SSEOp CVTTSD2SI = new SSEOp("CVTTSD2SI", 0xF2, P_0F, 0x2C, OpAssertion.FloatingToIntAssertion);
+        public static final SSEOp UCOMIS    = new SSEOp("UCOMIS",          P_0F, 0x2E, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp SQRT      = new SSEOp("SQRT",            P_0F, 0x51);
+        public static final SSEOp AND       = new SSEOp("AND",             P_0F, 0x54, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp ANDN      = new SSEOp("ANDN",            P_0F, 0x55, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp OR        = new SSEOp("OR",              P_0F, 0x56, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp XOR       = new SSEOp("XOR",             P_0F, 0x57, OpAssertion.PackedFloatingAssertion);
+        public static final SSEOp ADD       = new SSEOp("ADD",             P_0F, 0x58);
+        public static final SSEOp MUL       = new SSEOp("MUL",             P_0F, 0x59);
+        public static final SSEOp CVTSS2SD  = new SSEOp("CVTSS2SD",        P_0F, 0x5A, OpAssertion.SingleAssertion);
+        public static final SSEOp CVTSD2SS  = new SSEOp("CVTSD2SS",        P_0F, 0x5A, OpAssertion.DoubleAssertion);
+        public static final SSEOp SUB       = new SSEOp("SUB",             P_0F, 0x5C);
+        public static final SSEOp MIN       = new SSEOp("MIN",             P_0F, 0x5D);
+        public static final SSEOp DIV       = new SSEOp("DIV",             P_0F, 0x5E);
+        public static final SSEOp MAX       = new SSEOp("MAX",             P_0F, 0x5F);
+        // @formatter:on
+
+        protected SSEOp(String opcode, int prefix, int op) {
+            this(opcode, prefix, op, OpAssertion.FloatingAssertion);
+        }
+
+        protected SSEOp(String opcode, int prefix, int op, OpAssertion assertion) {
+            this(opcode, 0, prefix, op, assertion);
+        }
+
+        protected SSEOp(String opcode, int mandatoryPrefix, int prefix, int op, OpAssertion assertion) {
+            super(opcode, mandatoryPrefix, prefix, op, assertion, CPUFeature.SSE2);
+        }
+    }
+
+    /**
+     * Arithmetic operation with operand order of RM, MR or MI.
+     */
+    public static final class AMD64BinaryArithmetic {
+        // @formatter:off
+        public static final AMD64BinaryArithmetic ADD = new AMD64BinaryArithmetic("ADD", 0);
+        public static final AMD64BinaryArithmetic OR  = new AMD64BinaryArithmetic("OR",  1);
+        public static final AMD64BinaryArithmetic ADC = new AMD64BinaryArithmetic("ADC", 2);
+        public static final AMD64BinaryArithmetic SBB = new AMD64BinaryArithmetic("SBB", 3);
+        public static final AMD64BinaryArithmetic AND = new AMD64BinaryArithmetic("AND", 4);
+        public static final AMD64BinaryArithmetic SUB = new AMD64BinaryArithmetic("SUB", 5);
+        public static final AMD64BinaryArithmetic XOR = new AMD64BinaryArithmetic("XOR", 6);
+        public static final AMD64BinaryArithmetic CMP = new AMD64BinaryArithmetic("CMP", 7);
+        // @formatter:on
+
+        // Byte-sized operand variants.
+        private final AMD64MIOp byteImmOp;
+        private final AMD64MROp byteMrOp;
+        private final AMD64RMOp byteRmOp;
+
+        // Word/dword/qword variants; immSxOp takes a sign-extended imm8.
+        private final AMD64MIOp immOp;
+        private final AMD64MIOp immSxOp;
+        private final AMD64MROp mrOp;
+        private final AMD64RMOp rmOp;
+
+        private AMD64BinaryArithmetic(String opcode, int code) {
+            // The 3-bit operation code appears in bits 3-5 of the opcode byte for the
+            // register forms, and in the ModRM.reg extension of the immediate forms.
+            int baseOp = code << 3;
+
+            byteImmOp = new AMD64MIOp(opcode, true, 0, 0x80, code, OpAssertion.ByteAssertion);
+            byteMrOp = new AMD64MROp(opcode, 0, baseOp, OpAssertion.ByteAssertion);
+            byteRmOp = new AMD64RMOp(opcode, 0, baseOp | 0x02, OpAssertion.ByteAssertion);
+
+            immOp = new AMD64MIOp(opcode, false, 0, 0x81, code, OpAssertion.IntegerAssertion);
+            immSxOp = new AMD64MIOp(opcode, true, 0, 0x83, code, OpAssertion.IntegerAssertion);
+            mrOp = new AMD64MROp(opcode, 0, baseOp | 0x01, OpAssertion.IntegerAssertion);
+            rmOp = new AMD64RMOp(opcode, 0, baseOp | 0x03, OpAssertion.IntegerAssertion);
+        }
+
+        public AMD64MIOp getMIOpcode(OperandSize size, boolean sx) {
+            if (size == BYTE) {
+                return byteImmOp;
+            } else if (sx) {
+                return immSxOp;
+            } else {
+                return immOp;
+            }
+        }
+
+        public AMD64MROp getMROpcode(OperandSize size) {
+            if (size == BYTE) {
+                return byteMrOp;
+            } else {
+                return mrOp;
+            }
+        }
+
+        public AMD64RMOp getRMOpcode(OperandSize size) {
+            if (size == BYTE) {
+                return byteRmOp;
+            } else {
+                return rmOp;
+            }
+        }
+    }
+
+    /**
+     * Shift operation with operand order of M1, MC or MI.
+     */
+    public static final class AMD64Shift {
+        // @formatter:off
+        public static final AMD64Shift ROL = new AMD64Shift("ROL", 0);
+        public static final AMD64Shift ROR = new AMD64Shift("ROR", 1);
+        public static final AMD64Shift RCL = new AMD64Shift("RCL", 2);
+        public static final AMD64Shift RCR = new AMD64Shift("RCR", 3);
+        public static final AMD64Shift SHL = new AMD64Shift("SHL", 4);
+        public static final AMD64Shift SHR = new AMD64Shift("SHR", 5);
+        public static final AMD64Shift SAR = new AMD64Shift("SAR", 7);
+        // @formatter:on
+
+        public final AMD64MOp m1Op;   // shift by constant one (opcode 0xD1)
+        public final AMD64MOp mcOp;   // shift by the count register (opcode 0xD3)
+        public final AMD64MIOp miOp;  // shift by an immediate byte (opcode 0xC1)
+
+        private AMD64Shift(String opcode, int code) {
+            // code is the ModRM.reg opcode extension selecting the shift kind.
+            m1Op = new AMD64MOp(opcode, 0, 0xD1, code, OpAssertion.IntegerAssertion);
+            mcOp = new AMD64MOp(opcode, 0, 0xD3, code, OpAssertion.IntegerAssertion);
+            miOp = new AMD64MIOp(opcode, true, 0, 0xC1, code, OpAssertion.IntegerAssertion);
+        }
+    }
+
+    public final void addl(AMD64Address dst, int imm32) {
+        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    public final void addl(Register dst, int imm32) {
+        ADD.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    /** Emits a 4-byte multi-byte NOP. */
+    private void addrNop4() {
+        // 4 bytes: NOP DWORD PTR [EAX+0]
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x40); // emitRm(cbuf, 0x1, EAXEnc, EAXEnc);
+        emitByte(0); // 8-bits offset (1 byte)
+    }
+
+    /** Emits a 5-byte multi-byte NOP. */
+    private void addrNop5() {
+        // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x44); // emitRm(cbuf, 0x1, EAXEnc, 0x4);
+        emitByte(0x00); // emitRm(cbuf, 0x0, EAXEnc, EAXEnc);
+        emitByte(0); // 8-bits offset (1 byte)
+    }
+
+    /** Emits a 7-byte multi-byte NOP. */
+    private void addrNop7() {
+        // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x80); // emitRm(cbuf, 0x2, EAXEnc, EAXEnc);
+        emitInt(0); // 32-bits offset (4 bytes)
+    }
+
+    /** Emits an 8-byte multi-byte NOP. */
+    private void addrNop8() {
+        // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+        emitByte(0x0F);
+        emitByte(0x1F);
+        emitByte(0x84); // emitRm(cbuf, 0x2, EAXEnc, 0x4);
+        emitByte(0x00); // emitRm(cbuf, 0x0, EAXEnc, EAXEnc);
+        emitInt(0); // 32-bits offset (4 bytes)
+    }
+
+    public final void andl(Register dst, int imm32) {
+        AND.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    /** Emits BSWAP on a 32-bit register. */
+    public final void bswapl(Register reg) {
+        int encode = prefixAndEncode(reg.encoding);
+        emitByte(0x0F);
+        emitByte(0xC8 | encode); // register encoding is folded into the opcode byte
+    }
+
+    /** Emits CDQ (sign-extend eax into edx:eax). */
+    public final void cdql() {
+        emitByte(0x99);
+    }
+
+    /** Emits a 32-bit conditional move from register {@code src} to {@code dst}. */
+    public final void cmovl(ConditionFlag cc, Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue()); // condition code selects the CMOVcc opcode
+        emitByte(0xC0 | encode);
+    }
+
+    /** Emits a 32-bit conditional move from memory {@code src} to register {@code dst}. */
+    public final void cmovl(ConditionFlag cc, Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue()); // condition code selects the CMOVcc opcode
+        emitOperandHelper(dst, src);
+    }
+
+    public final void cmpl(Register dst, int imm32) {
+        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    public final void cmpl(Register dst, Register src) {
+        CMP.rmOp.emit(this, DWORD, dst, src);
+    }
+
+    public final void cmpl(Register dst, AMD64Address src) {
+        CMP.rmOp.emit(this, DWORD, dst, src);
+    }
+
+    public final void cmpl(AMD64Address dst, int imm32) {
+        CMP.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    // The 32-bit cmpxchg compares the value at adr with the contents of rax,
+    // and stores reg into adr if so; otherwise, the value at adr is loaded into rax.
+    // The ZF is set if the compared values were equal, and cleared otherwise.
+    public final void cmpxchgl(Register reg, AMD64Address adr) { // cmpxchg
+        prefix(adr, reg);
+        emitByte(0x0F);
+        emitByte(0xB1);
+        emitOperandHelper(reg, adr);
+    }
+
+    /** Decrements the dword at {@code dst} (opcode 0xFF with extension 1). */
+    protected final void decl(AMD64Address dst) {
+        prefix(dst);
+        emitByte(0xFF);
+        emitOperandHelper(1, dst);
+    }
+
+    /** Emits HLT. */
+    public final void hlt() {
+        emitByte(0xF4);
+    }
+
+    public final void imull(Register dst, Register src, int value) {
+        if (isByte(value)) {
+            AMD64RMIOp.IMUL_SX.emit(this, DWORD, dst, src, value);
+        } else {
+            AMD64RMIOp.IMUL.emit(this, DWORD, dst, src, value);
+        }
+    }
+
+    /** Increments the dword at {@code dst} (opcode 0xFF with extension 0). */
+    protected final void incl(AMD64Address dst) {
+        prefix(dst);
+        emitByte(0xFF);
+        emitOperandHelper(0, dst);
+    }
+
+    /**
+     * Conditional jump to an already known target position. Uses the 2-byte short form when the
+     * displacement fits in a signed byte and {@code forceDisp32} is false, otherwise the 6-byte
+     * near form.
+     */
+    public void jcc(ConditionFlag cc, int jumpTarget, boolean forceDisp32) {
+        int shortSize = 2;
+        int longSize = 6;
+        // Displacements are relative to the end of the instruction, hence the size corrections.
+        long disp = jumpTarget - position();
+        if (!forceDisp32 && isByte(disp - shortSize)) {
+            // 0111 tttn #8-bit disp
+            emitByte(0x70 | cc.getValue());
+            emitByte((int) ((disp - shortSize) & 0xFF));
+        } else {
+            // 0000 1111 1000 tttn #32-bit disp
+            assert isInt(disp - longSize) : "must be 32bit offset (call4)";
+            emitByte(0x0F);
+            emitByte(0x80 | cc.getValue());
+            emitInt((int) (disp - longSize));
+        }
+    }
+
+    // Conditional jump to a label. A bound label jumps directly; an unbound
+    // label records a patch site and emits the 32-bit form with a 0 placeholder.
+    public final void jcc(ConditionFlag cc, Label l) {
+        assert (0 <= cc.getValue()) && (cc.getValue() < 16) : "illegal cc";
+        if (l.isBound()) {
+            jcc(cc, l.position(), false);
+        } else {
+            // Note: could eliminate cond. jumps to this jump if condition
+            // is the same however, seems to be rather unlikely case.
+            // Note: use jccb() if label to be bound is very close to get
+            // an 8-bit displacement
+            l.addPatchAt(position());
+            emitByte(0x0F);
+            emitByte(0x80 | cc.getValue());
+            emitInt(0);
+        }
+
+    }
+
+    // Short (8-bit displacement) conditional jump to a label. The caller
+    // guarantees the target is within byte range; unbound labels record a
+    // patch site with a 0 placeholder byte.
+    public final void jccb(ConditionFlag cc, Label l) {
+        if (l.isBound()) {
+            int shortSize = 2;
+            int entry = l.position();
+            assert isByte(entry - (position() + shortSize)) : "Dispacement too large for a short jmp";
+            long disp = entry - position();
+            // 0111 tttn #8-bit disp
+            emitByte(0x70 | cc.getValue());
+            emitByte((int) ((disp - shortSize) & 0xFF));
+        } else {
+            l.addPatchAt(position());
+            emitByte(0x70 | cc.getValue());
+            emitByte(0);
+        }
+    }
+
+    // Unconditional jump to an absolute code-buffer position. Emits the 2-byte
+    // short form (0xEB) when the displacement fits in a signed byte (and
+    // forceDisp32 is not set), otherwise the 5-byte near form (0xE9).
+    public final void jmp(int jumpTarget, boolean forceDisp32) {
+        int shortSize = 2;
+        int longSize = 5;
+        long disp = jumpTarget - position();
+        if (!forceDisp32 && isByte(disp - shortSize)) {
+            emitByte(0xEB);
+            emitByte((int) ((disp - shortSize) & 0xFF));
+        } else {
+            emitByte(0xE9);
+            emitInt((int) (disp - longSize));
+        }
+    }
+
+    // Unconditional jump to a label; unbound labels get a patch site and the
+    // 32-bit form with a 0 placeholder displacement.
+    @Override
+    public final void jmp(Label l) {
+        if (l.isBound()) {
+            jmp(l.position(), false);
+        } else {
+            // By default, forward jumps are always 32-bit displacements, since
+            // we can't yet know where the label will be bound. If you're sure that
+            // the forward jump will not run beyond 256 bytes, use jmpb to
+            // force an 8-bit displacement.
+
+            l.addPatchAt(position());
+            emitByte(0xE9);
+            emitInt(0);
+        }
+    }
+
+    // Indirect jump through a register: 0xFF /4 (ModRM 0xE0 | reg).
+    public final void jmp(Register entry) {
+        int encode = prefixAndEncode(entry.encoding);
+        emitByte(0xFF);
+        emitByte(0xE0 | encode);
+    }
+
+    // Short (8-bit displacement) unconditional jump to a label.
+    // NOTE(review): the bound-label assert here checks (entry - position()) + shortSize
+    // while jccb checks entry - (position() + shortSize) — the sign of shortSize
+    // differs; presumably mirrors the upstream HotSpot assembler, but worth confirming.
+    public final void jmpb(Label l) {
+        if (l.isBound()) {
+            int shortSize = 2;
+            int entry = l.position();
+            assert isByte((entry - position()) + shortSize) : "Dispacement too large for a short jmp";
+            long offs = entry - position();
+            emitByte(0xEB);
+            emitByte((int) ((offs - shortSize) & 0xFF));
+        } else {
+
+            l.addPatchAt(position());
+            emitByte(0xEB);
+            emitByte(0);
+        }
+    }
+
+    // 64-bit LEA (0x8D with REX.W): load the effective address of src into dst.
+    public final void leaq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x8D);
+        emitOperandHelper(dst, src);
+    }
+
+    // LEAVE (0xC9): tear down the current stack frame (mov rsp, rbp; pop rbp).
+    public final void leave() {
+        emitByte(0xC9);
+    }
+
+    // LOCK prefix (0xF0): makes the following instruction atomic.
+    public final void lock() {
+        emitByte(0xF0);
+    }
+
+    // MOVAPD xmm, xmm (0x66 0x0F 0x28): move aligned packed doubles.
+    // The REX prefix is emitted by hand here because both operands are XMM
+    // registers whose encodings may exceed 3 bits.
+    public final void movapd(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        int dstenc = dst.encoding;
+        int srcenc = src.encoding;
+        emitByte(0x66);
+        if (dstenc < 8) {
+            if (srcenc >= 8) {
+                emitByte(Prefix.REXB);
+                srcenc -= 8;
+            }
+        } else {
+            if (srcenc < 8) {
+                emitByte(Prefix.REXR);
+            } else {
+                emitByte(Prefix.REXRB);
+                srcenc -= 8;
+            }
+            dstenc -= 8;
+        }
+        emitByte(0x0F);
+        emitByte(0x28);
+        // ModRM: register-direct form, reg = dst, r/m = src.
+        emitByte(0xC0 | dstenc << 3 | srcenc);
+    }
+
+    // MOVAPS xmm, xmm (0x0F 0x28, no operand-size prefix): move aligned packed singles.
+    public final void movaps(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        int dstenc = dst.encoding;
+        int srcenc = src.encoding;
+        if (dstenc < 8) {
+            if (srcenc >= 8) {
+                emitByte(Prefix.REXB);
+                srcenc -= 8;
+            }
+        } else {
+            if (srcenc < 8) {
+                emitByte(Prefix.REXR);
+            } else {
+                emitByte(Prefix.REXRB);
+                srcenc -= 8;
+            }
+            dstenc -= 8;
+        }
+        emitByte(0x0F);
+        emitByte(0x28);
+        // ModRM: register-direct form, reg = dst, r/m = src.
+        emitByte(0xC0 | dstenc << 3 | srcenc);
+    }
+
+    // MOV byte to memory with immediate: 0xC6 /0 imm8.
+    public final void movb(AMD64Address dst, int imm8) {
+        prefix(dst);
+        emitByte(0xC6);
+        emitOperandHelper(0, dst);
+        emitByte(imm8);
+    }
+
+    // MOV byte from register to memory: 0x88. The byteinst prefix flag forces
+    // a REX so spl/bpl/sil/dil are addressable instead of ah/ch/dh/bh.
+    public final void movb(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.CPU) : "must have byte register";
+        prefix(dst, src, true);
+        emitByte(0x88);
+        emitOperandHelper(src, dst);
+    }
+
+    // MOV 32-bit immediate to register: 0xB8 + reg, imm32.
+    public final void movl(Register dst, int imm32) {
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0xB8 | encode);
+        emitInt(imm32);
+    }
+
+    // MOV 32-bit register to register: 0x8B, register-direct ModRM.
+    public final void movl(Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x8B);
+        emitByte(0xC0 | encode);
+    }
+
+    // MOV 32-bit load from memory: 0x8B.
+    public final void movl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x8B);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOV 32-bit immediate to memory: 0xC7 /0 imm32.
+    public final void movl(AMD64Address dst, int imm32) {
+        prefix(dst);
+        emitByte(0xC7);
+        emitOperandHelper(0, dst);
+        emitInt(imm32);
+    }
+
+    // MOV 32-bit store to memory: 0x89.
+    public final void movl(AMD64Address dst, Register src) {
+        prefix(dst, src);
+        emitByte(0x89);
+        emitOperandHelper(src, dst);
+    }
+
+    /**
+     * MOVLPD xmm, m64 ({@code 0x66 0x0F 0x12}): load a double into the low half of an XMM
+     * register. New CPUs require use of movsd and movss to avoid partial register stall when
+     * loading from memory. But for old Opteron use movlpd instead of movsd. The selection is done
+     * in {@link AMD64MacroAssembler#movdbl(Register, AMD64Address)} and
+     * {@link AMD64MacroAssembler#movflt(Register, Register)}.
+     */
+    public final void movlpd(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0x66);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x12);
+        emitOperandHelper(dst, src);
+    }
+
+    // 64-bit load from memory. XMM destination uses MOVQ xmm, m64
+    // (0xF3 0x0F 0x7E); a general-purpose destination uses 0x8B with REX.W.
+    public final void movq(Register dst, AMD64Address src) {
+        if (dst.getRegisterCategory().equals(AMD64.XMM)) {
+            emitByte(0xF3);
+            prefixq(src, dst);
+            emitByte(0x0F);
+            emitByte(0x7E);
+            emitOperandHelper(dst, src);
+        } else {
+            prefixq(src, dst);
+            emitByte(0x8B);
+            emitOperandHelper(dst, src);
+        }
+    }
+
+    // MOV 64-bit register to register: 0x8B with REX.W.
+    public final void movq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x8B);
+        emitByte(0xC0 | encode);
+    }
+
+    // 64-bit store to memory. An XMM source uses MOVQ m64, xmm
+    // (0x66 0x0F 0xD6); a general-purpose source uses 0x89 with REX.W.
+    public final void movq(AMD64Address dst, Register src) {
+        if (src.getRegisterCategory().equals(AMD64.XMM)) {
+            emitByte(0x66);
+            prefixq(dst, src);
+            emitByte(0x0F);
+            emitByte(0xD6);
+            emitOperandHelper(src, dst);
+        } else {
+            prefixq(dst, src);
+            emitByte(0x89);
+            emitOperandHelper(src, dst);
+        }
+    }
+
+    // MOVSX r32, m8 (0x0F 0xBE): sign-extend a byte from memory into a 32-bit register.
+    public final void movsbl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOVSX r32, r8: sign-extend a byte register into a 32-bit register.
+    // srcIsByte=true so a REX prefix is forced for encodings 4-7 (spl/bpl/sil/dil).
+    public final void movsbl(Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, false, src.encoding, true);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitByte(0xC0 | encode);
+    }
+
+    // MOVSX r64, m8 (REX.W 0x0F 0xBE): sign-extend a byte from memory into a 64-bit register.
+    public final void movsbq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOVSX r64, r8 (REX.W 0x0F 0xBE): sign-extend a byte register into a 64-bit register.
+    public final void movsbq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0xBE);
+        emitByte(0xC0 | encode);
+    }
+
+    // MOVSD xmm, xmm (0xF2 0x0F 0x10): move scalar double between XMM registers.
+    public final void movsd(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF2);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitByte(0xC0 | encode);
+    }
+
+    // MOVSD xmm, m64 (0xF2 0x0F 0x10): load scalar double from memory.
+    public final void movsd(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF2);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOVSD m64, xmm (0xF2 0x0F 0x11): store scalar double to memory.
+    public final void movsd(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF2);
+        prefix(dst, src);
+        emitByte(0x0F);
+        emitByte(0x11);
+        emitOperandHelper(src, dst);
+    }
+
+    // MOVSS xmm, xmm (0xF3 0x0F 0x10): move scalar single between XMM registers.
+    public final void movss(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF3);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitByte(0xC0 | encode);
+    }
+
+    // MOVSS xmm, m32 (0xF3 0x0F 0x10): load scalar single from memory.
+    public final void movss(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF3);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x10);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOVSS m32, xmm (0xF3 0x0F 0x11): store scalar single to memory.
+    public final void movss(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        emitByte(0xF3);
+        prefix(dst, src);
+        emitByte(0x0F);
+        emitByte(0x11);
+        emitOperandHelper(src, dst);
+    }
+
+    // MOVSX r32, m16 (0x0F 0xBF): sign-extend a word from memory into a 32-bit register.
+    public final void movswl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xBF);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOV 16-bit immediate to memory: operand-size prefix 0x66 then 0xC7 /0 imm16.
+    public final void movw(AMD64Address dst, int imm16) {
+        emitByte(0x66); // switch to 16-bit mode
+        prefix(dst);
+        emitByte(0xC7);
+        emitOperandHelper(0, dst);
+        emitShort(imm16);
+    }
+
+    // MOV 16-bit store to memory: operand-size prefix 0x66 then 0x89.
+    public final void movw(AMD64Address dst, Register src) {
+        emitByte(0x66);
+        prefix(dst, src);
+        emitByte(0x89);
+        emitOperandHelper(src, dst);
+    }
+
+    // MOVZX r32, m8 (0x0F 0xB6): zero-extend a byte from memory into a 32-bit register.
+    public final void movzbl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xB6);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOVZX r32, m16 (0x0F 0xB7): zero-extend a word from memory into a 32-bit register.
+    public final void movzwl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0xB7);
+        emitOperandHelper(dst, src);
+    }
+
+    // Guarantees the next emitted instruction starts at a fresh PC by padding
+    // with a single nop.
+    @Override
+    public final void ensureUniquePC() {
+        nop();
+    }
+
+    // Emit a single one-byte nop (0x90).
+    public final void nop() {
+        nop(1);
+    }
+
+    /**
+     * Emits {@code count} bytes of nop padding. Three strategies, selected by the
+     * {@code UseNormalNop}/{@code UseAddressNop} options: plain 0x90 bytes (debugger
+     * friendly), multi-byte address-form nops (0x0F 0x1F), or 0x66-prefixed 0x90
+     * sequences per the AMD optimization guide.
+     */
+    public void nop(int count) {
+        int i = count;
+        if (UseNormalNop) {
+            assert i > 0 : " ";
+            // The fancy nops aren't currently recognized by debuggers making it a
+            // pain to disassemble code while debugging. If assert are on clearly
+            // speed is not an issue so simply use the single byte traditional nop
+            // to do alignment.
+
+            for (; i > 0; i--) {
+                emitByte(0x90);
+            }
+            return;
+        }
+
+        if (UseAddressNop) {
+            //
+            // Using multi-bytes nops "0x0F 0x1F [Address]" for AMD.
+            // 1: 0x90
+            // 2: 0x66 0x90
+            // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
+            // 4: 0x0F 0x1F 0x40 0x00
+            // 5: 0x0F 0x1F 0x44 0x00 0x00
+            // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
+            // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+            // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+
+            // The rest of the encoding is AMD specific - use consecutive address nops
+
+            // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
+            // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
+            // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+            // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+            // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+            // Size prefixes (0x66) are added for larger sizes
+
+            // Emit 11-byte nops until at most 21 bytes remain.
+            while (i >= 22) {
+                i -= 11;
+                emitByte(0x66); // size prefix
+                emitByte(0x66); // size prefix
+                emitByte(0x66); // size prefix
+                addrNop8();
+            }
+            // Generate first nop for size between 21-12
+            switch (i) {
+                case 21:
+                    i -= 1;
+                    emitByte(0x66); // size prefix
+                    // fall through
+                case 20:
+                    // fall through
+                case 19:
+                    i -= 1;
+                    emitByte(0x66); // size prefix
+                    // fall through
+                case 18:
+                    // fall through
+                case 17:
+                    i -= 1;
+                    emitByte(0x66); // size prefix
+                    // fall through
+                case 16:
+                    // fall through
+                case 15:
+                    i -= 8;
+                    addrNop8();
+                    break;
+                case 14:
+                case 13:
+                    i -= 7;
+                    addrNop7();
+                    break;
+                case 12:
+                    i -= 6;
+                    emitByte(0x66); // size prefix
+                    addrNop5();
+                    break;
+                default:
+                    assert i < 12;
+            }
+
+            // Generate second nop for size between 11-1
+            switch (i) {
+                case 11:
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    addrNop8();
+                    break;
+                case 10:
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    addrNop8();
+                    break;
+                case 9:
+                    emitByte(0x66); // size prefix
+                    addrNop8();
+                    break;
+                case 8:
+                    addrNop8();
+                    break;
+                case 7:
+                    addrNop7();
+                    break;
+                case 6:
+                    emitByte(0x66); // size prefix
+                    addrNop5();
+                    break;
+                case 5:
+                    addrNop5();
+                    break;
+                case 4:
+                    addrNop4();
+                    break;
+                case 3:
+                    // Don't use "0x0F 0x1F 0x00" - need patching safe padding
+                    emitByte(0x66); // size prefix
+                    emitByte(0x66); // size prefix
+                    emitByte(0x90); // nop
+                    break;
+                case 2:
+                    emitByte(0x66); // size prefix
+                    emitByte(0x90); // nop
+                    break;
+                case 1:
+                    emitByte(0x90); // nop
+                    break;
+                default:
+                    assert i == 0;
+            }
+            return;
+        }
+
+        // Using nops with size prefixes "0x66 0x90".
+        // From AMD Optimization Guide:
+        // 1: 0x90
+        // 2: 0x66 0x90
+        // 3: 0x66 0x66 0x90
+        // 4: 0x66 0x66 0x66 0x90
+        // 5: 0x66 0x66 0x90 0x66 0x90
+        // 6: 0x66 0x66 0x90 0x66 0x66 0x90
+        // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
+        // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
+        // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
+        // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
+        //
+        while (i > 12) {
+            i -= 4;
+            emitByte(0x66); // size prefix
+            emitByte(0x66);
+            emitByte(0x66);
+            emitByte(0x90); // nop
+        }
+        // 1 - 12 nops
+        if (i > 8) {
+            if (i > 9) {
+                i -= 1;
+                emitByte(0x66);
+            }
+            i -= 3;
+            emitByte(0x66);
+            emitByte(0x66);
+            emitByte(0x90);
+        }
+        // 1 - 8 nops
+        if (i > 4) {
+            if (i > 6) {
+                i -= 1;
+                emitByte(0x66);
+            }
+            i -= 3;
+            emitByte(0x66);
+            emitByte(0x66);
+            emitByte(0x90);
+        }
+        switch (i) {
+            case 4:
+                emitByte(0x66);
+                emitByte(0x66);
+                emitByte(0x66);
+                emitByte(0x90);
+                break;
+            case 3:
+                emitByte(0x66);
+                emitByte(0x66);
+                emitByte(0x90);
+                break;
+            case 2:
+                emitByte(0x66);
+                emitByte(0x90);
+                break;
+            case 1:
+                emitByte(0x90);
+                break;
+            default:
+                assert i == 0;
+        }
+    }
+
+    // POP r64: 0x58 + reg (64-bit default operand size in long mode).
+    public final void pop(Register dst) {
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0x58 | encode);
+    }
+
+    // POPFQ (0x9D): pop the flags register from the stack.
+    public void popfq() {
+        emitByte(0x9D);
+    }
+
+    // PTEST xmm, xmm (0x66 0x0F 0x38 0x17): logical compare setting ZF/CF.
+    // Requires SSE4.1.
+    public final void ptest(Register dst, Register src) {
+        assert supports(CPUFeature.SSE4_1);
+        emitByte(0x66);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x38);
+        emitByte(0x17);
+        emitByte(0xC0 | encode);
+    }
+
+    // PUSH r64: 0x50 + reg (64-bit default operand size in long mode).
+    public final void push(Register src) {
+        int encode = prefixAndEncode(src.encoding);
+        emitByte(0x50 | encode);
+    }
+
+    // PUSHFQ (0x9C): push the flags register onto the stack.
+    public void pushfq() {
+        emitByte(0x9c);
+    }
+
+    // PXOR xmm, xmm (0x66 0x0F 0xEF): bitwise XOR of packed integers.
+    public final void pxor(Register dst, Register src) {
+        emitByte(0x66);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0xEF);
+        emitByte(0xC0 | encode);
+    }
+
+    // RET: plain 0xC3, or 0xC2 imm16 to also pop imm16 bytes of arguments.
+    public final void ret(int imm16) {
+        if (imm16 == 0) {
+            emitByte(0xC3);
+        } else {
+            emitByte(0xC2);
+            emitShort(imm16);
+        }
+    }
+
+    // SUB of an immediate from a 32-bit memory operand; imm8 form when it fits.
+    public final void subl(AMD64Address dst, int imm32) {
+        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    // SUB of an immediate from a 32-bit register; imm8 form when it fits.
+    public final void subl(Register dst, int imm32) {
+        SUB.getMIOpcode(DWORD, isByte(imm32)).emit(this, DWORD, dst, imm32);
+    }
+
+    // TEST r32, imm32: sets flags from dst AND imm32. Uses the short eax form
+    // (0xA9) when dst is rax, otherwise 0xF7 /0.
+    public final void testl(Register dst, int imm32) {
+        // not using emitArith because test
+        // doesn't support sign-extension of
+        // 8bit operands
+        int encode = dst.encoding;
+        if (encode == 0) {
+            emitByte(0xA9);
+        } else {
+            encode = prefixAndEncode(encode);
+            emitByte(0xF7);
+            emitByte(0xC0 | encode);
+        }
+        emitInt(imm32);
+    }
+
+    // TEST r32, r32 (0x85): sets flags from dst AND src.
+    public final void testl(Register dst, Register src) {
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x85);
+        emitByte(0xC0 | encode);
+    }
+
+    // TEST r32, m32 (0x85): sets flags from dst AND the memory operand.
+    public final void testl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x85);
+        emitOperandHelper(dst, src);
+    }
+
+    // XOR of two 32-bit registers.
+    public final void xorl(Register dst, Register src) {
+        XOR.rmOp.emit(this, DWORD, dst, src);
+    }
+
+    // XORPD xmm, xmm: the 0x66 operand-size prefix turns the xorps encoding
+    // (0x0F 0x57) into the packed-double variant.
+    public final void xorpd(Register dst, Register src) {
+        emitByte(0x66);
+        xorps(dst, src);
+    }
+
+    // XORPS xmm, xmm (0x0F 0x57): bitwise XOR of packed singles.
+    public final void xorps(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
+        int encode = prefixAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x57);
+        emitByte(0xC0 | encode);
+    }
+
+    // DEC of a 32-bit register: 0xFF /1 register-direct form.
+    protected final void decl(Register dst) {
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC8 | encode);
+    }
+
+    // INC of a 32-bit register: 0xFF /0 register-direct form.
+    protected final void incl(Register dst) {
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC0 | encode);
+    }
+
+    // Emits a REX prefix if needed for a single register operand and returns
+    // the 3-bit encoding to place in the opcode/ModRM byte.
+    private int prefixAndEncode(int regEnc) {
+        return prefixAndEncode(regEnc, false);
+    }
+
+    // As above; byteinst forces a plain REX for encodings 4-7 so byte accesses
+    // address spl/bpl/sil/dil instead of ah/ch/dh/bh.
+    private int prefixAndEncode(int regEnc, boolean byteinst) {
+        if (regEnc >= 8) {
+            emitByte(Prefix.REXB);
+            return regEnc - 8;
+        } else if (byteinst && regEnc >= 4) {
+            emitByte(Prefix.REX);
+        }
+        return regEnc;
+    }
+
+    // Emits a REX.W prefix (adding REX.B for encodings >= 8) for a single
+    // 64-bit register operand and returns its 3-bit encoding.
+    private int prefixqAndEncode(int regEnc) {
+        if (regEnc < 8) {
+            emitByte(Prefix.REXW);
+            return regEnc;
+        } else {
+            emitByte(Prefix.REXWB);
+            return regEnc - 8;
+        }
+    }
+
+    // Two-register form without byte-register semantics; returns the lower
+    // 6 bits (reg << 3 | r/m) of the ModRM byte.
+    private int prefixAndEncode(int dstEnc, int srcEnc) {
+        return prefixAndEncode(dstEnc, false, srcEnc, false);
+    }
+
+    // Emits the REX prefix needed for a two-register instruction (REX.R for
+    // dst >= 8, REX.B for src >= 8, plain REX when a byte register with
+    // encoding 4-7 is involved) and returns reg << 3 | r/m for the ModRM byte.
+    private int prefixAndEncode(int dstEncoding, boolean dstIsByte, int srcEncoding, boolean srcIsByte) {
+        int srcEnc = srcEncoding;
+        int dstEnc = dstEncoding;
+        if (dstEnc < 8) {
+            if (srcEnc >= 8) {
+                emitByte(Prefix.REXB);
+                srcEnc -= 8;
+            } else if ((srcIsByte && srcEnc >= 4) || (dstIsByte && dstEnc >= 4)) {
+                emitByte(Prefix.REX);
+            }
+        } else {
+            if (srcEnc < 8) {
+                emitByte(Prefix.REXR);
+            } else {
+                emitByte(Prefix.REXRB);
+                srcEnc -= 8;
+            }
+            dstEnc -= 8;
+        }
+        return dstEnc << 3 | srcEnc;
+    }
+
+    /**
+     * Creates prefix and the encoding of the lower 6 bits of the ModRM-Byte. It emits an operand
+     * prefix. If the given operands exceed 3 bits, the 4th bit is encoded in the prefix.
+     * Always emits REX.W (64-bit operand size), adding REX.R and/or REX.B as needed.
+     *
+     * @param regEncoding the encoding of the register part of the ModRM-Byte
+     * @param rmEncoding the encoding of the r/m part of the ModRM-Byte
+     * @return the lower 6 bits of the ModRM-Byte that should be emitted
+     */
+    private int prefixqAndEncode(int regEncoding, int rmEncoding) {
+        int rmEnc = rmEncoding;
+        int regEnc = regEncoding;
+        if (regEnc < 8) {
+            if (rmEnc < 8) {
+                emitByte(Prefix.REXW);
+            } else {
+                emitByte(Prefix.REXWB);
+                rmEnc -= 8;
+            }
+        } else {
+            if (rmEnc < 8) {
+                emitByte(Prefix.REXWR);
+            } else {
+                emitByte(Prefix.REXWRB);
+                rmEnc -= 8;
+            }
+            regEnc -= 8;
+        }
+        return regEnc << 3 | rmEnc;
+    }
+
+    // True if the register's encoding cannot be expressed in 3 bits and hence
+    // requires an extension bit in a REX prefix.
+    private static boolean needsRex(Register reg) {
+        return reg.encoding >= MinEncodingNeedsRex;
+    }
+
+    // Emits the REX prefix (X/B extension bits only) required by the base and
+    // index registers of a memory operand; emits nothing when neither needs it.
+    private void prefix(AMD64Address adr) {
+        if (needsRex(adr.getBase())) {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXXB);
+            } else {
+                emitByte(Prefix.REXB);
+            }
+        } else {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXX);
+            }
+        }
+    }
+
+    // Emits a REX.W prefix (plus X/B extension bits as required by the base and
+    // index registers) for a 64-bit operation on a memory operand.
+    private void prefixq(AMD64Address adr) {
+        if (needsRex(adr.getBase())) {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXWXB);
+            } else {
+                emitByte(Prefix.REXWB);
+            }
+        } else {
+            if (needsRex(adr.getIndex())) {
+                emitByte(Prefix.REXWX);
+            } else {
+                emitByte(Prefix.REXW);
+            }
+        }
+    }
+
+    // Memory + register form without byte-register semantics.
+    private void prefix(AMD64Address adr, Register reg) {
+        prefix(adr, reg, false);
+    }
+
+    // Emits the REX prefix for a memory + register instruction: R bit for the
+    // register operand, X/B bits for the address's index/base. byteinst forces
+    // a plain REX for register encodings 4-7 (spl/bpl/sil/dil access).
+    private void prefix(AMD64Address adr, Register reg, boolean byteinst) {
+        if (reg.encoding < 8) {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXXB);
+                } else {
+                    emitByte(Prefix.REXB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXX);
+                } else if (byteinst && reg.encoding >= 4) {
+                    emitByte(Prefix.REX);
+                }
+            }
+        } else {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXRXB);
+                } else {
+                    emitByte(Prefix.REXRB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXRX);
+                } else {
+                    emitByte(Prefix.REXR);
+                }
+            }
+        }
+    }
+
+    // Emits a REX.W prefix (plus R/X/B extension bits as needed) for a 64-bit
+    // memory + register instruction.
+    private void prefixq(AMD64Address adr, Register src) {
+        if (src.encoding < 8) {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWXB);
+                } else {
+                    emitByte(Prefix.REXWB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWX);
+                } else {
+                    emitByte(Prefix.REXW);
+                }
+            }
+        } else {
+            if (needsRex(adr.getBase())) {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWRXB);
+                } else {
+                    emitByte(Prefix.REXWRB);
+                }
+            } else {
+                if (needsRex(adr.getIndex())) {
+                    emitByte(Prefix.REXWRX);
+                } else {
+                    emitByte(Prefix.REXWR);
+                }
+            }
+        }
+    }
+
+    // ADD of a (sign-extended) immediate to a 64-bit register; imm8 form when it fits.
+    public final void addq(Register dst, int imm32) {
+        ADD.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    // ADD of a (sign-extended) immediate to a 64-bit memory operand.
+    public final void addq(AMD64Address dst, int imm32) {
+        ADD.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    // ADD of two 64-bit registers.
+    public final void addq(Register dst, Register src) {
+        ADD.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    // ADD of a 64-bit register into a memory operand.
+    public final void addq(AMD64Address dst, Register src) {
+        ADD.mrOp.emit(this, QWORD, dst, src);
+    }
+
+    // AND of a (sign-extended) immediate with a 64-bit register; imm8 form when it fits.
+    public final void andq(Register dst, int imm32) {
+        AND.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    // BSWAP r64 (REX.W 0x0F 0xC8 + reg): reverse the byte order of a 64-bit register.
+    public final void bswapq(Register reg) {
+        int encode = prefixqAndEncode(reg.encoding);
+        emitByte(0x0F);
+        emitByte(0xC8 | encode);
+    }
+
+    // CQO (REX.W 0x99): sign-extend rax into rdx:rax.
+    public final void cdqq() {
+        emitByte(Prefix.REXW);
+        emitByte(0x99);
+    }
+
+    // CMOVcc r64, r64 (REX.W 0x0F 0x40 + cc): conditional 64-bit register move.
+    public final void cmovq(ConditionFlag cc, Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue());
+        emitByte(0xC0 | encode);
+    }
+
+    // CMOVcc r64, m64: conditional 64-bit load from memory.
+    public final void cmovq(ConditionFlag cc, Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x0F);
+        emitByte(0x40 | cc.getValue());
+        emitOperandHelper(dst, src);
+    }
+
+    // CMP of a 64-bit register with a (sign-extended) immediate.
+    public final void cmpq(Register dst, int imm32) {
+        CMP.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    // CMP of two 64-bit registers.
+    public final void cmpq(Register dst, Register src) {
+        CMP.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    // CMP of a 64-bit register with a memory operand.
+    public final void cmpq(Register dst, AMD64Address src) {
+        CMP.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    // 64-bit CMPXCHG (REX.W 0x0F 0xB1): same semantics as cmpxchgl but on rax
+    // and a quadword memory operand. Prefix with lock() for atomic use.
+    public final void cmpxchgq(Register reg, AMD64Address adr) {
+        prefixq(adr, reg);
+        emitByte(0x0F);
+        emitByte(0xB1);
+        emitOperandHelper(reg, adr);
+    }
+
+    // DEC of a 64-bit register: REX.W 0xFF /1 register-direct form.
+    protected final void decq(Register dst) {
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC8 | encode);
+    }
+
+    // DEC of a 64-bit memory operand.
+    public final void decq(AMD64Address dst) {
+        DEC.emit(this, QWORD, dst);
+    }
+
+    // INC of a 64-bit register: REX.W 0xFF /0 register-direct form.
+    public final void incq(Register dst) {
+        // Don't use it directly. Use the macro assembler's incrementq() instead.
+        // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xFF);
+        emitByte(0xC0 | encode);
+    }
+
+    // INC of a 64-bit memory operand.
+    public final void incq(AMD64Address dst) {
+        INC.emit(this, QWORD, dst);
+    }
+
+    // MOV r64, imm64 (REX.W 0xB8 + reg): load a full 64-bit immediate.
+    public final void movq(Register dst, long imm64) {
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xB8 | encode);
+        emitLong(imm64);
+    }
+
+    // MOV r64, imm32 (REX.W 0xC7 /0): load a 32-bit immediate sign-extended to 64 bits.
+    public final void movslq(Register dst, int imm32) {
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xC7);
+        emitByte(0xC0 | encode);
+        emitInt(imm32);
+    }
+
+    // MOVQ between a general-purpose and an XMM register (66 REX.W 0F 6E / 7E).
+    // Exactly one of the operands must be an XMM register.
+    public final void movdq(Register dst, Register src) {
+
+        // table D-1 says MMX/SSE2
+        emitByte(0x66);
+
+        if (dst.getRegisterCategory().equals(AMD64.XMM)) {
+            int encode = prefixqAndEncode(dst.encoding, src.encoding);
+            emitByte(0x0F);
+            emitByte(0x6E);
+            emitByte(0xC0 | encode);
+        } else if (src.getRegisterCategory().equals(AMD64.XMM)) {
+
+            // swap src/dst to get correct prefix
+            int encode = prefixqAndEncode(src.encoding, dst.encoding);
+            emitByte(0x0F);
+            emitByte(0x7E);
+            emitByte(0xC0 | encode);
+        } else {
+            throw new InternalError("should not reach here");
+        }
+    }
+
+    // MOVDQU xmm, m128 (0xF3 0x0F 0x6F): unaligned 128-bit load.
+    public final void movdqu(Register dst, AMD64Address src) {
+        emitByte(0xF3);
+        prefix(src, dst);
+        emitByte(0x0F);
+        emitByte(0x6F);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOV m64, imm32 (REX.W 0xC7 /0): store a 32-bit immediate sign-extended to
+    // a quadword memory operand.
+    public final void movslq(AMD64Address dst, int imm32) {
+        prefixq(dst);
+        emitByte(0xC7);
+        emitOperandHelper(0, dst);
+        emitInt(imm32);
+    }
+
+    // MOVSXD r64, m32 (REX.W 0x63): sign-extend a dword from memory into a 64-bit register.
+    public final void movslq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x63);
+        emitOperandHelper(dst, src);
+    }
+
+    // MOVSXD r64, r32 (REX.W 0x63): sign-extend a 32-bit register into a 64-bit register.
+    public final void movslq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x63);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void negq(Register dst) {
+        int encode = prefixqAndEncode(dst.encoding);
+        emitByte(0xF7);
+        emitByte(0xD8 | encode);
+    }
+
+    public final void shlq(Register dst, int imm8) {
+        assert isShiftCount(imm8 >> 1) : "illegal shift count";
+        int encode = prefixqAndEncode(dst.encoding);
+        if (imm8 == 1) {
+            emitByte(0xD1);
+            emitByte(0xE0 | encode);
+        } else {
+            emitByte(0xC1);
+            emitByte(0xE0 | encode);
+            emitByte(imm8);
+        }
+    }
+
+    public final void shrq(Register dst, int imm8) {
+        assert isShiftCount(imm8 >> 1) : "illegal shift count";
+        int encode = prefixqAndEncode(dst.encoding);
+        if (imm8 == 1) {
+            emitByte(0xD1);
+            emitByte(0xE8 | encode);
+        } else {
+            emitByte(0xC1);
+            emitByte(0xE8 | encode);
+            emitByte(imm8);
+        }
+    }
+
+    public final void subq(Register dst, int imm32) {
+        SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void subq(AMD64Address dst, int imm32) {
+        SUB.getMIOpcode(QWORD, isByte(imm32)).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void subqWide(Register dst, int imm32) {
+        // don't use the sign-extending version, forcing a 32-bit immediate
+        SUB.getMIOpcode(QWORD, false).emit(this, QWORD, dst, imm32);
+    }
+
+    public final void subq(Register dst, Register src) {
+        SUB.rmOp.emit(this, QWORD, dst, src);
+    }
+
+    public final void testq(Register dst, Register src) {
+        int encode = prefixqAndEncode(dst.encoding, src.encoding);
+        emitByte(0x85);
+        emitByte(0xC0 | encode);
+    }
+
+    public final void xaddl(AMD64Address dst, Register src) {
+        prefix(dst, src);
+        emitByte(0x0F);
+        emitByte(0xC1);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void xaddq(AMD64Address dst, Register src) {
+        prefixq(dst, src);
+        emitByte(0x0F);
+        emitByte(0xC1);
+        emitOperandHelper(src, dst);
+    }
+
+    public final void xchgl(Register dst, AMD64Address src) {
+        prefix(src, dst);
+        emitByte(0x87);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void xchgq(Register dst, AMD64Address src) {
+        prefixq(src, dst);
+        emitByte(0x87);
+        emitOperandHelper(dst, src);
+    }
+
+    public final void membar(int barriers) {
+        if (target.isMP) {
+            // We only have to handle StoreLoad
+            if ((barriers & STORE_LOAD) != 0) {
+                // All usable chips support "locked" instructions which suffice
+                // as barriers, and are much faster than the alternative of
+                // using cpuid instruction. We use here a locked add [rsp],0.
+                // This is conveniently otherwise a no-op except for blowing
+                // flags.
+                // Any change to this code may need to revisit other places in
+                // the code where this idiom is used, in particular the
+                // orderAccess code.
+                lock();
+                addl(new AMD64Address(rsp, 0), 0); // Assert the lock# signal here
+            }
+        }
+    }
+
+    @Override
+    protected final void patchJumpTarget(int branch, int branchTarget) {
+        int op = getByte(branch);
+        assert op == 0xE8 // call
+                        ||
+                        op == 0x00 // jump table entry
+                        || op == 0xE9 // jmp
+                        || op == 0xEB // short jmp
+                        || (op & 0xF0) == 0x70 // short jcc
+                        || op == 0x0F && (getByte(branch + 1) & 0xF0) == 0x80 // jcc
+        : "Invalid opcode at patch point branch=" + branch + ", branchTarget=" + branchTarget + ", op=" + op;
+
+        if (op == 0x00) {
+            int offsetToJumpTableBase = getShort(branch + 1);
+            int jumpTableBase = branch - offsetToJumpTableBase;
+            int imm32 = branchTarget - jumpTableBase;
+            emitInt(imm32, branch);
+        } else if (op == 0xEB || (op & 0xF0) == 0x70) {
+
+            // short offset operators (jmp and jcc)
+            final int imm8 = branchTarget - (branch + 2);
+            /*
+             * Since a wrongly patched short branch can potentially lead to working but badly
+             * behaving code, we should always fail with an exception instead of having an assert.
+             */
+            if (!NumUtil.isByte(imm8)) {
+                throw new InternalError("branch displacement out of range: " + imm8);
+            }
+            emitByte(imm8, branch + 1);
+
+        } else {
+
+            int off = 1;
+            if (op == 0x0F) {
+                off = 2;
+            }
+
+            int imm32 = branchTarget - (branch + 4 + off);
+            emitInt(imm32, branch + off);
+        }
+    }
+
+    public void nullCheck(AMD64Address address) {
+        testl(AMD64.rax, address);
+    }
+
+    @Override
+    public void align(int modulus) {
+        if (position() % modulus != 0) {
+            nop(modulus - (position() % modulus));
+        }
+    }
+
+    /**
+     * Emits a direct call instruction. Note that the actual call target is not specified, because
+     * all calls need patching anyway. Therefore, 0 is emitted as the call target, and the user is
+     * responsible for adding the call address to the appropriate patching tables.
+     */
+    public final void call() {
+        emitByte(0xE8);
+        emitInt(0);
+    }
+
+    public final void call(Register src) {
+        int encode = prefixAndEncode(src.encoding);
+        emitByte(0xFF);
+        emitByte(0xD0 | encode);
+    }
+
+    public final void int3() {
+        emitByte(0xCC);
+    }
+
+    private void emitx87(int b1, int b2, int i) {
+        assert 0 <= i && i < 8 : "illegal stack offset";
+        emitByte(b1);
+        emitByte(b2 + i);
+    }
+
+    public final void fldd(AMD64Address src) {
+        emitByte(0xDD);
+        emitOperandHelper(0, src);
+    }
+
+    public final void flds(AMD64Address src) {
+        emitByte(0xD9);
+        emitOperandHelper(0, src);
+    }
+
+    public final void fldln2() {
+        emitByte(0xD9);
+        emitByte(0xED);
+    }
+
+    public final void fldlg2() {
+        emitByte(0xD9);
+        emitByte(0xEC);
+    }
+
+    public final void fyl2x() {
+        emitByte(0xD9);
+        emitByte(0xF1);
+    }
+
+    public final void fstps(AMD64Address src) {
+        emitByte(0xD9);
+        emitOperandHelper(3, src);
+    }
+
+    public final void fstpd(AMD64Address src) {
+        emitByte(0xDD);
+        emitOperandHelper(3, src);
+    }
+
+    private void emitFPUArith(int b1, int b2, int i) {
+        assert 0 <= i && i < 8 : "illegal FPU register: " + i;
+        emitByte(b1);
+        emitByte(b2 + i);
+    }
+
+    public void ffree(int i) {
+        emitFPUArith(0xDD, 0xC0, i);
+    }
+
+    public void fincstp() {
+        emitByte(0xD9);
+        emitByte(0xF7);
+    }
+
+    public void fxch(int i) {
+        emitFPUArith(0xD9, 0xC8, i);
+    }
+
+    public void fnstswAX() {
+        emitByte(0xDF);
+        emitByte(0xE0);
+    }
+
+    public void fwait() {
+        emitByte(0x9B);
+    }
+
+    public void fprem() {
+        emitByte(0xD9);
+        emitByte(0xF8);
+    }
+
+    public final void fsin() {
+        emitByte(0xD9);
+        emitByte(0xFE);
+    }
+
+    public final void fcos() {
+        emitByte(0xD9);
+        emitByte(0xFF);
+    }
+
+    public final void fptan() {
+        emitByte(0xD9);
+        emitByte(0xF2);
+    }
+
+    public final void fstp(int i) {
+        emitx87(0xDD, 0xD8, i);
+    }
+
+    @Override
+    public AMD64Address makeAddress(Register base, int displacement) {
+        return new AMD64Address(base, displacement);
+    }
+
+    @Override
+    public AMD64Address getPlaceholder() {
+        return Placeholder;
+    }
+
+    private void prefetchPrefix(AMD64Address src) {
+        prefix(src);
+        emitByte(0x0F);
+    }
+
+    public void prefetchnta(AMD64Address src) {
+        prefetchPrefix(src);
+        emitByte(0x18);
+        emitOperandHelper(0, src);
+    }
+
+    void prefetchr(AMD64Address src) {
+        assert supports(CPUFeature.AMD_3DNOW_PREFETCH);
+        prefetchPrefix(src);
+        emitByte(0x0D);
+        emitOperandHelper(0, src);
+    }
+
+    public void prefetcht0(AMD64Address src) {
+        assert supports(CPUFeature.SSE);
+        prefetchPrefix(src);
+        emitByte(0x18);
+        emitOperandHelper(1, src);
+    }
+
+    public void prefetcht1(AMD64Address src) {
+        assert supports(CPUFeature.SSE);
+        prefetchPrefix(src);
+        emitByte(0x18);
+        emitOperandHelper(2, src);
+    }
+
+    public void prefetcht2(AMD64Address src) {
+        assert supports(CPUFeature.SSE);
+        prefix(src);
+        emitByte(0x0f);
+        emitByte(0x18);
+        emitOperandHelper(3, src);
+    }
+
+    public void prefetchw(AMD64Address src) {
+        assert supports(CPUFeature.AMD_3DNOW_PREFETCH);
+        prefix(src);
+        emitByte(0x0f);
+        emitByte(0x0D);
+        emitOperandHelper(1, src);
+    }
+
+    /**
+     * Emits an instruction which is considered to be illegal. This is used if we deliberately want
+     * to crash the program (debugging etc.).
+     */
+    public void illegal() {
+        emitByte(0x0f);
+        emitByte(0x0b);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.amd64/src/com/oracle/graal/asm/amd64/AMD64MacroAssembler.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.amd64;
+
+import com.oracle.graal.asm.*;
+import com.oracle.jvmci.amd64.*;
+import com.oracle.jvmci.code.Register;
+import com.oracle.jvmci.code.CalleeSaveLayout;
+import com.oracle.jvmci.code.TargetDescription;
+import com.oracle.jvmci.code.RegisterConfig;
+import com.oracle.jvmci.meta.Kind;
+
+import static com.oracle.graal.asm.amd64.AMD64AsmOptions.*;
+
+/**
+ * This class implements commonly used X86 code patterns.
+ */
+public class AMD64MacroAssembler extends AMD64Assembler {
+
+    public AMD64MacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target, registerConfig);
+    }
+
+    public final void decrementq(Register reg, int value) {
+        if (value == Integer.MIN_VALUE) {
+            subq(reg, value);
+            return;
+        }
+        if (value < 0) {
+            incrementq(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decq(reg);
+        } else {
+            subq(reg, value);
+        }
+    }
+
+    public final void decrementq(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            subq(dst, value);
+            return;
+        }
+        if (value < 0) {
+            incrementq(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decq(dst);
+        } else {
+            subq(dst, value);
+        }
+    }
+
+    public void incrementq(Register reg, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addq(reg, value);
+            return;
+        }
+        if (value < 0) {
+            decrementq(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incq(reg);
+        } else {
+            addq(reg, value);
+        }
+    }
+
+    public final void incrementq(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addq(dst, value);
+            return;
+        }
+        if (value < 0) {
+            decrementq(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incq(dst);
+        } else {
+            addq(dst, value);
+        }
+    }
+
+    public final void movptr(Register dst, AMD64Address src) {
+        movq(dst, src);
+    }
+
+    public final void movptr(AMD64Address dst, Register src) {
+        movq(dst, src);
+    }
+
+    public final void movptr(AMD64Address dst, int src) {
+        movslq(dst, src);
+    }
+
+    public final void cmpptr(Register src1, Register src2) {
+        cmpq(src1, src2);
+    }
+
+    public final void cmpptr(Register src1, AMD64Address src2) {
+        cmpq(src1, src2);
+    }
+
+    public final void decrementl(Register reg, int value) {
+        if (value == Integer.MIN_VALUE) {
+            subl(reg, value);
+            return;
+        }
+        if (value < 0) {
+            incrementl(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decl(reg);
+        } else {
+            subl(reg, value);
+        }
+    }
+
+    public final void decrementl(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            subl(dst, value);
+            return;
+        }
+        if (value < 0) {
+            incrementl(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            decl(dst);
+        } else {
+            subl(dst, value);
+        }
+    }
+
+    public final void incrementl(Register reg, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addl(reg, value);
+            return;
+        }
+        if (value < 0) {
+            decrementl(reg, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incl(reg);
+        } else {
+            addl(reg, value);
+        }
+    }
+
+    public final void incrementl(AMD64Address dst, int value) {
+        if (value == Integer.MIN_VALUE) {
+            addl(dst, value);
+            return;
+        }
+        if (value < 0) {
+            decrementl(dst, -value);
+            return;
+        }
+        if (value == 0) {
+            return;
+        }
+        if (value == 1 && UseIncDec) {
+            incl(dst);
+        } else {
+            addl(dst, value);
+        }
+    }
+
+    public void movflt(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
+        if (UseXmmRegToRegMoveAll) {
+            movaps(dst, src);
+        } else {
+            movss(dst, src);
+        }
+    }
+
+    public void movflt(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        movss(dst, src);
+    }
+
+    public void movflt(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        movss(dst, src);
+    }
+
+    public void movdbl(Register dst, Register src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM) && src.getRegisterCategory().equals(AMD64.XMM);
+        if (UseXmmRegToRegMoveAll) {
+            movapd(dst, src);
+        } else {
+            movsd(dst, src);
+        }
+    }
+
+    public void movdbl(Register dst, AMD64Address src) {
+        assert dst.getRegisterCategory().equals(AMD64.XMM);
+        if (UseXmmLoadAndClearUpper) {
+            movsd(dst, src);
+        } else {
+            movlpd(dst, src);
+        }
+    }
+
+    public void movdbl(AMD64Address dst, Register src) {
+        assert src.getRegisterCategory().equals(AMD64.XMM);
+        movsd(dst, src);
+    }
+
+    /**
+     * Non-atomic write of a 64-bit constant to memory. Do not use if the address might be a
+     * volatile field!
+     */
+    public final void movlong(AMD64Address dst, long src) {
+        if (NumUtil.isInt(src)) {
+            AMD64MIOp.MOV.emit(this, OperandSize.QWORD, dst, (int) src);
+        } else {
+            AMD64Address high = new AMD64Address(dst.getBase(), dst.getIndex(), dst.getScale(), dst.getDisplacement() + 4);
+            movl(dst, (int) (src & 0xFFFFFFFF));
+            movl(high, (int) (src >> 32));
+        }
+
+    }
+
+    public final void flog(Register dest, Register value, boolean base10) {
+        if (base10) {
+            fldlg2();
+        } else {
+            fldln2();
+        }
+        AMD64Address tmp = trigPrologue(value);
+        fyl2x();
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void fsin(Register dest, Register value) {
+        AMD64Address tmp = trigPrologue(value);
+        fsin();
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void fcos(Register dest, Register value) {
+        AMD64Address tmp = trigPrologue(value);
+        fcos();
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void ftan(Register dest, Register value) {
+        AMD64Address tmp = trigPrologue(value);
+        fptan();
+        fstp(0); // ftan pushes 1.0 in addition to the actual result, pop
+        trigEpilogue(dest, tmp);
+    }
+
+    public final void fpop() {
+        ffree(0);
+        fincstp();
+    }
+
+    private AMD64Address trigPrologue(Register value) {
+        assert value.getRegisterCategory().equals(AMD64.XMM);
+        AMD64Address tmp = new AMD64Address(AMD64.rsp);
+        subq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
+        movdbl(tmp, value);
+        fldd(tmp);
+        return tmp;
+    }
+
+    private void trigEpilogue(Register dest, AMD64Address tmp) {
+        assert dest.getRegisterCategory().equals(AMD64.XMM);
+        fstpd(tmp);
+        movdbl(dest, tmp);
+        addq(AMD64.rsp, target.getSizeInBytes(Kind.Double));
+    }
+
+    /**
+     * Emit code to save a given set of callee save registers in the {@linkplain CalleeSaveLayout
+     * CSA} within the frame.
+     *
+     * @param csl the description of the CSA
+     * @param frameToCSA offset from the frame pointer to the CSA
+     */
+    public final void save(CalleeSaveLayout csl, int frameToCSA) {
+        for (Register r : csl.registers) {
+            int offset = csl.offsetOf(r);
+            movq(new AMD64Address(frameRegister, frameToCSA + offset), r);
+        }
+    }
+
+    public final void restore(CalleeSaveLayout csl, int frameToCSA) {
+        for (Register r : csl.registers) {
+            int offset = csl.offsetOf(r);
+            movq(r, new AMD64Address(frameRegister, frameToCSA + offset));
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAddress.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.sparc;
+
+import static com.oracle.jvmci.sparc.SPARC.*;
+
+import com.oracle.jvmci.code.*;
+import com.oracle.jvmci.sparc.*;
+
+public class SPARCAddress extends AbstractAddress {
+
+    private final Register base;
+    private final Register index;
+    private final int displacement;
+
+    /**
+     * Creates a {@link SPARCAddress} with given base register, no scaling and a given
+     * displacement.
+     *
+     * @param base the base register
+     * @param displacement the displacement
+     */
+    public SPARCAddress(Register base, int displacement) {
+        this.base = base;
+        this.index = Register.None;
+        this.displacement = displacement;
+    }
+
+    /**
+     * Creates a {@link SPARCAddress} with given base register, no scaling and a given index.
+     *
+     * @param base the base register
+     * @param index the index register
+     */
+    public SPARCAddress(Register base, Register index) {
+        this.base = base;
+        this.index = index;
+        this.displacement = 0;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder s = new StringBuilder();
+        s.append("[");
+        String sep = "";
+        if (!getBase().equals(Register.None)) {
+            s.append(getBase());
+            sep = " + ";
+        }
+        if (!getIndex().equals(Register.None)) {
+            s.append(sep).append(getIndex());
+            sep = " + ";
+        } else {
+            if (getDisplacement() < 0) {
+                s.append(" - ").append(-getDisplacement());
+            } else if (getDisplacement() > 0) {
+                s.append(sep).append(getDisplacement());
+            }
+        }
+        s.append("]");
+        return s.toString();
+    }
+
+    /**
+     * @return Base register that defines the start of the address computation. If not present, is
+     *         denoted by {@link Register#None}.
+     */
+    public Register getBase() {
+        return base;
+    }
+
+    /**
+     * @return Index register, the value of which is added to {@link #getBase}. If not present, is
+     *         denoted by {@link Register#None}.
+     */
+    public Register getIndex() {
+        return index;
+    }
+
+    /**
+     * @return true if this address has an index register
+     */
+    public boolean hasIndex() {
+        return !getIndex().equals(Register.None);
+    }
+
+    /**
+     * This method adds the stack-bias to the displacement if the base register is either
+     * {@link SPARC#sp} or {@link SPARC#fp}.
+     *
+     * @return Optional additive displacement.
+     */
+    public int getDisplacement() {
+        if (hasIndex()) {
+            throw new InternalError("address has index register");
+        }
+        // TODO Should we also hide the register save area size here?
+        if (getBase().equals(sp) || getBase().equals(fp)) {
+            return displacement + STACK_BIAS;
+        }
+        return displacement;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCAssembler.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,1747 @@
+/*
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.sparc;
+
+import static com.oracle.graal.asm.sparc.SPARCAssembler.CC.*;
+import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
+import static com.oracle.graal.asm.sparc.SPARCAssembler.Op.*;
+import static com.oracle.graal.asm.sparc.SPARCAssembler.Op3s.*;
+import static com.oracle.graal.asm.sparc.SPARCAssembler.Opfs.*;
+import static com.oracle.jvmci.sparc.SPARC.*;
+import static java.lang.String.*;
+
+import com.oracle.graal.asm.*;
+import com.oracle.jvmci.code.*;
+import com.oracle.jvmci.meta.*;
+import com.oracle.jvmci.sparc.*;
+import com.oracle.jvmci.sparc.SPARC.CPUFeature;
+
+/**
+ * This class implements an assembler that can encode most SPARC instructions.
+ */
+public abstract class SPARCAssembler extends Assembler {
+
+    /**
+     * Constructs an assembler for the SPARC architecture.
+     *
+     * @param registerConfig the register configuration used to bind {@link Register#Frame} and
+     *            {@link Register#CallerFrame} to physical registers. This value can be null if this
+     *            assembler instance will not be used to assemble instructions using these logical
+     *            registers.
+     */
+    public SPARCAssembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target);
+    }
+
+    public static final int CCR_ICC_SHIFT = 0;
+    public static final int CCR_XCC_SHIFT = 4;
+    public static final int CCR_C_SHIFT = 0;
+    public static final int CCR_V_SHIFT = 1;
+    public static final int CCR_Z_SHIFT = 2;
+    public static final int CCR_N_SHIFT = 3;
+
+    protected static final int OP_SHIFT = 30;
+    protected static final int CBCOND_SHIFT = 28;
+    protected static final int OP2_SHIFT = 22;
+    protected static final int A_SHIFT = 29;
+
+    protected static final int A_MASK = 0b0010_0000_0000_0000_0000_0000_0000_0000;
+    protected static final int OP_MASK = 0b1100_0000_0000_0000_0000_0000_0000_0000;
+    protected static final int CBCOND_MASK = 0b0001_0000_0000_0000_0000_0000_0000_0000; // Used to
+    // distinguish CBcond and BPr instructions
+    protected static final int OP2_MASK = 0b0000_0001_1100_0000_0000_0000_0000_0000;
+
+    protected static final int DISP22_SHIFT = 0;
+    protected static final int DISP22_MASK = 0b00000000001111111111111111111111;
+
+    protected static final int DISP19_SHIFT = 0;
+    protected static final int DISP19_MASK = 0b00000000000001111111111111111111;
+
+    protected static final int D16HI_SHIFT = 20;
+    protected static final int D16HI_MASK = 0b0000_0000_0011_0000_0000_0000_0000_0000;
+    protected static final int D16LO_SHIFT = 0;
+    protected static final int D16LO_MASK = 0b0000_0000_0000_0000_0011_1111_1111_1111;
+
+    protected static final int D10LO_MASK = 0b0000_0000_0000_0000_0001_1111_1110_0000;
+    protected static final int D10HI_MASK = 0b0000_0000_0001_1000_0000_0000_0000_0000;
+    protected static final int D10LO_SHIFT = 5;
+    protected static final int D10HI_SHIFT = 19;
+
+    public enum Ops {
+        // @formatter:off
+
+        BranchOp(0b00),
+        CallOp(0b01),
+        ArithOp(0b10),
+        LdstOp(0b11);
+
+        // @formatter:on
+
+        private final int value;
+
+        private Ops(int value) {
+            this.value = value;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public boolean appliesTo(int instructionWord) {
+            int opShift = 30;
+            return (instructionWord >>> opShift) == value;
+        }
+    }
+
+    public enum Op {
+        Op00(0b00),
+        Op01(0b01),
+        Op10(0b10),
+        Op11(0b11);
+        int op;
+
+        Op(int op) {
+            this.op = op;
+        }
+    }
+
+    public enum Op2s {
+        // @formatter:off
+
+        Illtrap(0b000),
+        Bpr    (0b011),
+        Fb     (0b110),
+        Fbp    (0b101),
+        Br     (0b010),
+        Bp     (0b001),
+        Cb     (0b111),
+        Sethi  (0b100);
+
+
+        // @formatter:on
+
+        private final int value;
+
+        private Op2s(int value) {
+            this.value = value;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public static Op2s byValue(int value) {
+            for (Op2s op : values()) {
+                if (op.getValue() == value) {
+                    return op;
+                }
+            }
+            return null;
+        }
+    }
+
+    public enum Op3s {
+        // @formatter:off
+
+        Add(0x00, "add", Op10),
+        And(0x01, "and", Op10),
+        Or(0x02, "or", Op10),
+        Xor(0x03, "xor", Op10),
+        Sub(0x04, "sub", Op10),
+        Andn(0x05, "andn", Op10),
+        Orn(0x06, "orn", Op10),
+        Xnor(0x07, "xnor", Op10),
+        Addc(0x08, "addc", Op10),
+        Mulx(0x09, "mulx", Op10),
+        Umul(0x0A, "umul", Op10),
+        Smul(0x0B, "smul", Op10),
+        Subc(0x0C, "subc", Op10),
+        Udivx(0x0D, "udivx", Op10),
+        Udiv(0x0E, "udiv", Op10),
+        Sdiv(0x0F, "sdiv", Op10),
+
+        Addcc(0x10, "addcc", Op10),
+        Andcc(0x11, "andcc", Op10),
+        Orcc(0x12, "orcc", Op10),
+        Xorcc(0x13, "xorcc", Op10),
+        Subcc(0x14, "subcc", Op10),
+        Andncc(0x15, "andncc", Op10),
+        Orncc(0x16, "orncc", Op10),
+        Xnorcc(0x17, "xnorcc", Op10),
+        Addccc(0x18, "addccc", Op10),
+
+        Umulcc(0x1A, "umulcc", Op10),
+        Smulcc(0x1B, "smulcc", Op10),
+        Subccc(0x1C, "subccc", Op10),
+        Udivcc(0x1E, "udivcc", Op10),
+        Sdivcc(0x1F, "sdivcc", Op10),
+
+        Taddcc(0x20, "taddcc", Op10),
+        Tsubcc(0x21, "tsubcc", Op10),
+        Taddcctv(0x22, "taddcctv", Op10),
+        Tsubcctv(0x23, "tsubcctv", Op10),
+        Mulscc(0x24, "mulscc", Op10),
+        Sll(0x25, "sll", Op10),
+        Sllx(0x25, "sllx", Op10),
+        Srl(0x26, "srl", Op10),
+        Srlx(0x26, "srlx", Op10),
+        Sra(0x27, "sra", Op10),
+        Srax(0x27, "srax", Op10),
+        Membar(0x28, "membar", Op10),
+
+        Flushw(0x2B, "flushw", Op10),
+        Movcc(0x2C, "movcc", Op10),
+        Sdivx(0x2D, "sdivx", Op10),
+        Popc(0x2E, "popc", Op10),
+        Movr(0x2F, "movr", Op10),
+
+        Fpop1(0b11_0100, "fpop1", Op10),
+        Fpop2(0b11_0101, "fpop2", Op10),
+        Impdep1(0b11_0110, "impdep1", Op10),
+        Impdep2(0b11_0111, "impdep2", Op10),
+        Jmpl(0x38, "jmpl", Op10),
+        Rett(0x39, "rett", Op10),
+        Trap(0x3a, "trap", Op10),
+        Flush(0x3b, "flush", Op10),
+        Save(0x3c, "save", Op10),
+        Restore(0x3d, "restore", Op10),
+        Retry(0x3e, "retry", Op10),
+
+
+        Casa(0b111100, "casa", Op11),
+        Casxa(0b111110, "casxa", Op11),
+        Prefetch(0b101101, "prefetch", Op11),
+        Prefetcha(0b111101, "prefetcha", Op11),
+
+        Lduw  (0b00_0000, "lduw", Op11),
+        Ldub  (0b00_0001, "ldub", Op11),
+        Lduh  (0b00_0010, "lduh", Op11),
+        Stw   (0b00_0100, "stw", Op11),
+        Stb   (0b00_0101, "stb", Op11),
+        Sth   (0b00_0110, "sth", Op11),
+        Ldsw  (0b00_1000, "ldsw", Op11),
+        Ldsb  (0b00_1001, "ldsb", Op11),
+        Ldsh  (0b00_1010, "ldsh", Op11),
+        Ldx   (0b00_1011, "ldx", Op11),
+        Stx   (0b00_1110, "stx", Op11),
+
+        Ldf   (0b10_0000, "ldf", Op11),
+        Ldfsr (0b10_0001, "ldfsr", Op11),
+        Ldaf  (0b10_0010, "ldaf", Op11),
+        Lddf  (0b10_0011, "lddf", Op11),
+        Stf   (0b10_0100, "stf", Op11),
+        Stfsr (0b10_0101, "stfsr", Op11),
+        Staf  (0x10_0110, "staf", Op11),
+        Stdf  (0b10_0111, "stdf", Op11),
+
+        Rd    (0b10_1000, "rd", Op10),
+        Wr    (0b11_0000, "wr", Op10),
+        Fcmp  (0b11_0101, "fcmp", Op10),
+
+        Ldxa  (0b01_1011, "ldxa", Op11),
+        Lduwa (0b01_0000, "lduwa", Op11),
+
+        Tcc(0b11_1010, "tcc", Op10);
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+        private final Op op;
+
+        private Op3s(int value, String name, Op op) {
+            this.value = value;
+            this.operator = name;
+            this.op = op;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+
+        public boolean appliesTo(int instructionWord) {
+            return ((instructionWord >>> 19) & 0b1_1111) == value;
+        }
+    }
+
+    public enum Opfs {
+        // @formatter:off
+
+        Fmovs(0b0_0000_0001, "fmovs"),
+        Fmovd(0b0_0000_0010, "fmovd"),
+        Fmovq(0b0_0000_0011, "fmovq"),
+        Fmovscc(0b00_0001, "fmovscc"),
+        Fmovdcc(0b00_0010, "fmovdcc"),
+        Fnegs(0x05, "fnegs"),
+        Fnegd(0x06, "fnegd"),
+        Fnegq(0x07, "fnegq"),
+        Fabss(0x09, "fabss"),
+        Fabsd(0x0A, "fabsd"),
+        Fabsq(0x0B, "fabsq"),
+
+        // start VIS1
+        Edge8cc(0x0, "edge8cc"),
+        Edge8n(0x1, "edge8n"),
+        Edge8lcc(0x2, "edge8lcc"),
+        Edge8ln(0x3, "edge8ln"),
+        Edge16cc(0x4, "edge16cc"),
+        Edge16n(0x5, "edge16n"),
+        Edge16lcc(0x6, "edge16lcc"),
+        Edge16ln(0x7, "edge16ln"),
+        Edge32cc(0x8, "edge32cc"),
+        Edge32n(0x9, "edge32n"),
+        Edge32lcc(0xA, "edge32lcc"),
+        Edge32ln(0xB, "edge32ln"),
+        Array8(0x10, "array8"),
+        Array16(0x12, "array16"),
+        Array32(0x14, "array32"),
+        AlignAddress(0x18, "alignaddress"),
+        AlignAddressLittle(0x1A, "alignaddress_little"),
+        Fpcmple16(0x20, "fpcmple16"),
+        Fpcmpne16(0x22, "fpcmpne16"),
+        Fpcmple32(0x24, "fpcmple32"),
+        Fpcmpne32(0x26, "fpcmpne32"),
+        Fpcmpgt16(0x28, "fpcmpgt16"),
+        Fpcmpeq16(0x2A, "fpcmpeq16"),
+        Fpcmpgt32(0x2C, "fpcmpgt32"),
+        Fpcmpeq32(0x2E, "fpcmpeq32"),
+        Fmul8x16(0x31, "fmul8x16"),
+        Fmul8x16au(0x33, "fmul8x16au"),
+        Fmul8x16al(0x35, "fmul8x16al"),
+        Fmul8sux16(0x36, "fmul8sux16"),
+        Fmul8ulx16(0x37, "fmul8ulx16"),
+        Fmuld8sux16(0x38, "fmuld8sux16"),
+        Fmuld8ulx16(0x39, "fmuld8ulx16"),
+        Fpack32(0x3A, "fpack32"),
+        Fpack16(0x3B, "fpack16"),
+        Fpackfix(0x3D, "fpackfix"),
+        Faligndatag(0x48, "faligndata"),
+        Fpmerge(0x4B, "fpmerge"),
+        Fpadd16(0x50, "fpadd16"),
+        Fpadd16s(0x51, "fpadd16s"),
+        Fpadd32(0x52, "fpadd32"),
+        Fpadd32s(0x53, "fpadd32s"),
+        Fpsub16(0x54, "fpadd16"),
+        Fpsub16s(0x55, "fpadd16s"),
+        Fpsub32(0x56, "fpadd32"),
+        Fpsub32s(0x57, "fpadd32s"),
+        Fzerod(0x60, "fzerod"),
+        Fzeros(0x61, "fzeros"),
+        Fnot2d(0x66, "fnot1d"),
+        Fnot2s(0x67, "fnot1s"),
+        Fnot1d(0x6A, "fnot1d"),
+        Fnot1s(0x6B, "fnot1s"),
+        Fsrc1d(0x74, "fsrc1d"),
+        Fsrc1s(0x75, "fsrc1s"),
+        Fsrc2d(0x78, "fsrc2d"),
+        Fsrc2s(0x79, "fsrc2s"),
+        Foned(0x7E, "foned"),
+        Fones(0x7F, "fones"),
+        Fandd(0b0_0111_0000, "fandd"),
+        Fands(0b0_0111_0001, "fands"),
+        Fxord(0b0_0110_1100, "fxord"),
+        Fxors(0b0_0110_1101, "fxors"),
+        // end VIS1
+
+        // start VIS2
+        Bmask(0x19, "bmask"),
+        Bshuffle(0x4c, "bshuffle"),
+        // end VIS2 only
+
+        // start VIS3
+        Addxc(0x11, "addxc"),
+        Addxccc(0x13, "addxccc"),
+        Cmask8(0x1B, "cmask8"),
+        Cmask16(0x1D, "cmask16"),
+        Cmask32(0x1F, "cmask32"),
+        Fmean16(0x40, "fmean16"),
+        Fnadds(0x51, "fnadds"),
+        Fnaddd(0x52, "fnaddd"),
+        Fnmuls(0x59, "fnmuls"),
+        Fnmuld(0x5A, "fnmuld"),
+        Fnsmuld(0x79, "fnsmuld"),
+        Fnhadds(0x71, "fnhadds"),
+        Fnhaddd(0x72, "fnhaddd"),
+        Movdtox(0x110, "movdtox"),
+        Movstouw(0x111, "movstouw"),
+        Movstosw(0x113, "movstosw"),
+        Movxtod(0x118, "movxtod"),
+        Movwtos(0b1_0001_1001, "movwtos"),
+        UMulxhi(0b0_0001_0110, "umulxhi"),
+        Lzcnt  (0b0_0001_0111, "lzcnt"),
+        // end VIS3
+
+        // start CAMMELLIA
+        CammelliaFl(0x13C, "cammelia_fl"),
+        CammelliaFli(0x13D, "cammellia_fli"),
+        // end CAMMELLIA
+
+        // start CRYPTO
+        Crc32c(0x147, "crc32c"),
+        // end CRYPTO
+
+        // start OSA 2011
+        Fpadd64(0x44, "fpadd64"),
+        Fpsub64(0x46, "fpsub64"),
+        Fpadds16(0x58, "fpadds16"),
+        Fpadds16s(0x59, "fpadds16"),
+        Fpadds32(0x5A, "fpadds32"),
+        Fpadds32s(0x5B, "fpadds32s"),
+        Fpsubs16(0x5C, "fpsubs16"),
+        Fpsubs16s(0x5D, "fpsubs16s"),
+        Fpsubs32(0x5E, "fpsubs32"),
+        Fpsubs32s(0x5F, "fpsubs32s"),
+        Fpcmpne8(0x122, "fpcmpne8"),
+        Fpcmpeq8(0x12C, "fpcmpeq8"),
+        // end OSA 2011
+
+        Fadds(0x41, "fadds"),
+        Faddd(0x42, "faddd"),
+        Faddq(0x43, "faddq"),
+        Fsubs(0x45, "fsubs"),
+        Fsubd(0x46, "fsubd"),
+        Fsubq(0x47, "fsubq"),
+        Fmuls(0x49, "fmuls"),
+        Fmuld(0x4A, "fmuld"),
+        Fdivs(0x4D, "fdivs"),
+        Fdivd(0x4E, "fdivd"),
+        Fdivq(0x4F, "fdivq"),
+
+        Fsqrts(0x29, "fsqrts"),
+        Fsqrtd(0x2A, "fsqrtd"),
+        Fsqrtq(0x2B, "fsqrtq"),
+
+        Fsmuld(0x69, "fsmuld"),
+        Fmulq(0x6B, "fmulq"),
+        Fdmuldq(0x6E, "fdmulq"),
+
+        Fstoi(0xD1, "fstoi"),
+        Fdtoi(0xD2, "fdtoi"),
+        Fstox(0x81, "fstox"),
+        Fdtox(0x82, "fdtox"),
+        Fxtos(0x84, "fxtos"),
+        Fxtod(0x88, "fxtod"),
+        Fxtoq(0x8C, "fxtoq"),
+        Fitos(0xC4, "fitos"),
+        Fdtos(0xC6, "fdtos"),
+        Fitod(0xC8, "fitod"),
+        Fstod(0xC9, "fstod"),
+        Fitoq(0xCC, "fitoq"),
+
+
+        Fcmps(0x51, "fcmps"),
+        Fcmpd(0x52, "fcmpd"),
+        Fcmpq(0x53, "fcmpq");
+
+        // @formatter:on
+
+        private final int value;
+        private final String operator;
+
+        private Opfs(int value, String op) {
+            this.value = value;
+            this.operator = op;
+        }
+
+        public int getValue() {
+            return value;
+        }
+
+        public String getOperator() {
+            return operator;
+        }
+    }
+
    /**
     * Annul flag (a-bit) of branch instructions. The branch emitters shift
     * {@link #flag} left by 4 and hand it to {@code fmt00}, which places it at
     * instruction bit 29.
     */
    public enum Annul {
        ANNUL(1),
        NOT_ANNUL(0);
        // Raw bit value; position in the instruction word is applied by the emitters.
        public final int flag;

        Annul(int flag) {
            this.flag = flag;
        }
    }
+
    /**
     * Static branch-prediction hint (p-bit) of BPcc/FBPfcc/BPr instructions.
     * The emitters place {@link #flag} at instruction bit 19.
     */
    public enum BranchPredict {
        PREDICT_TAKEN(1),
        PREDICT_NOT_TAKEN(0);
        // Raw bit value; position in the instruction word is applied by the emitters.
        public final int flag;

        BranchPredict(int flag) {
            this.flag = flag;
        }
    }
+
    /**
     * Ordering constraints for the {@code membar} instruction. The low four
     * constants form the mmask, the upper three the cmask.
     */
    public enum MembarMask {
        // @formatter:off

        StoreStore(1 << 3, "storestore"),
        LoadStore(1 << 2, "loadstore"),
        StoreLoad(1 << 1, "storeload"),
        LoadLoad(1 << 0, "loadload"),
        Sync(1 << 6, "sync"),
        MemIssue(1 << 5, "memissue"),
        LookAside(1 << 4, "lookaside");

        // @formatter:on

        private final int value;
        private final String operator;

        private MembarMask(int value, String op) {
            this.value = value;
            this.operator = op;
        }

        public int getValue() {
            // 0x2000 = 1 << 13: presumably the i-bit selecting the immediate
            // operand form, which membar always uses — confirm against the emitter.
            return value | 0x2000;
        }

        public String getOperator() {
            return operator;
        }
    }
+
    /**
     * Condition Codes to use for instruction. Icc/Xcc select the integer condition
     * codes (32/64-bit); Fcc0..Fcc3 select one of the floating-point condition-code
     * registers.
     */
    public enum CC {
        // @formatter:off
        /**
         * Condition is considered as 32bit operation condition.
         */
        Icc(0b00, "icc", false),
        /**
         * Condition is considered as 64bit operation condition.
         */
        Xcc(0b10, "xcc", false),
        Fcc0(0b00, "fcc0", true),
        Fcc1(0b01, "fcc1", true),
        Fcc2(0b10, "fcc2", true),
        Fcc3(0b11, "fcc3", true);

        // @formatter:on

        // 2-bit cc field encoding.
        private final int value;
        private final String operator;
        // True for the floating-point condition-code registers.
        private boolean isFloat;

        private CC(int value, String op, boolean isFloat) {
            this.value = value;
            this.operator = op;
            this.isFloat = isFloat;
        }

        public int getValue() {
            return value;
        }

        public String getOperator() {
            return operator;
        }

        /**
         * Maps a Java kind to the condition-code register a comparison of that kind
         * sets: Xcc for long/object, Icc for sub-int/int kinds, Fcc0 for
         * float/double. Any other kind trips the assert (or the InternalError when
         * asserts are disabled).
         */
        public static CC forKind(Kind kind) {
            boolean isInt = kind == Kind.Boolean || kind == Kind.Byte || kind == Kind.Char || kind == Kind.Short || kind == Kind.Int;
            boolean isFloat = kind == Kind.Float || kind == Kind.Double;
            boolean isLong = kind == Kind.Long || kind == Kind.Object;
            assert isInt || isFloat || isLong;
            if (isLong) {
                return Xcc;
            } else if (isInt) {
                return Icc;
            } else if (isFloat) {
                return Fcc0;
            } else {
                throw new InternalError();
            }
        }
    }
+
    /**
     * Condition encodings for conditional branches and moves. The F_* constants
     * are the fcond values of the floating-point branches (FBfcc/FBPfcc); the
     * remaining constants are the integer cond values. Constants created with the
     * three-argument constructor are additionally usable with CBcond
     * (see {@link #isCBCond()}).
     */
    public enum ConditionFlag {
        // @formatter:off

        // for FBfcc & FBPfcc instruction
        F_Never(0, "f_never"),
        F_NotEqual(1, "f_notEqual"),
        F_LessOrGreater(2, "f_lessOrGreater"),
        F_UnorderedOrLess(3, "f_unorderedOrLess"),
        F_Less(4, "f_less"),
        F_UnorderedOrGreater(5, "f_unorderedOrGreater"),
        F_Greater(6, "f_greater"),
        F_Unordered(7, "f_unordered"),
        F_Always(8, "f_always"),
        F_Equal(9, "f_equal"),
        F_UnorderedOrEqual(10, "f_unorderedOrEqual"),
        F_GreaterOrEqual(11, "f_greaterOrEqual"),
        F_UnorderedGreaterOrEqual(12, "f_unorderedGreaterOrEqual"),
        F_LessOrEqual(13, "f_lessOrEqual"),
        F_UnorderedOrLessOrEqual(14, "f_unorderedOrLessOrEqual"),
        F_Ordered(15, "f_ordered"),

        // for integers
        Never(0, "never"),
        Equal(1, "equal", true),
        Zero(1, "zero"),
        LessEqual(2, "lessEqual", true),
        Less(3, "less", true),
        LessEqualUnsigned(4, "lessEqualUnsigned", true),
        LessUnsigned(5, "lessUnsigned", true),
        CarrySet(5, "carrySet"),
        Negative(6, "negative", true),
        OverflowSet(7, "overflowSet", true),
        Always(8, "always"),
        NotEqual(9, "notEqual", true),
        NotZero(9, "notZero"),
        Greater(10, "greater", true),
        GreaterEqual(11, "greaterEqual", true),
        GreaterUnsigned(12, "greaterUnsigned", true),
        GreaterEqualUnsigned(13, "greaterEqualUnsigned", true),
        CarryClear(13, "carryClear"),
        Positive(14, "positive", true),
        OverflowClear(15, "overflowClear", true);

        // @formatter:on

        // 4-bit cond field encoding.
        private final int value;
        private final String operator;
        // Whether this flag may be used with the CBcond compare-and-branch.
        private boolean forCBcond = false;

        private ConditionFlag(int value, String op) {
            this(value, op, false);
        }

        private ConditionFlag(int value, String op, boolean cbcond) {
            this.value = value;
            this.operator = op;
            this.forCBcond = cbcond;
        }

        public boolean isCBCond() {
            return forCBcond;
        }

        public int getValue() {
            return value;
        }

        public String getOperator() {
            return operator;
        }

        /**
         * Returns the logically negated condition, i.e. the flag that is satisfied
         * exactly when this flag is not.
         */
        public ConditionFlag negate() {
            //@formatter:off
            switch (this) {
                case F_Never                  : return F_Always;
                case F_Always                 : return F_Never;
                case F_NotEqual               : return F_Equal;
                case F_Equal                  : return F_NotEqual;
                case F_LessOrGreater          : return F_UnorderedOrEqual;
                case F_UnorderedOrEqual       : return F_LessOrGreater;
                case F_Less                   : return F_UnorderedGreaterOrEqual;
                case F_UnorderedGreaterOrEqual: return F_Less;
                case F_LessOrEqual            : return F_UnorderedOrGreater;
                case F_UnorderedOrGreater     : return F_LessOrEqual;
                case F_Greater                : return F_UnorderedOrLessOrEqual;
                case F_UnorderedOrLessOrEqual : return F_Greater;
                case F_GreaterOrEqual         : return F_UnorderedOrLess;
                case F_UnorderedOrLess        : return F_GreaterOrEqual;
                case F_Unordered              : return F_Ordered;
                case F_Ordered                : return F_Unordered;
                case Never                    : return Always;
                case Always                   : return Never;
                case Equal                    : return NotEqual;
                case NotEqual                 : return Equal;
                case Zero                     : return NotZero;
                case NotZero                  : return Zero;
                case LessEqual                : return Greater;
                case Greater                  : return LessEqual;
                case Less                     : return GreaterEqual;
                case GreaterEqual             : return Less;
                case LessEqualUnsigned        : return GreaterUnsigned;
                case GreaterUnsigned          : return LessEqualUnsigned;
                case LessUnsigned             : return GreaterEqualUnsigned;
                case GreaterEqualUnsigned     : return LessUnsigned;
                case CarrySet                 : return CarryClear;
                case CarryClear               : return CarrySet;
                case Negative                 : return Positive;
                case Positive                 : return Negative;
                case OverflowSet              : return OverflowClear;
                case OverflowClear            : return OverflowSet;
                default:
                    throw new InternalError();
            }
            //@formatter:on
        }

        /**
         * Returns the condition to use when the two compared operands are swapped
         * (e.g. Less becomes Greater). Conditions that are symmetric in their
         * operands (equality, overflow, etc.) map to themselves.
         */
        public ConditionFlag mirror() {
            switch (this) {
            //@formatter:off
                case F_Less                   : return F_Greater;
                case F_Greater                : return F_Less;
                case F_LessOrEqual            : return F_GreaterOrEqual;
                case F_UnorderedGreaterOrEqual: return F_UnorderedOrLessOrEqual;
                case F_UnorderedOrGreater     : return F_UnorderedOrLess;
                case F_UnorderedOrLessOrEqual : return F_UnorderedGreaterOrEqual;
                case F_GreaterOrEqual         : return F_LessOrEqual;
                case F_UnorderedOrLess        : return F_UnorderedOrGreater;
                case LessEqual                : return GreaterEqual;
                case Greater                  : return Less;
                case Less                     : return Greater;
                case GreaterEqual             : return LessEqual;
                case LessEqualUnsigned        : return GreaterEqualUnsigned;
                case GreaterUnsigned          : return LessUnsigned;
                case LessUnsigned             : return GreaterUnsigned;
                case GreaterEqualUnsigned     : return LessEqualUnsigned;
                default:
                    return this;
                //@formatter:on
            }
        }

    }
+
    /**
     * Register conditions (rcond field) for branch/move-on-register-value
     * instructions such as {@link #bpr}: zero, less-or-equal-zero, less-zero and
     * their negations.
     */
    public enum RCondition {
        // @formatter:off

        Rc_z(0b001, "rc_z"),
        Rc_lez(0b010, "rc_lez"),
        Rc_lz(0b011, "rc_lz"),
        Rc_nz(0b101, "rc_nz"),
        Rc_gz(0b110, "rc_gz"),
        Rc_gez(0b111, "rc_gez"),
        Rc_last(Rc_gez.getValue(), "rc_last");

        // @formatter:on

        // 3-bit rcond field encoding.
        private final int value;
        private final String operator;

        private RCondition(int value, String op) {
            this.value = value;
            this.operator = op;
        }

        public int getValue() {
            return value;
        }

        public String getOperator() {
            return operator;
        }
    }
+
    /**
     * Represents the <b>Address Space Identifier</b> defined in the SPARC architecture.
     */
    public enum Asi {
        // @formatter:off

        INVALID(-1),
        ASI_PRIMARY(0x80),
        ASI_PRIMARY_NOFAULT(0x82),
        ASI_PRIMARY_LITTLE(0x88),
        // Block initializing store
        ASI_ST_BLKINIT_PRIMARY(0xE2),
        // Most-Recently-Used (MRU) BIS variant
        ASI_ST_BLKINIT_MRU_PRIMARY(0xF2);

        // @formatter:on

        // 8-bit ASI encoding; -1 marks "no ASI chosen".
        private final int value;

        private Asi(int value) {
            this.value = value;
        }

        public int getValue() {
            return value;
        }

        /** Returns true unless this is the {@link #INVALID} sentinel. */
        public boolean isValid() {
            return value != INVALID.getValue();
        }
    }
+
    /**
     * Function codes — presumably the fcn field of the {@code prefetch}
     * instruction (see Prefetch in Op3s); confirm at call sites. Each constant
     * names the access pattern being hinted.
     */
    public enum Fcn {
        SeveralWritesAndPossiblyReads(2),
        SeveralReadsWeak(0),
        OneRead(1),
        OneWrite(3),
        Page(4),
        NearestUnifiedCache(17),
        SeveralReadsStrong(20),
        OneReadStrong(21),
        SeveralWritesAndPossiblyReadsStrong(22),
        OneWriteStrong(23);

        // 5-bit fcn field encoding.
        private final int value;

        private Fcn(int value) {
            this.value = value;
        }

        public int getValue() {
            return value;
        }
    }
+
    /**
     * Tests whether the target SPARC architecture advertises the given CPU feature
     * (e.g. a VIS level).
     */
    public boolean hasFeature(CPUFeature feature) {
        return ((SPARC) this.target.arch).features.contains(feature);
    }
+
+    public static final int simm(int x, int nbits) {
+        // assert_signed_range(x, nbits);
+        return x & ((1 << nbits) - 1);
+    }
+
    /**
     * Tests whether {@code x} fits unchanged into an {@code nbits}-wide field,
     * i.e. whether masking to the low {@code nbits} bits (see {@link #simm})
     * preserves the value. Only values in [0, 2^nbits) pass.
     */
    public static final boolean isImm(int x, int nbits) {
        // assert_signed_range(x, nbits);
        return simm(x, nbits) == x;
    }
+
+    /**
+     * Minimum value for signed immediate ranges.
+     */
+    public static long minSimm(long nbits) {
+        return -(1L << (nbits - 1));
+    }
+
+    /**
+     * Maximum value for signed immediate ranges.
+     */
+    public static long maxSimm(long nbits) {
+        return (1L << (nbits - 1)) - 1;
+    }
+
    /**
     * Test if imm is within signed immediate range for nbits, i.e. in
     * [-2^(nbits-1), 2^(nbits-1) - 1].
     */
    public static boolean isSimm(long imm, int nbits) {
        return minSimm(nbits) <= imm && imm <= maxSimm(nbits);
    }
+
    /** Tests whether {@code imm} fits a signed 10-bit immediate. */
    public static boolean isSimm10(long imm) {
        return isSimm(imm, 10);
    }

    /** Tests whether {@code imm} fits a signed 11-bit immediate. */
    public static boolean isSimm11(long imm) {
        return isSimm(imm, 11);
    }

    /** Null constants are encodable (they act as 0); otherwise checks the long value. */
    public static boolean isSimm11(JavaConstant constant) {
        return constant.isNull() || isSimm11(constant.asLong());
    }

    /** Null constants are encodable; otherwise checks for a signed 5-bit immediate. */
    public static boolean isSimm5(JavaConstant constant) {
        return constant.isNull() || isSimm(constant.asLong(), 5);
    }

    /** Tests whether {@code imm} fits the standard signed 13-bit immediate field. */
    public static boolean isSimm13(int imm) {
        return isSimm(imm, 13);
    }

    /** Null constants are encodable; otherwise checks the long value. */
    public static boolean isSimm13(JavaConstant constant) {
        return constant.isNull() || isSimm13(constant.asLong());
    }

    /** Long variant: additionally requires the value to fit in an int at all. */
    public static boolean isSimm13(long imm) {
        return NumUtil.isInt(imm) && isSimm(imm, 13);
    }

    /** Tests whether a byte displacement is reachable by a 30-bit word displacement (call). */
    public static boolean isWordDisp30(long imm) {
        return isSimm(imm, 30 + 2);
    }
+
    /** Returns the high 22 bits of {@code x} — the part consumed by {@code sethi}. */
    public static final int hi22(int x) {
        return x >>> 10;
    }

    /** Returns the low 10 bits of {@code x} — the part not covered by {@link #hi22}. */
    public static final int lo10(int x) {
        return x & ((1 << 10) - 1);
    }
+
    // @formatter:off
    /**
     * Instruction format for Fmt00 instructions. This abstraction is needed as it
     * makes the patching easier later on.
     * <pre>
     * | 00  |    a   | op2 |               b                         |
     * |31 30|29    25|24 22|21                                      0|
     * </pre>
     */
    // @formatter:on
    protected void fmt00(int a, int op2, int b) {
        assert isImm(a, 5) && isImm(op2, 3) && isImm(b, 22) : String.format("a: 0x%x op2: 0x%x b: 0x%x", a, op2, b);
        // op field (bits 31:30) is 00, so no further bits need to be set.
        this.emitInt(a << 25 | op2 << 22 | b);
    }
+
    /**
     * Emits an op3 instruction whose opf field selects a floating-point or VIS
     * operation. A null {@code rs1}/{@code rs2} encodes register field 0.
     */
    private void op3(Op3s op3, Opfs opf, Register rs1, Register rs2, Register rd) {
        int b = opf.value << 5 | (rs2 == null ? 0 : rs2.encoding);
        fmt(op3.op.op, rd.encoding, op3.value, rs1 == null ? 0 : rs1.encoding, b);
    }

    /**
     * Emits the register-register form of an op3 instruction (rd = rs1 op rs2).
     * The X bit is merged in for the 64-bit shift variants (see {@link #getXBit}).
     */
    protected void op3(Op3s op3, Register rs1, Register rs2, Register rd) {
        int b = rs2 == null ? 0 : rs2.encoding;
        int xBit = getXBit(op3);
        fmt(op3.op.op, rd.encoding, op3.value, rs1 == null ? 0 : rs1.encoding, b | xBit);
    }

    /**
     * Emits the register-immediate form of an op3 instruction (rd = rs1 op simm13).
     */
    protected void op3(Op3s op3, Register rs1, int simm13, Register rd) {
        assert isSimm13(simm13);
        int i = 1 << 13; // i bit: selects the immediate operand encoding
        int simm13WithX = simm13 | getXBit(op3); // X bit (1 << 12) lies inside the 13-bit mask below
        fmt(op3.op.op, rd.encoding, op3.value, rs1.encoding, i | simm13WithX & ((1 << 13) - 1));
    }
+
    // @formatter:off
    /**
     * Branch on Integer Condition Codes.
     * <pre>
     * | 00  |annul| cond| 010 |               disp22                 |
     * |31 30|29   |28 25|24 22|21                                   0|
     * </pre>
     * The label may be unbound; the displacement is patched once it is bound.
     */
    // @formatter:on
    public void bicc(ConditionFlag cond, Annul annul, Label l) {
        bcc(Op2s.Br, cond, annul, l);
    }
+
    // @formatter:off
    /**
     * Branch on Floating-Point Condition Codes.
     * <pre>
     * | 00  |annul| cond| 110 |               disp22                 |
     * |31 30|29   |28 25|24 22|21                                   0|
     * </pre>
     * The label may be unbound; the displacement is patched once it is bound.
     */
    // @formatter:on
    public void fbcc(ConditionFlag cond, Annul annul, Label l) {
        bcc(Op2s.Fb, cond, annul, l);
    }
+
    // @formatter:off
    /**
     * Branch on (Integer|Floatingpoint) Condition Codes.
     * <pre>
     * | 00  |annul| cond| op2 |               disp22                 |
     * |31 30|29   |28 25|24 22|21                                   0|
     * </pre>
     */
    // @formatter:on
    private void bcc(Op2s op2, ConditionFlag cond, Annul annul, Label l) {
        // Displacement is counted in instructions (bytes / 4). Unbound labels emit
        // 0 and register a patch site (see patchUnbound).
        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
        final int disp = 22;
        assert isSimm(pos, disp);
        pos &= (1 << disp) - 1;
        // a field: annul bit above the 4-bit condition.
        int a = (annul.flag << 4) | cond.getValue();
        fmt00(a, op2.getValue(), pos);
    }
+
    // @formatter:off
    /**
     * Branch on Integer Condition Codes with Prediction.
     * <pre>
     * | 00  |an|cond | 001 |cc1 2|p |           disp19               |
     * |31 30|29|28 25|24 22|21 20|19|                               0|
     * </pre>
     * The label may be unbound; the displacement is patched once it is bound.
     */
    // @formatter:on
    public void bpcc(ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
        bpcc(Op2s.Bp, cond, annul, l, cc, predictTaken);
    }
+
    // @formatter:off
    /**
     * Branch on Floating-Point Condition Codes with Prediction.
     * <pre>
     * | 00  |an|cond | 101 |cc1 2|p |           disp19               |
     * |31 30|29|28 25|24 22|21 20|19|                               0|
     * </pre>
     * The label may be unbound; the displacement is patched once it is bound.
     */
    // @formatter:on
    public void fbpcc(ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
        bpcc(Op2s.Fbp, cond, annul, l, cc, predictTaken);
    }
+
    // @formatter:off
    /**
     * Used for fbpcc (Float) and bpcc (Integer).
     * <pre>
     * | 00  |an|cond | op2 |cc1 2|p |           disp19               |
     * |31 30|29|28 25|24 22|21 20|19|                               0|
     * </pre>
     */
    // @formatter:on
    private void bpcc(Op2s op2, ConditionFlag cond, Annul annul, Label l, CC cc, BranchPredict predictTaken) {
        // Displacement is counted in instructions; unbound labels emit 0 and are patched later.
        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
        final int disp = 19;
        assert isSimm(pos, disp);
        pos &= (1 << disp) - 1;
        int a = (annul.flag << 4) | cond.getValue();
        // cc at bits 21:20, prediction bit at 19, disp19 below.
        int b = (cc.getValue() << 20) | ((predictTaken.flag) << 19) | pos;
        fmt00(a, op2.getValue(), b);
    }
+
+    // @formatter:off
+    /**
+     * Branch on Integer Register with Prediction.
+     * <pre>
+     * | 00  |an| 0|rcond | 011 |d16hi|p | rs1 |    d16lo             |
+     * |31 30|29|28|27 25 |24 22|21 20|19|18 14|                     0|
+     * </pre>
+     */
+    // @formatter:on
+    public void bpr(RCondition cond, Annul annul, Label l, BranchPredict predictTaken, Register rs1) {
+        int pos = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
+        final int disp = 16;
+        assert isSimm(pos, disp);
+        pos &= (1 << disp) - 1;
+        int a = (annul.flag << 4) | cond.getValue();
+        int d16hi = (pos >> 13) << 13;
+        int d16lo = d16hi ^ pos;
+        int b = (d16hi << 20) | (predictTaken.flag << 19) | (rs1.encoding() << 14) | d16lo;
+        fmt00(a, Op2s.Bpr.getValue(), b);
+    }
+
    /**
     * Records the current position as a patch site for the still-unbound label and
     * returns 0 as a placeholder displacement.
     */
    private int patchUnbound(Label label) {
        label.addPatchAt(position());
        return 0;
    }
+
    /** Compare and branch, 32-bit ("w") comparison, register-register form. */
    public void cbcondw(ConditionFlag cf, Register rs1, Register rs2, Label lab) {
        cbcond(0, 0, cf, rs1, rs2.encoding, lab);
    }

    /** Compare and branch, 32-bit ("w") comparison against a signed 5-bit immediate. */
    public void cbcondw(ConditionFlag cf, Register rs1, int rs2, Label lab) {
        assert isSimm(rs2, 5);
        cbcond(0, 1, cf, rs1, rs2 & ((1 << 5) - 1), lab);
    }

    /** Compare and branch, 64-bit ("x") comparison, register-register form. */
    public void cbcondx(ConditionFlag cf, Register rs1, Register rs2, Label lab) {
        cbcond(1, 0, cf, rs1, rs2.encoding, lab);
    }

    /** Compare and branch, 64-bit ("x") comparison against a signed 5-bit immediate. */
    public void cbcondx(ConditionFlag cf, Register rs1, int rs2, Label lab) {
        assert isSimm(rs2, 5);
        cbcond(1, 1, cf, rs1, rs2 & ((1 << 5) - 1), lab);
    }
+
    /**
     * Emits a CBcond compare-and-branch instruction.
     *
     * @param cc2 0 for a 32-bit, 1 for a 64-bit comparison (per the cbcondw/cbcondx callers)
     * @param i 0 when rs2 is a register encoding, 1 when it is a 5-bit immediate
     */
    private void cbcond(int cc2, int i, ConditionFlag cf, Register rs1, int rs2, Label l) {
        // Displacement is counted in instructions; unbound labels emit 0 and are patched later.
        int disp10 = !l.isBound() ? patchUnbound(l) : (l.position() - position()) / 4;
        assert isSimm(disp10, 10) && isImm(rs2, 5);
        disp10 &= (1 << 10) - 1;
        // The 4-bit condition is split around the always-set bit 3 of the a field.
        final int cLo = cf.value & 0b111;
        final int cHi = cf.value >> 3;
        final int d10Lo = disp10 & ((1 << 8) - 1);
        final int d10Hi = disp10 >> 8;
        int a = cHi << 4 | 0b1000 | cLo;
        // D10HI_SHIFT/D10LO_SHIFT are declared elsewhere in this file. CBcond
        // shares its op2 encoding with BPr, hence Op2s.Bpr below.
        int b = cc2 << 21 | d10Hi << D10HI_SHIFT | rs1.encoding << 14 | i << 13 | d10Lo << D10LO_SHIFT | rs2;
        fmt00(a, Op2s.Bpr.value, b);
    }
+
    // @formatter:off
    /**
     * NOP.
     * <pre>
     * | 00  |00000| 100 |                0                    |
     * |31 30|29 25|24 22|21                                  0|
     * </pre>
     */
    // @formatter:on
    public void nop() {
        // 1 << 24 sets only the op2 field (bits 24:22) to 0b100, i.e. sethi 0, %g0.
        emitInt(1 << 24);
    }
+
    /**
     * Emits {@code sethi imm22, dst}, loading the high 22 bits of {@code dst}
     * (companions: {@link #hi22} / {@link #lo10}).
     */
    public void sethi(int imm22, Register dst) {
        fmt00(dst.encoding, Op2s.Sethi.value, imm22);
    }
+
    // @formatter:off
    /**
     * Instruction format for calls.
     * <pre>
     * | 01  |                      disp30                             |
     * |31 30|29                                                      0|
     * </pre>
     * NOTE(review): the caller is expected to pass the displacement already in
     * words — confirm against call sites (see also {@link #isWordDisp30}).
     */
    // @formatter:on
    public void call(int disp30) {
        assert isImm(disp30, 30);
        int instr = 1 << 30; // op = 01
        instr |= disp30;
        emitInt(instr);
    }
+
    // Integer arithmetic/logic instructions. Each mnemonic has a reg-reg form
    // (rd = rs1 op rs2) and a reg-imm form (signed 13-bit immediate).

    /** Emits {@code add rs1, rs2, rd}. */
    public void add(Register rs1, Register rs2, Register rd) {
        op3(Add, rs1, rs2, rd);
    }

    /** Emits {@code add rs1, simm13, rd}. */
    public void add(Register rs1, int simm13, Register rd) {
        op3(Add, rs1, simm13, rd);
    }

    /** Emits {@code addc rs1, rs2, rd} (add with carry). */
    public void addc(Register rs1, Register rs2, Register rd) {
        op3(Addc, rs1, rs2, rd);
    }

    /** Emits {@code addc rs1, simm13, rd} (add with carry). */
    public void addc(Register rs1, int simm13, Register rd) {
        op3(Addc, rs1, simm13, rd);
    }

    /** Emits {@code addcc rs1, rs2, rd} (add, set condition codes). */
    public void addcc(Register rs1, Register rs2, Register rd) {
        op3(Addcc, rs1, rs2, rd);
    }

    /** Emits {@code addcc rs1, simm13, rd} (add, set condition codes). */
    public void addcc(Register rs1, int simm13, Register rd) {
        op3(Addcc, rs1, simm13, rd);
    }

    /** Emits {@code and rs1, rs2, rd}. */
    public void and(Register rs1, Register rs2, Register rd) {
        op3(And, rs1, rs2, rd);
    }

    /** Emits {@code and rs1, simm13, rd}. */
    public void and(Register rs1, int simm13, Register rd) {
        op3(And, rs1, simm13, rd);
    }

    /** Emits {@code andcc rs1, rs2, rd} (and, set condition codes). */
    public void andcc(Register rs1, Register rs2, Register rd) {
        op3(Andcc, rs1, rs2, rd);
    }

    /** Emits {@code andcc rs1, simm13, rd} (and, set condition codes). */
    public void andcc(Register rs1, int simm13, Register rd) {
        op3(Andcc, rs1, simm13, rd);
    }

    /** Emits {@code andn rs1, rs2, rd} (and-not). */
    public void andn(Register rs1, Register rs2, Register rd) {
        op3(Andn, rs1, rs2, rd);
    }

    /** Emits {@code andn rs1, simm13, rd} (and-not). */
    public void andn(Register rs1, int simm13, Register rd) {
        op3(Andn, rs1, simm13, rd);
    }

    /** Emits {@code andncc rs1, rs2, rd} (and-not, set condition codes). */
    public void andncc(Register rs1, Register rs2, Register rd) {
        op3(Andncc, rs1, rs2, rd);
    }

    /** Emits {@code andncc rs1, simm13, rd} (and-not, set condition codes). */
    public void andncc(Register rs1, int simm13, Register rd) {
        op3(Andncc, rs1, simm13, rd);
    }
+
    /** VIS3 {@code movwtos}: moves a word from integer register rs2 to single FP register rd. */
    public void movwtos(Register rs2, Register rd) {
        assert isSingleFloatRegister(rd) && isCPURegister(rs2) : String.format("%s %s", rs2, rd);
        op3(Impdep1, Movwtos, null, rs2, rd);
    }

    /** VIS3 {@code umulxhi}: unsigned multiply, high part of the extended product. */
    public void umulxhi(Register rs1, Register rs2, Register rd) {
        op3(Impdep1, UMulxhi, rs1, rs2, rd);
    }
+
    /** Emits {@code fdtos rs2, rd}: converts double rs2 to single rd. */
    public void fdtos(Register rs2, Register rd) {
        assert isSingleFloatRegister(rd) && isDoubleFloatRegister(rs2) : String.format("%s %s", rs2, rd);
        op3(Fpop1, Fdtos, null, rs2, rd);
    }
+
+    public void movstouw(Register rs2, Register rd) {
+        assert isSingleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
+        op3(Impdep1, Movstosw, null, rs2, rd);
+    }
+
    /** VIS3 {@code movstosw}: moves single FP register rs2 to integer register rd (sign-extending). */
    public void movstosw(Register rs2, Register rd) {
        assert isSingleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
        op3(Impdep1, Movstosw, null, rs2, rd);
    }

    /** VIS3 {@code movdtox}: moves double FP register rs2 to integer register rd. */
    public void movdtox(Register rs2, Register rd) {
        assert isDoubleFloatRegister(rs2) && isCPURegister(rd) : String.format("%s %s", rs2, rd);
        op3(Impdep1, Movdtox, null, rs2, rd);
    }

    /** VIS3 {@code movxtod}: moves integer register rs2 to double FP register rd. */
    public void movxtod(Register rs2, Register rd) {
        assert isCPURegister(rs2) && isDoubleFloatRegister(rd) : String.format("%s %s", rs2, rd);
        op3(Impdep1, Movxtod, null, rs2, rd);
    }
+
    // Floating-point arithmetic (Fpop1). Two-register ops encode the source in rs2.

    /** Emits {@code fadds rs1, rs2, rd} (single add). */
    public void fadds(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Fadds, rs1, rs2, rd);
    }

    /** Emits {@code faddd rs1, rs2, rd} (double add). */
    public void faddd(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Faddd, rs1, rs2, rd);
    }

    /** Emits {@code faddq rs1, rs2, rd} (quad add). */
    public void faddq(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Faddq, rs1, rs2, rd);
    }

    /** Emits {@code fdivs rs1, rs2, rd} (single divide). */
    public void fdivs(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Fdivs, rs1, rs2, rd);
    }

    /** Emits {@code fdivd rs1, rs2, rd} (double divide). */
    public void fdivd(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Fdivd, rs1, rs2, rd);
    }

    /** Emits {@code fmovs rs2, rd} (single move). */
    public void fmovs(Register rs2, Register rd) {
        op3(Fpop1, Fmovs, null, rs2, rd);
    }

    /** Emits {@code fmovd rs2, rd} (double move). */
    public void fmovd(Register rs2, Register rd) {
        op3(Fpop1, Fmovd, null, rs2, rd);
    }

    /** Emits {@code fmuls rs1, rs2, rd} (single multiply). */
    public void fmuls(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Fmuls, rs1, rs2, rd);
    }

    /** Emits {@code fsmuld rs1, rs2, rd} (single multiply, double result). */
    public void fsmuld(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Fsmuld, rs1, rs2, rd);
    }

    /** Emits {@code fmuld rs1, rs2, rd} (double multiply). */
    public void fmuld(Register rs1, Register rs2, Register rd) {
        op3(Fpop1, Fmuld, rs1, rs2, rd);
    }

    /** Emits {@code fnegs rs2, rd} (single negate). */
    public void fnegs(Register rs2, Register rd) {
        op3(Fpop1, Fnegs, null, rs2, rd);
    }

    /** Emits {@code fnegd rs2, rd} (double negate). */
    public void fnegd(Register rs2, Register rd) {
        op3(Fpop1, Fnegd, null, rs2, rd);
    }
+
+    /**
+     * Helper method to determine if the instruction needs the X bit set.
+     */
+    private static int getXBit(Op3s op3) {
+        switch (op3) {
+            case Sllx:
+            case Srax:
+            case Srlx:
+                return 1 << 12;
+            default:
+                return 0;
+        }
+    }
+
+    public void fstoi(Register rs2, Register rd) {
+        op3(Fpop1, Fstoi, null, rs2, rd);
+    }
+
+    public void fstox(Register rs2, Register rd) {
+        op3(Fpop1, Fstox, null, rs2, rd);
+    }
+
+    public void fdtox(Register rs2, Register rd) {
+        op3(Fpop1, Fdtox, null, rs2, rd);
+    }
+
+    public void fstod(Register rs2, Register rd) {
+        op3(Fpop1, Fstod, null, rs2, rd);
+    }
+
+    public void fdtoi(Register rs2, Register rd) {
+        op3(Fpop1, Fdtoi, null, rs2, rd);
+    }
+
+    public void fitos(Register rs2, Register rd) {
+        op3(Fpop1, Fitos, null, rs2, rd);
+    }
+
+    public void fitod(Register rs2, Register rd) {
+        op3(Fpop1, Fitod, null, rs2, rd);
+    }
+
+    public void fxtos(Register rs2, Register rd) {
+        op3(Fpop1, Fxtos, null, rs2, rd);
+    }
+
+    public void fxtod(Register rs2, Register rd) {
+        op3(Fpop1, Fxtod, null, rs2, rd);
+    }
+
+    public void fzeros(Register rd) {
+        op3(Impdep1, Fzeros, null, null, rd);
+    }
+
+    public void fzerod(Register rd) {
+        op3(Impdep1, Fzerod, null, null, rd);
+    }
+
+    public void flushw() {
+        op3(Flushw, g0, g0, g0);
+    }
+
+    public void fsqrtd(Register rs2, Register rd) {
+        op3(Fpop1, Fsqrtd, null, rs2, rd);
+    }
+
+    public void fsqrts(Register rs2, Register rd) {
+        op3(Fpop1, Fsqrts, null, rs2, rd);
+    }
+
+    public void fabss(Register rs2, Register rd) {
+        op3(Fpop1, Fabss, null, rs2, rd);
+    }
+
+    public void fabsd(Register rs2, Register rd) {
+        op3(Fpop1, Fabsd, null, rs2, rd);
+    }
+
+    public void fsubs(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fsubs, rs1, rs2, rd);
+    }
+
+    public void fsubd(Register rs1, Register rs2, Register rd) {
+        op3(Fpop1, Fsubd, rs1, rs2, rd);
+    }
+
+    // @formatter:off
+    /**
+     * Instruction format for fcmp.
+     * <pre>
+     * | 10  | --- |cc1|cc0|desc |   rs1   |   opf  | rs2 |
+     * |31 30|29 27|26 |25 |24 19|18     14|13     5|4   0|
+     * </pre>
+     */
+    // @formatter:on
+    public void fcmp(CC cc, Opfs opf, Register rs1, Register rs2) {
+        int a = cc.value;
+        int b = opf.value << 5 | rs2.encoding;
+        fmt10(a, Fcmp.value, rs1.encoding, b);
+    }
+
+    // @formatter:off
+    /**
+     * Instruction format for most arithmetic stuff.
+     * <pre>
+     * |  10 | rd  | op3 | rs1 |   b   |
+     * |31 30|29 25|24 19|18 14|13    0|
+     * </pre>
+     */
+    // @formatter:on
+    protected void fmt10(int rd, int op3, int rs1, int b) {
+        fmt(0b10, rd, op3, rs1, b);
+    }
+
+    // @formatter:off
+    /**
+     * Instruction format for most arithmetic stuff.
+     * <pre>
+     * |  op | rd  | op3 | rs1 |   b   |
+     * |31 30|29 25|24 19|18 14|13    0|
+     * </pre>
+     */
+    // @formatter:on
+    protected void fmt(int op, int rd, int op3, int rs1, int b) {
+        assert isImm(rd, 5) && isImm(op3, 6) && isImm(b, 14) : String.format("rd: 0x%x op3: 0x%x b: 0x%x", rd, op3, b);
+        int instr = op << 30 | rd << 25 | op3 << 19 | rs1 << 14 | b;
+        emitInt(instr);
+    }
+
+    public void illtrap(int const22) {
+        fmt00(0, Op2s.Illtrap.value, const22);
+    }
+
+    public void jmpl(Register rs1, Register rs2, Register rd) {
+        op3(Jmpl, rs1, rs2, rd);
+    }
+
+    public void jmpl(Register rs1, int simm13, Register rd) {
+        op3(Jmpl, rs1, simm13, rd);
+    }
+
+    public void fmovdcc(ConditionFlag cond, CC cc, Register rs2, Register rd) {
+        fmovcc(cond, cc, rs2, rd, Fmovdcc.value);
+    }
+
+    public void fmovscc(ConditionFlag cond, CC cc, Register rs2, Register rd) {
+        fmovcc(cond, cc, rs2, rd, Fmovscc.value);
+    }
+
+    private void fmovcc(ConditionFlag cond, CC cc, Register rs2, Register rd, int opfLow) {
+        int opfCC = cc.value;
+        int a = opfCC << 11 | opfLow << 5 | rs2.encoding;
+        fmt10(rd.encoding, Fpop2.value, cond.value, a);
+    }
+
+    public void movcc(ConditionFlag conditionFlag, CC cc, Register rs2, Register rd) {
+        movcc(conditionFlag, cc, 0, rs2.encoding, rd);
+    }
+
+    public void movcc(ConditionFlag conditionFlag, CC cc, int simm11, Register rd) {
+        assert isSimm11(simm11);
+        movcc(conditionFlag, cc, 1, simm11 & ((1 << 11) - 1), rd);
+    }
+
+    private void movcc(ConditionFlag conditionFlag, CC cc, int i, int imm, Register rd) {
+        int cc01 = 0b11 & cc.value;
+        int cc2 = cc.isFloat ? 0 : 1;
+        int a = cc2 << 4 | conditionFlag.value;
+        int b = cc01 << 11 | i << 13 | imm;
+        fmt10(rd.encoding, Movcc.value, a, b);
+    }
+
+    public void mulx(Register rs1, Register rs2, Register rd) {
+        op3(Mulx, rs1, rs2, rd);
+    }
+
+    public void mulx(Register rs1, int simm13, Register rd) {
+        op3(Mulx, rs1, simm13, rd);
+    }
+
+    public void or(Register rs1, Register rs2, Register rd) {
+        assert isCPURegister(rs1, rs2, rd) : String.format("%s %s %s", rs1, rs2, rd);
+        op3(Or, rs1, rs2, rd);
+    }
+
+    public void or(Register rs1, int simm13, Register rd) {
+        assert isCPURegister(rs1, rd) : String.format("%s %s", rs1, rd);
+        op3(Or, rs1, simm13, rd);
+    }
+
+    public void popc(Register rs2, Register rd) {
+        op3(Popc, g0, rs2, rd);
+    }
+
+    public void popc(int simm13, Register rd) {
+        op3(Popc, g0, simm13, rd);
+    }
+
+    public void prefetch(SPARCAddress addr, Fcn fcn) {
+        Register rs1 = addr.getBase();
+        if (addr.getIndex().equals(Register.None)) {
+            int dis = addr.getDisplacement();
+            assert isSimm13(dis);
+            fmt(Prefetch.op.op, fcn.value, Prefetch.value, rs1.encoding, 1 << 13 | dis & ((1 << 13) - 1));
+        } else {
+            Register rs2 = addr.getIndex();
+            fmt(Prefetch.op.op, fcn.value, Prefetch.value, rs1.encoding, rs2.encoding);
+        }
+    }
+
+    // A.44 Read State Register
+
+    public void rdpc(Register rd) {
+        op3(Rd, r5, g0, rd);
+    }
+
+    public void restore(Register rs1, Register rs2, Register rd) {
+        op3(Restore, rs1, rs2, rd);
+    }
+
+    public static final int PC_RETURN_OFFSET = 8;
+
+    public void save(Register rs1, Register rs2, Register rd) {
+        op3(Save, rs1, rs2, rd);
+    }
+
+    public void save(Register rs1, int simm13, Register rd) {
+        op3(Save, rs1, simm13, rd);
+    }
+
+    public void sdivx(Register rs1, Register rs2, Register rd) {
+        op3(Sdivx, rs1, rs2, rd);
+    }
+
+    public void sdivx(Register rs1, int simm13, Register rd) {
+        op3(Sdivx, rs1, simm13, rd);
+    }
+
+    public void udivx(Register rs1, Register rs2, Register rd) {
+        op3(Udivx, rs1, rs2, rd);
+    }
+
+    public void udivx(Register rs1, int simm13, Register rd) {
+        op3(Udivx, rs1, simm13, rd);
+    }
+
+    public void sll(Register rs1, Register rs2, Register rd) {
+        op3(Sll, rs1, rs2, rd);
+    }
+
+    public void sll(Register rs1, int shcnt32, Register rd) {
+        assert isImm(shcnt32, 5);
+        op3(Sll, rs1, shcnt32, rd);
+    }
+
+    public void sllx(Register rs1, Register rs2, Register rd) {
+        op3(Sllx, rs1, rs2, rd);
+    }
+
+    public void sllx(Register rs1, int shcnt64, Register rd) {
+        assert isImm(shcnt64, 6);
+        op3(Sllx, rs1, shcnt64, rd);
+    }
+
+    public void sra(Register rs1, Register rs2, Register rd) {
+        op3(Sra, rs1, rs2, rd);
+    }
+
+    public void sra(Register rs1, int simm13, Register rd) {
+        op3(Sra, rs1, simm13, rd);
+    }
+
+    public void srax(Register rs1, Register rs2, Register rd) {
+        op3(Srax, rs1, rs2, rd);
+    }
+
+    public void srax(Register rs1, int shcnt64, Register rd) {
+        assert isImm(shcnt64, 6);
+        op3(Srax, rs1, shcnt64, rd);
+    }
+
+    public void srl(Register rs1, Register rs2, Register rd) {
+        op3(Srl, rs1, rs2, rd);
+    }
+
+    public void srl(Register rs1, int simm13, Register rd) {
+        op3(Srl, rs1, simm13, rd);
+    }
+
+    public void srlx(Register rs1, Register rs2, Register rd) {
+        op3(Srlx, rs1, rs2, rd);
+    }
+
+    public void srlx(Register rs1, int shcnt64, Register rd) {
+        assert isImm(shcnt64, 6);
+        op3(Srlx, rs1, shcnt64, rd);
+    }
+
+    public void fandd(Register rs1, Register rs2, Register rd) {
+        op3(Impdep1, Fandd, rs1, rs2, rd);
+    }
+
+    public void sub(Register rs1, Register rs2, Register rd) {
+        op3(Sub, rs1, rs2, rd);
+    }
+
+    public void sub(Register rs1, int simm13, Register rd) {
+        op3(Sub, rs1, simm13, rd);
+    }
+
+    public void subcc(Register rs1, Register rs2, Register rd) {
+        op3(Subcc, rs1, rs2, rd);
+    }
+
+    public void subcc(Register rs1, int simm13, Register rd) {
+        op3(Subcc, rs1, simm13, rd);
+    }
+
+    public void ta(int trap) {
+        tcc(Icc, Always, trap);
+    }
+
+    public void tcc(CC cc, ConditionFlag flag, int trap) {
+        assert isImm(trap, 8);
+        int b = cc.value << 11;
+        b |= 1 << 13;
+        b |= trap;
+        fmt10(flag.value, Op3s.Tcc.getValue(), 0, b);
+    }
+
+    public void wrccr(Register rs1, Register rs2) {
+        op3(Wr, rs1, rs2, r2);
+    }
+
+    public void wrccr(Register rs1, int simm13) {
+        op3(Wr, rs1, simm13, r2);
+    }
+
+    public void xor(Register rs1, Register rs2, Register rd) {
+        op3(Xor, rs1, rs2, rd);
+    }
+
+    public void xor(Register rs1, int simm13, Register rd) {
+        op3(Xor, rs1, simm13, rd);
+    }
+
+    public void xorcc(Register rs1, Register rs2, Register rd) {
+        op3(Xorcc, rs1, rs2, rd);
+    }
+
+    public void xorcc(Register rs1, int simm13, Register rd) {
+        op3(Xorcc, rs1, simm13, rd);
+    }
+
+    public void xnor(Register rs1, Register rs2, Register rd) {
+        op3(Xnor, rs1, rs2, rd);
+    }
+
+    public void xnor(Register rs1, int simm13, Register rd) {
+        op3(Xnor, rs1, simm13, rd);
+    }
+
+    /*
+     * Load/Store
+     */
+    protected void ld(Op3s op3, SPARCAddress addr, Register rd, Asi asi) {
+        Register rs1 = addr.getBase();
+        if (!addr.getIndex().equals(Register.None)) {
+            Register rs2 = addr.getIndex();
+            if (asi != null) {
+                int b = rs2.encoding;
+                b |= asi.value << 5;
+                fmt(op3.op.op, rd.encoding, op3.value, rs1.encoding, b);
+            } else {
+                op3(op3, rs1, rs2, rd);
+            }
+        } else {
+            int imm = addr.getDisplacement();
+            op3(op3, rs1, imm, rd);
+        }
+    }
+
+    protected void ld(Op3s op3, SPARCAddress addr, Register rd) {
+        ld(op3, addr, rd, null);
+    }
+
+    public void lddf(SPARCAddress src, Register dst) {
+        assert isDoubleFloatRegister(dst) : dst;
+        ld(Lddf, src, dst);
+    }
+
+    public void ldf(SPARCAddress src, Register dst) {
+        assert isSingleFloatRegister(dst) : dst;
+        ld(Ldf, src, dst);
+    }
+
+    public void lduh(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Lduh, src, dst);
+    }
+
+    public void ldsh(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldsh, src, dst);
+    }
+
+    public void ldub(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldub, src, dst);
+    }
+
+    public void ldsb(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldsb, src, dst);
+    }
+
+    public void lduw(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Lduw, src, dst);
+    }
+
+    public void ldsw(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldsw, src, dst);
+    }
+
+    public void ldx(SPARCAddress src, Register dst) {
+        assert isCPURegister(dst) : dst;
+        ld(Ldx, src, dst);
+    }
+
+    public void ldxa(Register rs1, Register rs2, Register rd, Asi asi) {
+        assert SPARC.isCPURegister(rs1, rs2, rd) : format("%s %s %s", rs1, rs2, rd);
+        ld(Ldxa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    public void lduwa(Register rs1, Register rs2, Register rd, Asi asi) {
+        assert SPARC.isCPURegister(rs1, rs2, rd) : format("%s %s %s", rs1, rs2, rd);
+        ld(Lduwa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    protected void st(Op3s op3, Register rs1, SPARCAddress dest) {
+        ld(op3, dest, rs1);
+    }
+
+    public void stdf(Register rd, SPARCAddress addr) {
+        assert isDoubleFloatRegister(rd) : rd;
+        st(Stdf, rd, addr);
+    }
+
+    public void stf(Register rd, SPARCAddress addr) {
+        assert isSingleFloatRegister(rd) : rd;
+        st(Stf, rd, addr);
+    }
+
+    public void stb(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Stb, rd, addr);
+    }
+
+    public void sth(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Sth, rd, addr);
+    }
+
+    public void stw(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Stw, rd, addr);
+    }
+
+    public void stx(Register rd, SPARCAddress addr) {
+        assert isCPURegister(rd) : rd;
+        st(Stx, rd, addr);
+    }
+
+    public void membar(int barriers) {
+        op3(Membar, r15, barriers, g0);
+    }
+
+    public void casa(Register rs1, Register rs2, Register rd, Asi asi) {
+        ld(Casa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    public void casxa(Register rs1, Register rs2, Register rd, Asi asi) {
+        ld(Casxa, new SPARCAddress(rs1, rs2), rd, asi);
+    }
+
+    @Override
+    public InstructionCounter getInstructionCounter() {
+        return new SPARCInstructionCounter(this);
+    }
+
+    public void patchAddImmediate(int position, int simm13) {
+        int inst = getInt(position);
+        assert SPARCAssembler.isSimm13(simm13) : simm13;
+        assert (inst >>> 30) == 0b10 : String.format("0x%x", inst);
+        assert ((inst >>> 18) & 0b11_1111) == 0 : String.format("0x%x", inst);
+        assert (inst & (1 << 13)) != 0 : String.format("0x%x", inst);
+        inst = inst & (~((1 << 13) - 1));
+        inst |= simm13 & ((1 << 13) - 1);
+        emitInt(inst, position);
+    }
+
+    public void fpadd32(Register rs1, Register rs2, Register rd) {
+        op3(Impdep1, Fpadd32, rs1, rs2, rd);
+    }
+
+    public boolean isCbcond(int i) {
+        return (i & 0xC1C00000) == 0xC00000;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCInstructionCounter.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.sparc;
+
+import java.util.*;
+
+import com.oracle.graal.asm.Assembler.*;
+
+public class SPARCInstructionCounter implements InstructionCounter {
+    // Use a treemap to keep the order in the output
+    private static final TreeMap<String, SPARCInstructionMatch> INSTRUCTION_MATCHER = new TreeMap<>();
+    static {
+        // @formatter:off
+        INSTRUCTION_MATCHER.put("nop", new SPARCInstructionMatch(0xFFFF_FFFF, 0x0100_0000));
+        INSTRUCTION_MATCHER.put("st", new OP3LowBitsMatcher(0b11, 0x4, 0x5, 0x6, 0x7, 0xe, 0xf));
+        INSTRUCTION_MATCHER.put("ld", new OP3LowBitsMatcher(0b11, 0x0, 0x1, 0x2, 0x3, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd));
+        INSTRUCTION_MATCHER.put("all", new SPARCInstructionMatch(0x0, 0x0));
+        // @formatter:on
+    }
+    private final SPARCAssembler asm;
+
+    public SPARCInstructionCounter(SPARCAssembler asm) {
+        super();
+        this.asm = asm;
+    }
+
+    @Override
+    public int[] countInstructions(String[] instructionTypes, int beginPc, int endPc) {
+        SPARCInstructionMatch[] matchers = new SPARCInstructionMatch[instructionTypes.length];
+        for (int i = 0; i < instructionTypes.length; i++) {
+            String typeName = instructionTypes[i];
+            matchers[i] = INSTRUCTION_MATCHER.get(typeName);
+            if (matchers[i] == null) {
+                throw new IllegalArgumentException(String.format("Unknown instruction class %s, supported types are: %s", typeName, INSTRUCTION_MATCHER.keySet()));
+            }
+        }
+        return countBetween(matchers, beginPc, endPc);
+    }
+
+    private int[] countBetween(SPARCInstructionMatch[] matchers, int startPc, int endPc) {
+        int[] counts = new int[matchers.length];
+        for (int p = startPc; p < endPc; p += 4) {
+            int instr = asm.getInt(p);
+            for (int i = 0; i < matchers.length; i++) {
+                SPARCInstructionMatch matcher = matchers[i];
+                if (matcher.matches(instr)) {
+                    counts[i]++;
+                }
+            }
+        }
+        return counts;
+    }
+
+    @Override
+    public String[] getSupportedInstructionTypes() {
+        return INSTRUCTION_MATCHER.keySet().toArray(new String[0]);
+    }
+
+    /**
+     * Tests the lower 4 bits of the op3 field.
+     */
+    private static class OP3LowBitsMatcher extends SPARCInstructionMatch {
+        private final int[] op3b03;
+        private final int op;
+
+        public OP3LowBitsMatcher(int op, int... op3b03) {
+            super(0, 0);
+            this.op = op;
+            this.op3b03 = op3b03;
+        }
+
+        @Override
+        public boolean matches(int instruction) {
+            if (instruction >>> 30 != op) {
+                return false;
+            }
+            int op3lo = (instruction >> 19) & ((1 << 4) - 1);
+            for (int op3Part : op3b03) {
+                if (op3Part == op3lo) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
+
+    private static class SPARCInstructionMatch {
+        private final int mask;
+        private final int[] patterns;
+
+        public SPARCInstructionMatch(int mask, int... patterns) {
+            super();
+            this.mask = mask;
+            this.patterns = patterns;
+        }
+
+        public boolean matches(int instruction) {
+            for (int pattern : patterns) {
+                if ((instruction & mask) == pattern) {
+                    return true;
+                }
+            }
+            return false;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.sparc/src/com/oracle/graal/asm/sparc/SPARCMacroAssembler.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.sparc;
+
+import static com.oracle.graal.asm.sparc.SPARCAssembler.Annul.*;
+import static com.oracle.graal.asm.sparc.SPARCAssembler.ConditionFlag.*;
+import static com.oracle.jvmci.sparc.SPARC.*;
+
+import java.util.function.*;
+
+import com.oracle.graal.asm.*;
+import com.oracle.jvmci.code.*;
+
+public class SPARCMacroAssembler extends SPARCAssembler {
+
+    /**
+     * A sentinel value used as a place holder in an instruction stream for an address that will be
+     * patched.
+     */
+    private static final SPARCAddress Placeholder = new SPARCAddress(g0, 0);
+    private final ScratchRegister[] scratchRegister = new ScratchRegister[]{new ScratchRegister(g1), new ScratchRegister(g3)};
+    // Points to the next free scratch register
+    private int nextFreeScratchRegister = 0;
+
+    public SPARCMacroAssembler(TargetDescription target, RegisterConfig registerConfig) {
+        super(target, registerConfig);
+    }
+
+    @Override
+    public void align(int modulus) {
+        while (position() % modulus != 0) {
+            nop();
+        }
+    }
+
+    @Override
+    public void jmp(Label l) {
+        bicc(Always, NOT_ANNUL, l);
+        nop();  // delay slot
+    }
+
+    @Override
+    protected final void patchJumpTarget(int branch, int branchTarget) {
+        final int disp = (branchTarget - branch) / 4;
+        final int inst = getInt(branch);
+        Op2s op2 = Op2s.byValue((inst & OP2_MASK) >> OP2_SHIFT);
+        int maskBits;
+        int setBits;
+        switch (op2) {
+            case Br:
+            case Fb:
+            case Sethi:
+            case Illtrap:
+                // Disp 22 in the lower 22 bits
+                assert isSimm(disp, 22);
+                setBits = disp << DISP22_SHIFT;
+                maskBits = DISP22_MASK;
+                break;
+            case Fbp:
+            case Bp:
+                // Disp 19 in the lower 19 bits
+                assert isSimm(disp, 19);
+                setBits = disp << DISP19_SHIFT;
+                maskBits = DISP19_MASK;
+                break;
+            case Bpr:
+                boolean isCBcond = (inst & CBCOND_MASK) != 0;
+                if (isCBcond) {
+                    assert isSimm10(disp) : String.format("%d: instruction: 0x%x", disp, inst);
+                    int d10Split = 0;
+                    d10Split |= (disp & 0b11_0000_0000) << D10HI_SHIFT - 8;
+                    d10Split |= (disp & 0b00_1111_1111) << D10LO_SHIFT;
+                    setBits = d10Split;
+                    maskBits = D10LO_MASK | D10HI_MASK;
+                } else {
+                    assert isSimm(disp, 16);
+                    int d16Split = 0;
+                    d16Split |= (disp & 0b1100_0000_0000_0000) << D16HI_SHIFT - 14;
+                    d16Split |= (disp & 0b0011_1111_1111_1111) << D16LO_SHIFT;
+                    setBits = d16Split;
+                    maskBits = D16HI_MASK | D16LO_MASK;
+                }
+                break;
+            default:
+                throw new InternalError("Unknown op2 " + op2);
+        }
+        int newInst = ~maskBits & inst;
+        newInst |= setBits;
+        emitInt(newInst, branch);
+    }
+
+    @Override
+    public AbstractAddress makeAddress(Register base, int displacement) {
+        return new SPARCAddress(base, displacement);
+    }
+
+    @Override
+    public AbstractAddress getPlaceholder() {
+        return Placeholder;
+    }
+
+    @Override
+    public final void ensureUniquePC() {
+        nop();
+    }
+
+    public void cas(Register rs1, Register rs2, Register rd) {
+        casa(rs1, rs2, rd, Asi.ASI_PRIMARY);
+    }
+
+    public void casx(Register rs1, Register rs2, Register rd) {
+        casxa(rs1, rs2, rd, Asi.ASI_PRIMARY);
+    }
+
+    public void clr(Register dst) {
+        or(g0, g0, dst);
+    }
+
+    public void clrb(SPARCAddress addr) {
+        stb(g0, addr);
+    }
+
+    public void clrh(SPARCAddress addr) {
+        sth(g0, addr);
+    }
+
+    public void clrx(SPARCAddress addr) {
+        stx(g0, addr);
+    }
+
+    public void cmp(Register rs1, Register rs2) {
+        subcc(rs1, rs2, g0);
+    }
+
+    public void cmp(Register rs1, int simm13) {
+        subcc(rs1, simm13, g0);
+    }
+
+    public void dec(Register rd) {
+        sub(rd, 1, rd);
+    }
+
+    public void dec(int simm13, Register rd) {
+        sub(rd, simm13, rd);
+    }
+
+    public void jmp(SPARCAddress address) {
+        jmpl(address.getBase(), address.getDisplacement(), g0);
+    }
+
+    public void jmp(Register rd) {
+        jmpl(rd, 0, g0);
+    }
+
+    public void neg(Register rs1, Register rd) {
+        sub(g0, rs1, rd);
+    }
+
+    public void neg(Register rd) {
+        sub(g0, rd, rd);
+    }
+
+    public void mov(Register rs, Register rd) {
+        or(g0, rs, rd);
+    }
+
+    public void mov(int simm13, Register rd) {
+        or(g0, simm13, rd);
+    }
+
+    public void not(Register rs1, Register rd) {
+        xnor(rs1, g0, rd);
+    }
+
+    public void not(Register rd) {
+        xnor(rd, g0, rd);
+    }
+
+    public void restoreWindow() {
+        restore(g0, g0, g0);
+    }
+
+    public void ret() {
+        jmpl(i7, 8, g0);
+    }
+
+    /**
+     * This instruction is like sethi but for 64-bit values.
+     */
+    public static class Sethix {
+
+        private static final int INSTRUCTION_SIZE = 7;
+
+        private long value;
+        private Register dst;
+        private boolean forceRelocatable;
+        private boolean delayed = false;
+        private Consumer<SPARCAssembler> delayedInstructionEmitter;
+
+        public Sethix(long value, Register dst, boolean forceRelocatable, boolean delayed) {
+            this(value, dst, forceRelocatable);
+            assert !(forceRelocatable && delayed) : "Relocatable sethix cannot be delayed";
+            this.delayed = delayed;
+        }
+
+        public Sethix(long value, Register dst, boolean forceRelocatable) {
+            this.value = value;
+            this.dst = dst;
+            this.forceRelocatable = forceRelocatable;
+        }
+
+        public Sethix(long value, Register dst) {
+            this(value, dst, false);
+        }
+
+        private void emitInstruction(Consumer<SPARCAssembler> cb, SPARCMacroAssembler masm) {
+            if (delayed) {
+                if (this.delayedInstructionEmitter != null) {
+                    delayedInstructionEmitter.accept(masm);
+                }
+                delayedInstructionEmitter = cb;
+            } else {
+                cb.accept(masm);
+            }
+        }
+
+        public void emit(SPARCMacroAssembler masm) {
+            final int hi = (int) (value >> 32);
+            final int lo = (int) (value & ~0);
+
+            // This is the same logic as MacroAssembler::internal_set.
+            final int startPc = masm.position();
+
+            if (hi == 0 && lo >= 0) {
+                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(lo), dst);
+                emitInstruction(cb, masm);
+            } else if (hi == -1) {
+                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(~lo), dst);
+                emitInstruction(cb, masm);
+                cb = eMasm -> eMasm.xor(dst, ~lo10(~0), dst);
+                emitInstruction(cb, masm);
+            } else {
+                final int shiftcnt;
+                final int shiftcnt2;
+                Consumer<SPARCAssembler> cb = eMasm -> eMasm.sethi(hi22(hi), dst);
+                emitInstruction(cb, masm);
+                if ((hi & 0x3ff) != 0) {                                  // Any bits?
+                    // msb 32-bits are now in lsb 32
+                    cb = eMasm -> eMasm.or(dst, hi & 0x3ff, dst);
+                    emitInstruction(cb, masm);
+                }
+                if ((lo & 0xFFFFFC00) != 0) {                             // done?
+                    if (((lo >> 20) & 0xfff) != 0) {                      // Any bits set?
+                        // Make room for next 12 bits
+                        cb = eMasm -> eMasm.sllx(dst, 12, dst);
+                        emitInstruction(cb, masm);
+                        // Or in next 12
+                        cb = eMasm -> eMasm.or(dst, (lo >> 20) & 0xfff, dst);
+                        emitInstruction(cb, masm);
+                        shiftcnt = 0;                                     // We already shifted
+                    } else {
+                        shiftcnt = 12;
+                    }
+                    if (((lo >> 10) & 0x3ff) != 0) {
+                        // Make room for last 10 bits
+                        cb = eMasm -> eMasm.sllx(dst, shiftcnt + 10, dst);
+                        emitInstruction(cb, masm);
+                        // Or in next 10
+                        cb = eMasm -> eMasm.or(dst, (lo >> 10) & 0x3ff, dst);
+                        emitInstruction(cb, masm);
+                        shiftcnt2 = 0;
+                    } else {
+                        shiftcnt2 = 10;
+                    }
+                    // Shift leaving disp field 0'd
+                    cb = eMasm -> eMasm.sllx(dst, shiftcnt2 + 10, dst);
+                    emitInstruction(cb, masm);
+                } else {
+                    cb = eMasm -> eMasm.sllx(dst, 32, dst);
+                    emitInstruction(cb, masm);
+                }
+            }
+            // Pad out the instruction sequence so it can be patched later.
+            if (forceRelocatable) {
+                while (masm.position() < (startPc + (INSTRUCTION_SIZE * 4))) {
+                    Consumer<SPARCAssembler> cb = eMasm -> eMasm.nop();
+                    emitInstruction(cb, masm);
+                }
+            }
+        }
+
+        public void emitDelayed(SPARCMacroAssembler masm) {
+            assert delayedInstructionEmitter != null;
+            delayedInstructionEmitter.accept(masm);
+        }
+    }
+
+    public static class Setx {
+
+        private long value;
+        private Register dst;
+        private boolean forceRelocatable;
+        private boolean delayed = false;
+        private boolean delayedFirstEmitted = false;
+        private Sethix sethix;
+        private Consumer<SPARCMacroAssembler> delayedAdd;
+
+        public Setx(long value, Register dst, boolean forceRelocatable, boolean delayed) {
+            assert !(forceRelocatable && delayed) : "Cannot use relocatable setx as delayable";
+            this.value = value;
+            this.dst = dst;
+            this.forceRelocatable = forceRelocatable;
+            this.delayed = delayed;
+        }
+
+        public Setx(long value, Register dst, boolean forceRelocatable) {
+            this(value, dst, forceRelocatable, false);
+        }
+
+        public Setx(long value, Register dst) {
+            this(value, dst, false);
+        }
+
+        public void emit(SPARCMacroAssembler masm) {
+            assert !delayed;
+            doEmit(masm);
+        }
+
+        private void doEmit(SPARCMacroAssembler masm) {
+            sethix = new Sethix(value, dst, forceRelocatable, delayed);
+            sethix.emit(masm);
+            int lo = (int) (value & ~0);
+            if (lo10(lo) != 0 || forceRelocatable) {
+                Consumer<SPARCMacroAssembler> add = eMasm -> eMasm.add(dst, lo10(lo), dst);
+                if (delayed) {
+                    sethix.emitDelayed(masm);
+                    sethix = null;
+                    delayedAdd = add;
+                } else {
+                    sethix = null;
+                    add.accept(masm);
+                }
+            }
+        }
+
+        public void emitFirstPartOfDelayed(SPARCMacroAssembler masm) {
+            assert !forceRelocatable : "Cannot use delayed mode with relocatable setx";
+            assert delayed : "Can only be used in delayed mode";
+            doEmit(masm);
+            delayedFirstEmitted = true;
+        }
+
+        public void emitSecondPartOfDelayed(SPARCMacroAssembler masm) {
+            assert !forceRelocatable : "Cannot use delayed mode with relocatable setx";
+            assert delayed : "Can only be used in delayed mode";
+            assert delayedFirstEmitted : "First part has not been emitted so far.";
+            assert delayedAdd == null && sethix != null || delayedAdd != null && sethix == null : "Either add or sethix must be set";
+            if (delayedAdd != null) {
+                delayedAdd.accept(masm);
+            } else {
+                sethix.emitDelayed(masm);
+            }
+
+        }
+    }
+
+    public void signx(Register rs, Register rd) {
+        sra(rs, g0, rd);
+    }
+
+    public void signx(Register rd) {
+        sra(rd, g0, rd);
+    }
+
+    public ScratchRegister getScratchRegister() {
+        return scratchRegister[nextFreeScratchRegister++];
+    }
+
+    public class ScratchRegister implements AutoCloseable {
+        private final Register register;
+
+        public ScratchRegister(Register register) {
+            super();
+            this.register = register;
+        }
+
+        public Register getRegister() {
+            return register;
+        }
+
+        public void close() {
+            assert nextFreeScratchRegister > 0 : "Close called too often";
+            nextFreeScratchRegister--;
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm.test/src/com/oracle/graal/asm/test/AssemblerTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm.test;
+
+import java.lang.reflect.*;
+
+import org.junit.*;
+
+import com.oracle.graal.test.*;
+import com.oracle.jvmci.code.*;
+import com.oracle.jvmci.debug.*;
+import com.oracle.jvmci.debug.Debug.Scope;
+import com.oracle.jvmci.meta.*;
+import com.oracle.jvmci.runtime.*;
+import com.oracle.jvmci.service.*;
+
+/**
+ * Base class for assembler unit tests: assembles machine code produced by a
+ * {@link CodeGenTest}, installs it via the code cache, and executes it against a reference
+ * Java method.
+ */
+public abstract class AssemblerTest extends GraalTest {
+
+    private final MetaAccessProvider metaAccess;
+    protected final CodeCacheProvider codeCache;
+
+    /** Callback that emits the machine code for a test. */
+    public interface CodeGenTest {
+        byte[] generateCode(CompilationResult compResult, TargetDescription target, RegisterConfig registerConfig, CallingConvention cc);
+    }
+
+    public AssemblerTest() {
+        JVMCIBackend providers = JVMCI.getRuntime().getHostJVMCIBackend();
+        this.metaAccess = providers.getMetaAccess();
+        this.codeCache = providers.getCodeCache();
+    }
+
+    public MetaAccessProvider getMetaAccess() {
+        return metaAccess;
+    }
+
+    /**
+     * Assembles {@code m} using the code generated by {@code test}, installs it and sanity
+     * checks that any available disassembler produces non-empty output for it.
+     */
+    protected InstalledCode assembleMethod(Method m, CodeGenTest test) {
+        ResolvedJavaMethod method = getMetaAccess().lookupJavaMethod(m);
+        try (Scope s = Debug.scope("assembleMethod", method, codeCache)) {
+            RegisterConfig registerConfig = codeCache.getRegisterConfig();
+            CallingConvention cc = CodeUtil.getCallingConvention(codeCache, CallingConvention.Type.JavaCallee, method, false);
+
+            CompilationResult compResult = new CompilationResult();
+            byte[] targetCode = test.generateCode(compResult, codeCache.getTarget(), registerConfig, cc);
+            compResult.setTargetCode(targetCode, targetCode.length);
+            compResult.setTotalFrameSize(0);
+
+            InstalledCode code = codeCache.addMethod(method, compResult, null, null);
+
+            // A null disassembly is tolerated (disassembler unavailable); an empty one is not.
+            for (DisassemblerProvider dis : Services.load(DisassemblerProvider.class)) {
+                String disasm = dis.disassemble(code);
+                Assert.assertTrue(code.toString(), disasm == null || disasm.length() > 0);
+            }
+            return code;
+        } catch (Throwable e) {
+            throw Debug.handle(e);
+        }
+    }
+
+    /** Assembles the named method and executes the installed code with {@code args}. */
+    protected Object runTest(String methodName, CodeGenTest test, Object... args) {
+        Method method = getMethod(methodName);
+        InstalledCode code = assembleMethod(method, test);
+        try {
+            return code.executeVarargs(args);
+        } catch (InvalidInstalledCodeException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /** Runs the test and asserts that the installed code returned {@code expected}. */
+    protected void assertReturn(String methodName, CodeGenTest test, Object expected, Object... args) {
+        Object actual = runTest(methodName, test, args);
+        Assert.assertEquals("unexpected return value", expected, actual);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm/overview.html	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 2 only, as
+published by the Free Software Foundation.  Oracle designates this
+particular file as subject to the "Classpath" exception as provided
+by Oracle in the LICENSE file that accompanied this code.
+
+This code is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+version 2 for more details (a copy is included in the LICENSE file that
+accompanied this code).
+
+You should have received a copy of the GNU General Public License version
+2 along with this work; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+or visit www.oracle.com if you need additional information or have any
+questions.
+-->
+
+</head>
+<body>
+
+Documentation for the <code>com.oracle.graal.asm</code> project.
+
+</body>
+</html>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/AsmOptions.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2011, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm;
+
+/** Options controlling the assembler infrastructure. */
+public class AsmOptions {
+
+    // Initial size (in bytes) of a Buffer's backing array; the buffer grows on demand.
+    public static int InitialCodeBufferSize = 232;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Assembler.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm;
+
+import java.nio.*;
+import java.util.*;
+
+import com.oracle.jvmci.code.*;
+
+/**
+ * The platform-independent base class for the assembler.
+ */
+public abstract class Assembler {
+
+    public final TargetDescription target;
+    // Lazily created; see requestLabelHint and captureLabelPositions.
+    private List<LabelHint> jumpDisplacementHints;
+
+    /**
+     * Backing code buffer.
+     */
+    private final Buffer codeBuffer;
+
+    public Assembler(TargetDescription target) {
+        this.target = target;
+        // Pick the buffer flavor matching the target architecture's byte order.
+        if (target.arch.getByteOrder() == ByteOrder.BIG_ENDIAN) {
+            this.codeBuffer = new Buffer.BigEndian();
+        } else {
+            this.codeBuffer = new Buffer.LittleEndian();
+        }
+    }
+
+    /**
+     * Returns the current position of the underlying code buffer.
+     *
+     * @return current position in code buffer
+     */
+    public int position() {
+        return codeBuffer.position();
+    }
+
+    // The following emit methods append at the current position and advance it.
+
+    public final void emitByte(int x) {
+        codeBuffer.emitByte(x);
+    }
+
+    public final void emitShort(int x) {
+        codeBuffer.emitShort(x);
+    }
+
+    public final void emitInt(int x) {
+        codeBuffer.emitInt(x);
+    }
+
+    public final void emitLong(long x) {
+        codeBuffer.emitLong(x);
+    }
+
+    // The following emit methods write at a fixed position without moving the current
+    // position; they are used for patching previously emitted code.
+
+    public final void emitByte(int b, int pos) {
+        codeBuffer.emitByte(b, pos);
+    }
+
+    public final void emitShort(int b, int pos) {
+        codeBuffer.emitShort(b, pos);
+    }
+
+    public final void emitInt(int b, int pos) {
+        codeBuffer.emitInt(b, pos);
+    }
+
+    public final void emitLong(long b, int pos) {
+        codeBuffer.emitLong(b, pos);
+    }
+
+    public final int getByte(int pos) {
+        return codeBuffer.getByte(pos);
+    }
+
+    public final int getShort(int pos) {
+        return codeBuffer.getShort(pos);
+    }
+
+    public final int getInt(int pos) {
+        return codeBuffer.getInt(pos);
+    }
+
+    private static final String NEWLINE = System.getProperty("line.separator");
+
+    /**
+     * Some GPU architectures have a text based encoding.
+     */
+    public final void emitString(String x) {
+        emitString0("\t");  // XXX REMOVE ME pretty-printing
+        emitString0(x);
+        emitString0(NEWLINE);
+    }
+
+    // XXX for pretty-printing
+    // NOTE(review): uses x.length() (char count) as the byte count and the platform default
+    // charset for getBytes() — only correct for single-byte encodings of ASCII text; confirm
+    // all inputs are ASCII.
+    public final void emitString0(String x) {
+        codeBuffer.emitBytes(x.getBytes(), 0, x.length());
+    }
+
+    public void emitString(String s, int pos) {
+        codeBuffer.emitBytes(s.getBytes(), pos);
+    }
+
+    /**
+     * Closes this assembler. No extra data can be written to this assembler after this call.
+     *
+     * @param trimmedCopy if {@code true}, then a copy of the underlying byte array up to (but not
+     *            including) {@code position()} is returned
+     * @return the data in this buffer or a trimmed copy if {@code trimmedCopy} is {@code true}
+     */
+    public byte[] close(boolean trimmedCopy) {
+        return codeBuffer.close(trimmedCopy);
+    }
+
+    /**
+     * Binds {@code l} to the current position and patches all instructions recorded as
+     * jumping to it.
+     */
+    public void bind(Label l) {
+        assert !l.isBound() : "can bind label only once";
+        l.bind(position());
+        l.patchInstructions(this);
+    }
+
+    public abstract void align(int modulus);
+
+    public abstract void jmp(Label l);
+
+    protected abstract void patchJumpTarget(int branch, int jumpTarget);
+
+    // Lazily created by nameOf.
+    private Map<Label, String> nameMap;
+
+    /**
+     * Creates a name for a label.
+     *
+     * @param l the label for which a name is being created
+     * @param id a label identifier that is unique with the scope of this assembler
+     * @return a label name in the form of "L123"
+     */
+    protected String createLabelName(Label l, int id) {
+        return "L" + id;
+    }
+
+    /**
+     * Gets a name for a label, creating it if it does not yet exist. By default, the returned name
+     * is only unique with the scope of this assembler.
+     */
+    public String nameOf(Label l) {
+        if (nameMap == null) {
+            nameMap = new HashMap<>();
+        }
+        String name = nameMap.get(l);
+        if (name == null) {
+            name = createLabelName(l, nameMap.size());
+            nameMap.put(l, name);
+        }
+        return name;
+    }
+
+    /**
+     * This is used by the CompilationResultBuilder to convert a {@link StackSlot} to an
+     * {@link AbstractAddress}.
+     */
+    public abstract AbstractAddress makeAddress(Register base, int displacement);
+
+    /**
+     * Returns a target specific placeholder address that can be used for code patching.
+     */
+    public abstract AbstractAddress getPlaceholder();
+
+    /**
+     * Emits a NOP instruction to advance the current PC.
+     */
+    public abstract void ensureUniquePC();
+
+    /**
+     * Rewinds the code buffer to position 0 and captures the currently bound targets of all
+     * requested label hints so they survive the reset.
+     */
+    public void reset() {
+        codeBuffer.reset();
+        captureLabelPositions();
+    }
+
+    private void captureLabelPositions() {
+        if (jumpDisplacementHints == null) {
+            return;
+        }
+        for (LabelHint request : this.jumpDisplacementHints) {
+            request.capture();
+        }
+    }
+
+    /**
+     * Records a request for the eventual position of {@code label} relative to the current
+     * position; the hint becomes valid after {@link #reset()} captures it.
+     */
+    public LabelHint requestLabelHint(Label label) {
+        if (jumpDisplacementHints == null) {
+            jumpDisplacementHints = new ArrayList<>();
+        }
+        LabelHint hint = new LabelHint(label, position());
+        this.jumpDisplacementHints.add(hint);
+        return hint;
+    }
+
+    public InstructionCounter getInstructionCounter() {
+        throw new UnsupportedOperationException("Instruction counter is not implemented for " + this);
+    }
+
+    /**
+     * A captured (label position, use position) pair recorded by {@link #requestLabelHint}.
+     * Invalid ({@code capturedTarget < 0}) until {@link #capture()} runs on a bound label.
+     */
+    public static class LabelHint {
+        private Label label;
+        private int forPosition;
+        private int capturedTarget = -1;
+
+        protected LabelHint(Label label, int lastPosition) {
+            super();
+            this.label = label;
+            this.forPosition = lastPosition;
+        }
+
+        protected void capture() {
+            this.capturedTarget = label.position();
+        }
+
+        public int getTarget() {
+            assert isValid();
+            return capturedTarget;
+        }
+
+        public int getPosition() {
+            assert isValid();
+            return forPosition;
+        }
+
+        public boolean isValid() {
+            return capturedTarget >= 0;
+        }
+    }
+
+    /**
+     * Instruction counter class which gives the user of the assembler to count different kinds of
+     * instructions in the generated assembler code.
+     */
+    public interface InstructionCounter {
+        String[] getSupportedInstructionTypes();
+
+        int[] countInstructions(String[] instructionTypes, int beginPc, int endPc);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Buffer.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm;
+
+import java.util.*;
+
+/**
+ * Code buffer management for the assembler. Support for little endian and big endian architectures
+ * is implemented using subclasses.
+ */
+abstract class Buffer {
+
+    protected byte[] data;
+    protected int position;
+
+    public Buffer() {
+        data = new byte[AsmOptions.InitialCodeBufferSize];
+    }
+
+    public int position() {
+        return position;
+    }
+
+    public void setPosition(int position) {
+        assert position >= 0 && position <= data.length;
+        this.position = position;
+    }
+
+    /**
+     * Closes this buffer. No extra data can be written to this buffer after this call.
+     *
+     * @param trimmedCopy if {@code true}, then a copy of the underlying byte array up to (but not
+     *            including) {@code position()} is returned
+     * @return the data in this buffer or a trimmed copy if {@code trimmedCopy} is {@code true}
+     */
+    public byte[] close(boolean trimmedCopy) {
+        byte[] result = trimmedCopy ? Arrays.copyOf(data, position()) : data;
+        // Null out the backing array so any further emit attempts fail fast with an NPE.
+        data = null;
+        return result;
+    }
+
+    /** Returns a copy of the bytes in [start, end), or {@code null} if already closed. */
+    public byte[] copyData(int start, int end) {
+        if (data == null) {
+            return null;
+        }
+        return Arrays.copyOfRange(data, start, end);
+    }
+
+    /**
+     * Copies the data from this buffer into a given array.
+     *
+     * @param dst the destination array
+     * @param off starting position in {@code dst}
+     * @param len number of bytes to copy
+     */
+    public void copyInto(byte[] dst, int off, int len) {
+        System.arraycopy(data, 0, dst, off, len);
+    }
+
+    // Grows the backing array to 4x the required length to amortize future copies.
+    protected void ensureSize(int length) {
+        if (length >= data.length) {
+            data = Arrays.copyOf(data, length * 4);
+        }
+    }
+
+    public void emitBytes(byte[] arr, int off, int len) {
+        ensureSize(position + len);
+        System.arraycopy(arr, off, data, position, len);
+        position += len;
+    }
+
+    // Appending variants: write at the current position and advance it.
+
+    public void emitByte(int b) {
+        position = emitByte(b, position);
+    }
+
+    public void emitShort(int b) {
+        position = emitShort(b, position);
+    }
+
+    public void emitInt(int b) {
+        position = emitInt(b, position);
+    }
+
+    public void emitLong(long b) {
+        position = emitLong(b, position);
+    }
+
+    // Positional variants: write at {@code pos} and return the position following the write,
+    // without touching the current position (used for patching).
+
+    public int emitBytes(byte[] arr, int pos) {
+        final int len = arr.length;
+        final int newPos = pos + len;
+        ensureSize(newPos);
+        System.arraycopy(arr, 0, data, pos, len);
+        return newPos;
+    }
+
+    public int emitByte(int b, int pos) {
+        assert NumUtil.isUByte(b) || NumUtil.isByte(b);
+        int newPos = pos + 1;
+        ensureSize(newPos);
+        data[pos] = (byte) (b & 0xFF);
+        return newPos;
+    }
+
+    public abstract int emitShort(int b, int pos);
+
+    public abstract int emitInt(int b, int pos);
+
+    public abstract int emitLong(long b, int pos);
+
+    public int getByte(int pos) {
+        return data[pos] & 0xff;
+    }
+
+    public abstract int getShort(int pos);
+
+    public abstract int getInt(int pos);
+
+    /** Big-endian encoding: most significant byte first. */
+    public static final class BigEndian extends Buffer {
+
+        @Override
+        public int emitShort(int b, int pos) {
+            assert NumUtil.isUShort(b) || NumUtil.isShort(b);
+            int newPos = pos + 2;
+            ensureSize(pos + 2);
+            data[pos] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 1] = (byte) (b & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitInt(int b, int pos) {
+            int newPos = pos + 4;
+            ensureSize(newPos);
+            data[pos] = (byte) ((b >> 24) & 0xFF);
+            data[pos + 1] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 3] = (byte) (b & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitLong(long b, int pos) {
+            int newPos = pos + 8;
+            ensureSize(newPos);
+            data[pos] = (byte) ((b >> 56) & 0xFF);
+            data[pos + 1] = (byte) ((b >> 48) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 40) & 0xFF);
+            data[pos + 3] = (byte) ((b >> 32) & 0xFF);
+            data[pos + 4] = (byte) ((b >> 24) & 0xFF);
+            data[pos + 5] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 6] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 7] = (byte) (b & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int getShort(int pos) {
+            return (data[pos + 0] & 0xff) << 8 | (data[pos + 1] & 0xff) << 0;
+        }
+
+        @Override
+        public int getInt(int pos) {
+            return (data[pos + 0] & 0xff) << 24 | (data[pos + 1] & 0xff) << 16 | (data[pos + 2] & 0xff) << 8 | (data[pos + 3] & 0xff) << 0;
+        }
+    }
+
+    /** Little-endian encoding: least significant byte first. */
+    public static final class LittleEndian extends Buffer {
+
+        @Override
+        public int emitShort(int b, int pos) {
+            assert NumUtil.isUShort(b) || NumUtil.isShort(b);
+            int newPos = pos + 2;
+            ensureSize(newPos);
+            data[pos] = (byte) (b & 0xFF);
+            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitInt(int b, int pos) {
+            int newPos = pos + 4;
+            ensureSize(newPos);
+            data[pos] = (byte) (b & 0xFF);
+            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 3] = (byte) ((b >> 24) & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int emitLong(long b, int pos) {
+            int newPos = pos + 8;
+            ensureSize(newPos);
+            data[pos] = (byte) (b & 0xFF);
+            data[pos + 1] = (byte) ((b >> 8) & 0xFF);
+            data[pos + 2] = (byte) ((b >> 16) & 0xFF);
+            data[pos + 3] = (byte) ((b >> 24) & 0xFF);
+            data[pos + 4] = (byte) ((b >> 32) & 0xFF);
+            data[pos + 5] = (byte) ((b >> 40) & 0xFF);
+            data[pos + 6] = (byte) ((b >> 48) & 0xFF);
+            data[pos + 7] = (byte) ((b >> 56) & 0xFF);
+            return newPos;
+        }
+
+        @Override
+        public int getShort(int pos) {
+            return (data[pos + 1] & 0xff) << 8 | (data[pos + 0] & 0xff) << 0;
+        }
+
+        @Override
+        public int getInt(int pos) {
+            return (data[pos + 3] & 0xff) << 24 | (data[pos + 2] & 0xff) << 16 | (data[pos + 1] & 0xff) << 8 | (data[pos + 0] & 0xff) << 0;
+        }
+    }
+
+    /** Rewinds the buffer to position 0; the backing array (and its contents) is kept. */
+    public void reset() {
+        position = 0;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/Label.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm;
+
+import java.util.*;
+
+/**
+ * This class represents a label within assembly code.
+ */
+public final class Label {
+
+    // -1 while unbound; code-buffer position once bound.
+    private int position = -1;
+    private int blockId = -1;
+
+    /**
+     * References to instructions that jump to this unresolved label. These instructions need to be
+     * patched when the label is bound using the {@link #patchInstructions(Assembler)} method.
+     */
+    private ArrayList<Integer> patchPositions = null;
+
+    /**
+     * Returns the position of this label in the code buffer.
+     *
+     * @return the position
+     */
+    public int position() {
+        assert position >= 0 : "Unbound label is being referenced";
+        return position;
+    }
+
+    public Label() {
+    }
+
+    public Label(int id) {
+        blockId = id;
+    }
+
+    /** Returns the block id passed at construction, or -1 if none was given. */
+    public int getBlockId() {
+        return blockId;
+    }
+
+    /**
+     * Binds the label to the specified position.
+     *
+     * @param pos the position
+     */
+    protected void bind(int pos) {
+        this.position = pos;
+        assert isBound();
+    }
+
+    public boolean isBound() {
+        return position >= 0;
+    }
+
+    /**
+     * Records that the (still unbound) label is referenced by a branch at
+     * {@code branchLocation}, to be patched once the label is bound.
+     */
+    public void addPatchAt(int branchLocation) {
+        assert !isBound() : "Label is already bound " + this + " " + branchLocation + " at position " + position;
+        if (patchPositions == null) {
+            patchPositions = new ArrayList<>(2);
+        }
+        patchPositions.add(branchLocation);
+    }
+
+    /** Patches every recorded branch so it targets this (now bound) label. */
+    protected void patchInstructions(Assembler masm) {
+        assert isBound() : "Label should be bound";
+        if (patchPositions != null) {
+            int target = position;
+            for (int i = 0; i < patchPositions.size(); ++i) {
+                int pos = patchPositions.get(i);
+                masm.patchJumpTarget(pos, target);
+            }
+        }
+    }
+
+    /** Returns the label to its unbound state with no pending patches. */
+    public void reset() {
+        if (this.patchPositions != null) {
+            this.patchPositions.clear();
+        }
+        this.position = -1;
+    }
+
+    @Override
+    public String toString() {
+        return isBound() ? String.valueOf(position()) : "?";
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.asm/src/com/oracle/graal/asm/NumUtil.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.asm;
+
+// JaCoCo Exclude
+
+/**
+ * A collection of static utility functions that check ranges of numbers.
+ */
+public class NumUtil {
+
+    /** Determines if {@code x} is a valid shift count for a 32-bit value (0..31). */
+    public static boolean isShiftCount(int x) {
+        return 0 <= x && x < 32;
+    }
+
+    /**
+     * Determines if a given {@code int} value is in the range of unsigned byte values.
+     */
+    public static boolean isUByte(int x) {
+        return (x & 0xff) == x;
+    }
+
+    /**
+     * Determines if a given {@code int} value is in the range of signed byte values.
+     */
+    public static boolean isByte(int x) {
+        return (byte) x == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of unsigned byte values.
+     */
+    public static boolean isUByte(long x) {
+        return (x & 0xffL) == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of signed byte values.
+     */
+    public static boolean isByte(long l) {
+        return (byte) l == l;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of unsigned int values.
+     */
+    public static boolean isUInt(long x) {
+        return (x & 0xffffffffL) == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of signed int values.
+     */
+    public static boolean isInt(long l) {
+        return (int) l == l;
+    }
+
+    /**
+     * Determines if a given {@code int} value is in the range of signed short values.
+     */
+    public static boolean isShort(int x) {
+        return (short) x == x;
+    }
+
+    /**
+     * Determines if a given {@code long} value is in the range of signed short values.
+     */
+    public static boolean isShort(long x) {
+        return (short) x == x;
+    }
+
+    /** Determines if a given {@code int} value is in the range of unsigned short values. */
+    public static boolean isUShort(int s) {
+        return s == (s & 0xFFFF);
+    }
+
+    /** Determines if a given {@code long} value fits in a signed 32-bit integer. */
+    public static boolean is32bit(long x) {
+        return -0x80000000L <= x && x < 0x80000000L;
+    }
+
+    /** Narrows {@code v} to {@code short}, asserting that no information is lost. */
+    public static short safeToShort(int v) {
+        assert isShort(v);
+        return (short) v;
+    }
+
+    /** Rounds {@code number} up to the nearest multiple of {@code mod} (positive inputs). */
+    public static int roundUp(int number, int mod) {
+        return ((number + mod - 1) / mod) * mod;
+    }
+
+    /** Rounds {@code number} up to the nearest multiple of {@code mod} (positive inputs). */
+    public static long roundUp(long number, long mod) {
+        return ((number + mod - 1L) / mod) * mod;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.bytecode/overview.html	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
+<html>
+<head>
+<!--
+
+Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 2 only, as
+published by the Free Software Foundation.  Oracle designates this
+particular file as subject to the "Classpath" exception as provided
+by Oracle in the LICENSE file that accompanied this code.
+
+This code is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+version 2 for more details (a copy is included in the LICENSE file that
+accompanied this code).
+
+You should have received a copy of the GNU General Public License version
+2 along with this work; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+or visit www.oracle.com if you need additional information or have any
+questions.
+-->
+
+</head>
+<body>
+
+Documentation for the <code>com.oracle.graal.bytecode</code> project.
+
+</body>
+</html>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeLookupSwitch.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.bytecode;
+
+/**
+ * A utility for processing {@link Bytecodes#LOOKUPSWITCH} bytecodes.
+ */
+public class BytecodeLookupSwitch extends BytecodeSwitch {
+
+    private static final int OFFSET_TO_NUMBER_PAIRS = 4;
+    private static final int OFFSET_TO_FIRST_PAIR_MATCH = 8;
+    private static final int OFFSET_TO_FIRST_PAIR_OFFSET = 12;
+    private static final int PAIR_SIZE = 8;
+
+    /**
+     * Constructor for a {@link BytecodeStream}.
+     *
+     * @param stream the {@code BytecodeStream} containing the switch instruction
+     * @param bci the index in the stream of the switch instruction
+     */
+    public BytecodeLookupSwitch(BytecodeStream stream, int bci) {
+        super(stream, bci);
+    }
+
+    @Override
+    public int offsetAt(int i) {
+        return stream.readInt(alignedBci + OFFSET_TO_FIRST_PAIR_OFFSET + PAIR_SIZE * i);
+    }
+
+    @Override
+    public int keyAt(int i) {
+        return stream.readInt(alignedBci + OFFSET_TO_FIRST_PAIR_MATCH + PAIR_SIZE * i);
+    }
+
+    @Override
+    public int numberOfCases() {
+        return stream.readInt(alignedBci + OFFSET_TO_NUMBER_PAIRS);
+    }
+
+    @Override
+    public int size() {
+        return alignedBci + OFFSET_TO_FIRST_PAIR_MATCH + PAIR_SIZE * numberOfCases() - bci;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeStream.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.bytecode;
+
+/**
+ * A utility class that makes iterating over bytecodes and reading operands simpler and less error
+ * prone. For example, it handles the {@link Bytecodes#WIDE} instruction and wide variants of
+ * instructions internally.
+ */
+public final class BytecodeStream {
+
+    private final byte[] code;
+    private int opcode;
+    private int curBCI;
+    private int nextBCI;
+
+    /**
+     * Creates a new {@code BytecodeStream} for the specified bytecode.
+     *
+     * @param code the array of bytes that contains the bytecode
+     */
+    public BytecodeStream(byte[] code) {
+        assert code != null;
+        this.code = code;
+        setBCI(0);
+    }
+
+    /**
+     * Advances to the next bytecode.
+     */
+    public void next() {
+        setBCI(nextBCI);
+    }
+
+    /**
+     * Gets the next bytecode index (no side-effects).
+     *
+     * @return the next bytecode index
+     */
+    public int nextBCI() {
+        return nextBCI;
+    }
+
+    /**
+     * Gets the current bytecode index.
+     *
+     * @return the current bytecode index
+     */
+    public int currentBCI() {
+        return curBCI;
+    }
+
+    /**
+     * Gets the bytecode index of the end of the code.
+     *
+     * @return the index of the end of the code
+     */
+    public int endBCI() {
+        return code.length;
+    }
+
+    /**
+     * Gets the current opcode. This method will never return the {@link Bytecodes#WIDE WIDE}
+     * opcode, but will instead return the opcode that is modified by the {@code WIDE} opcode.
+     *
+     * @return the current opcode; {@link Bytecodes#END} if at or beyond the end of the code
+     */
+    public int currentBC() {
+        if (opcode == Bytecodes.WIDE) {
+            return Bytes.beU1(code, curBCI + 1);
+        } else {
+            return opcode;
+        }
+    }
+
+    /**
+     * Reads the index of a local variable for one of the load or store instructions. The WIDE
+     * modifier is handled internally.
+     *
+     * @return the index of the local variable
+     */
+    public int readLocalIndex() {
+        // read local variable index for load/store
+        if (opcode == Bytecodes.WIDE) {
+            return Bytes.beU2(code, curBCI + 2);
+        }
+        return Bytes.beU1(code, curBCI + 1);
+    }
+
+    /**
+     * Reads the delta for an {@link Bytecodes#IINC} bytecode.
+     *
+     * @return the delta for the {@code IINC}
+     */
+    public int readIncrement() {
+        // read the delta for the iinc bytecode
+        if (opcode == Bytecodes.WIDE) {
+            return Bytes.beS2(code, curBCI + 4);
+        }
+        return Bytes.beS1(code, curBCI + 2);
+    }
+
+    /**
+     * Reads the destination of a {@link Bytecodes#GOTO} or {@code IF} instruction.
+     *
+     * @return the destination bytecode index
+     */
+    public int readBranchDest() {
+        // reads the destination for a branch bytecode
+        if (opcode == Bytecodes.GOTO_W || opcode == Bytecodes.JSR_W) {
+            return curBCI + Bytes.beS4(code, curBCI + 1);
+        } else {
+            return curBCI + Bytes.beS2(code, curBCI + 1);
+        }
+    }
+
+    /**
+     * Read a signed 4-byte integer from the bytecode stream at the specified bytecode index.
+     *
+     * @param bci the bytecode index
+     * @return the integer value
+     */
+    public int readInt(int bci) {
+        // reads a 4-byte signed value
+        return Bytes.beS4(code, bci);
+    }
+
+    /**
+     * Reads an unsigned, 1-byte value from the bytecode stream at the specified bytecode index.
+     *
+     * @param bci the bytecode index
+     * @return the byte
+     */
+    public int readUByte(int bci) {
+        return Bytes.beU1(code, bci);
+    }
+
+    /**
+     * Reads a constant pool index for the current instruction.
+     *
+     * @return the constant pool index
+     */
+    public char readCPI() {
+        if (opcode == Bytecodes.LDC) {
+            return (char) Bytes.beU1(code, curBCI + 1);
+        }
+        return (char) Bytes.beU2(code, curBCI + 1);
+    }
+
+    /**
+     * Reads a constant pool index for an invokedynamic instruction.
+     *
+     * @return the constant pool index
+     */
+    public int readCPI4() {
+        assert opcode == Bytecodes.INVOKEDYNAMIC;
+        return Bytes.beS4(code, curBCI + 1);
+    }
+
+    /**
+     * Reads a signed, 1-byte value for the current instruction (e.g. BIPUSH).
+     *
+     * @return the byte
+     */
+    public byte readByte() {
+        return code[curBCI + 1];
+    }
+
+    /**
+     * Reads a signed, 2-byte short for the current instruction (e.g. SIPUSH).
+     *
+     * @return the short value
+     */
+    public short readShort() {
+        return (short) Bytes.beS2(code, curBCI + 1);
+    }
+
+    /**
+     * Sets the bytecode index to the specified value. If {@code bci} is beyond the end of the
+     * array, {@link #currentBC} will return {@link Bytecodes#END} and other methods may throw
+     * {@link ArrayIndexOutOfBoundsException}.
+     *
+     * @param bci the new bytecode index
+     */
+    public void setBCI(int bci) {
+        curBCI = bci;
+        if (curBCI < code.length) {
+            opcode = Bytes.beU1(code, bci);
+            assert opcode < Bytecodes.BREAKPOINT : "illegal bytecode";
+            nextBCI = bci + lengthOf();
+        } else {
+            opcode = Bytecodes.END;
+            nextBCI = curBCI;
+        }
+    }
+
+    /**
+     * Gets the length of the current bytecode.
+     */
+    private int lengthOf() {
+        int length = Bytecodes.lengthOf(opcode);
+        if (length == 0) {
+            switch (opcode) {
+                case Bytecodes.TABLESWITCH: {
+                    return new BytecodeTableSwitch(this, curBCI).size();
+                }
+                case Bytecodes.LOOKUPSWITCH: {
+                    return new BytecodeLookupSwitch(this, curBCI).size();
+                }
+                case Bytecodes.WIDE: {
+                    int opc = Bytes.beU1(code, curBCI + 1);
+                    if (opc == Bytecodes.RET) {
+                        return 4;
+                    } else if (opc == Bytecodes.IINC) {
+                        return 6;
+                    } else {
+                        return 4; // a load or store bytecode
+                    }
+                }
+                default:
+                    throw new Error("unknown variable-length bytecode: " + opcode);
+            }
+        }
+        return length;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeSwitch.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.bytecode;
+
+/**
+ * An abstract class that provides the state and methods common to {@link Bytecodes#LOOKUPSWITCH}
+ * and {@link Bytecodes#TABLESWITCH} instructions.
+ */
+public abstract class BytecodeSwitch {
+
+    /**
+     * The {@link BytecodeStream} containing the bytecode array.
+     */
+    protected final BytecodeStream stream;
+    /**
+     * Index of start of switch instruction.
+     */
+    protected final int bci;
+    /**
+     * Index of the start of the additional data for the switch instruction, aligned to a multiple
+     * of four from the method start.
+     */
+    protected final int alignedBci;
+
+    /**
+     * Constructor for a {@link BytecodeStream}.
+     *
+     * @param stream the {@code BytecodeStream} containing the switch instruction
+     * @param bci the index in the stream of the switch instruction
+     */
+    public BytecodeSwitch(BytecodeStream stream, int bci) {
+        this.stream = stream;
+        this.bci = bci;
+        this.alignedBci = (bci + 4) & 0xfffffffc;
+    }
+
+    /**
+     * Gets the current bytecode index.
+     *
+     * @return the current bytecode index
+     */
+    public int bci() {
+        return bci;
+    }
+
+    /**
+     * Gets the index of the instruction denoted by the {@code i}'th switch target.
+     *
+     * @param i index of the switch target
+     * @return the index of the instruction denoted by the {@code i}'th switch target
+     */
+    public int targetAt(int i) {
+        return bci + offsetAt(i);
+    }
+
+    /**
+     * Gets the index of the instruction for the default switch target.
+     *
+     * @return the index of the instruction for the default switch target
+     */
+    public int defaultTarget() {
+        return bci + defaultOffset();
+    }
+
+    /**
+     * Gets the offset from the start of the switch instruction to the default switch target.
+     *
+     * @return the offset to the default switch target
+     */
+    public int defaultOffset() {
+        return stream.readInt(alignedBci);
+    }
+
+    /**
+     * Gets the key at the {@code i}'th switch target index.
+     *
+     * @param i the switch target index
+     * @return the key at the {@code i}'th switch target index
+     */
+    public abstract int keyAt(int i);
+
+    /**
+     * Gets the offset from the start of the switch instruction for the {@code i}'th switch target.
+     *
+     * @param i the switch target index
+     * @return the offset to the {@code i}'th switch target
+     */
+    public abstract int offsetAt(int i);
+
+    /**
+     * Gets the number of switch targets.
+     *
+     * @return the number of switch targets
+     */
+    public abstract int numberOfCases();
+
+    /**
+     * Gets the total size in bytes of the switch instruction.
+     *
+     * @return the total size in bytes of the switch instruction
+     */
+    public abstract int size();
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/BytecodeTableSwitch.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.bytecode;
+
+/**
+ * A utility for processing {@link Bytecodes#TABLESWITCH} bytecodes.
+ */
+public class BytecodeTableSwitch extends BytecodeSwitch {
+
+    private static final int OFFSET_TO_LOW_KEY = 4;
+    private static final int OFFSET_TO_HIGH_KEY = 8;
+    private static final int OFFSET_TO_FIRST_JUMP_OFFSET = 12;
+    private static final int JUMP_OFFSET_SIZE = 4;
+
+    /**
+     * Constructor for a {@link BytecodeStream}.
+     *
+     * @param stream the {@code BytecodeStream} containing the switch instruction
+     * @param bci the index in the stream of the switch instruction
+     */
+    public BytecodeTableSwitch(BytecodeStream stream, int bci) {
+        super(stream, bci);
+    }
+
+    /**
+     * Gets the low key of the table switch.
+     *
+     * @return the low key
+     */
+    public int lowKey() {
+        return stream.readInt(alignedBci + OFFSET_TO_LOW_KEY);
+    }
+
+    /**
+     * Gets the high key of the table switch.
+     *
+     * @return the high key
+     */
+    public int highKey() {
+        return stream.readInt(alignedBci + OFFSET_TO_HIGH_KEY);
+    }
+
+    @Override
+    public int keyAt(int i) {
+        return lowKey() + i;
+    }
+
+    @Override
+    public int offsetAt(int i) {
+        return stream.readInt(alignedBci + OFFSET_TO_FIRST_JUMP_OFFSET + JUMP_OFFSET_SIZE * i);
+    }
+
+    @Override
+    public int numberOfCases() {
+        return highKey() - lowKey() + 1;
+    }
+
+    @Override
+    public int size() {
+        return alignedBci + OFFSET_TO_FIRST_JUMP_OFFSET + JUMP_OFFSET_SIZE * numberOfCases() - bci;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/Bytecodes.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,829 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.bytecode;
+
+import static com.oracle.graal.bytecode.Bytecodes.Flags.*;
+
+import java.lang.reflect.*;
+
+/**
+ * Definitions of the standard Java bytecodes defined by the <a href=
+ * "http://java.sun.com/docs/books/jvms/second_edition/html/VMSpecTOC.doc.html"> Java Virtual
+ * Machine Specification</a>.
+ */
+public class Bytecodes {
+
+    // @formatter:off
+    public static final int NOP                  =   0; // 0x00
+    public static final int ACONST_NULL          =   1; // 0x01
+    public static final int ICONST_M1            =   2; // 0x02
+    public static final int ICONST_0             =   3; // 0x03
+    public static final int ICONST_1             =   4; // 0x04
+    public static final int ICONST_2             =   5; // 0x05
+    public static final int ICONST_3             =   6; // 0x06
+    public static final int ICONST_4             =   7; // 0x07
+    public static final int ICONST_5             =   8; // 0x08
+    public static final int LCONST_0             =   9; // 0x09
+    public static final int LCONST_1             =  10; // 0x0A
+    public static final int FCONST_0             =  11; // 0x0B
+    public static final int FCONST_1             =  12; // 0x0C
+    public static final int FCONST_2             =  13; // 0x0D
+    public static final int DCONST_0             =  14; // 0x0E
+    public static final int DCONST_1             =  15; // 0x0F
+    public static final int BIPUSH               =  16; // 0x10
+    public static final int SIPUSH               =  17; // 0x11
+    public static final int LDC                  =  18; // 0x12
+    public static final int LDC_W                =  19; // 0x13
+    public static final int LDC2_W               =  20; // 0x14
+    public static final int ILOAD                =  21; // 0x15
+    public static final int LLOAD                =  22; // 0x16
+    public static final int FLOAD                =  23; // 0x17
+    public static final int DLOAD                =  24; // 0x18
+    public static final int ALOAD                =  25; // 0x19
+    public static final int ILOAD_0              =  26; // 0x1A
+    public static final int ILOAD_1              =  27; // 0x1B
+    public static final int ILOAD_2              =  28; // 0x1C
+    public static final int ILOAD_3              =  29; // 0x1D
+    public static final int LLOAD_0              =  30; // 0x1E
+    public static final int LLOAD_1              =  31; // 0x1F
+    public static final int LLOAD_2              =  32; // 0x20
+    public static final int LLOAD_3              =  33; // 0x21
+    public static final int FLOAD_0              =  34; // 0x22
+    public static final int FLOAD_1              =  35; // 0x23
+    public static final int FLOAD_2              =  36; // 0x24
+    public static final int FLOAD_3              =  37; // 0x25
+    public static final int DLOAD_0              =  38; // 0x26
+    public static final int DLOAD_1              =  39; // 0x27
+    public static final int DLOAD_2              =  40; // 0x28
+    public static final int DLOAD_3              =  41; // 0x29
+    public static final int ALOAD_0              =  42; // 0x2A
+    public static final int ALOAD_1              =  43; // 0x2B
+    public static final int ALOAD_2              =  44; // 0x2C
+    public static final int ALOAD_3              =  45; // 0x2D
+    public static final int IALOAD               =  46; // 0x2E
+    public static final int LALOAD               =  47; // 0x2F
+    public static final int FALOAD               =  48; // 0x30
+    public static final int DALOAD               =  49; // 0x31
+    public static final int AALOAD               =  50; // 0x32
+    public static final int BALOAD               =  51; // 0x33
+    public static final int CALOAD               =  52; // 0x34
+    public static final int SALOAD               =  53; // 0x35
+    public static final int ISTORE               =  54; // 0x36
+    public static final int LSTORE               =  55; // 0x37
+    public static final int FSTORE               =  56; // 0x38
+    public static final int DSTORE               =  57; // 0x39
+    public static final int ASTORE               =  58; // 0x3A
+    public static final int ISTORE_0             =  59; // 0x3B
+    public static final int ISTORE_1             =  60; // 0x3C
+    public static final int ISTORE_2             =  61; // 0x3D
+    public static final int ISTORE_3             =  62; // 0x3E
+    public static final int LSTORE_0             =  63; // 0x3F
+    public static final int LSTORE_1             =  64; // 0x40
+    public static final int LSTORE_2             =  65; // 0x41
+    public static final int LSTORE_3             =  66; // 0x42
+    public static final int FSTORE_0             =  67; // 0x43
+    public static final int FSTORE_1             =  68; // 0x44
+    public static final int FSTORE_2             =  69; // 0x45
+    public static final int FSTORE_3             =  70; // 0x46
+    public static final int DSTORE_0             =  71; // 0x47
+    public static final int DSTORE_1             =  72; // 0x48
+    public static final int DSTORE_2             =  73; // 0x49
+    public static final int DSTORE_3             =  74; // 0x4A
+    public static final int ASTORE_0             =  75; // 0x4B
+    public static final int ASTORE_1             =  76; // 0x4C
+    public static final int ASTORE_2             =  77; // 0x4D
+    public static final int ASTORE_3             =  78; // 0x4E
+    public static final int IASTORE              =  79; // 0x4F
+    public static final int LASTORE              =  80; // 0x50
+    public static final int FASTORE              =  81; // 0x51
+    public static final int DASTORE              =  82; // 0x52
+    public static final int AASTORE              =  83; // 0x53
+    public static final int BASTORE              =  84; // 0x54
+    public static final int CASTORE              =  85; // 0x55
+    public static final int SASTORE              =  86; // 0x56
+    public static final int POP                  =  87; // 0x57
+    public static final int POP2                 =  88; // 0x58
+    public static final int DUP                  =  89; // 0x59
+    public static final int DUP_X1               =  90; // 0x5A
+    public static final int DUP_X2               =  91; // 0x5B
+    public static final int DUP2                 =  92; // 0x5C
+    public static final int DUP2_X1              =  93; // 0x5D
+    public static final int DUP2_X2              =  94; // 0x5E
+    public static final int SWAP                 =  95; // 0x5F
+    public static final int IADD                 =  96; // 0x60
+    public static final int LADD                 =  97; // 0x61
+    public static final int FADD                 =  98; // 0x62
+    public static final int DADD                 =  99; // 0x63
+    public static final int ISUB                 = 100; // 0x64
+    public static final int LSUB                 = 101; // 0x65
+    public static final int FSUB                 = 102; // 0x66
+    public static final int DSUB                 = 103; // 0x67
+    public static final int IMUL                 = 104; // 0x68
+    public static final int LMUL                 = 105; // 0x69
+    public static final int FMUL                 = 106; // 0x6A
+    public static final int DMUL                 = 107; // 0x6B
+    public static final int IDIV                 = 108; // 0x6C
+    public static final int LDIV                 = 109; // 0x6D
+    public static final int FDIV                 = 110; // 0x6E
+    public static final int DDIV                 = 111; // 0x6F
+    public static final int IREM                 = 112; // 0x70
+    public static final int LREM                 = 113; // 0x71
+    public static final int FREM                 = 114; // 0x72
+    public static final int DREM                 = 115; // 0x73
+    public static final int INEG                 = 116; // 0x74
+    public static final int LNEG                 = 117; // 0x75
+    public static final int FNEG                 = 118; // 0x76
+    public static final int DNEG                 = 119; // 0x77
+    public static final int ISHL                 = 120; // 0x78
+    public static final int LSHL                 = 121; // 0x79
+    public static final int ISHR                 = 122; // 0x7A
+    public static final int LSHR                 = 123; // 0x7B
+    public static final int IUSHR                = 124; // 0x7C
+    public static final int LUSHR                = 125; // 0x7D
+    public static final int IAND                 = 126; // 0x7E
+    public static final int LAND                 = 127; // 0x7F
+    public static final int IOR                  = 128; // 0x80
+    public static final int LOR                  = 129; // 0x81
+    public static final int IXOR                 = 130; // 0x82
+    public static final int LXOR                 = 131; // 0x83
+    public static final int IINC                 = 132; // 0x84
+    public static final int I2L                  = 133; // 0x85
+    public static final int I2F                  = 134; // 0x86
+    public static final int I2D                  = 135; // 0x87
+    public static final int L2I                  = 136; // 0x88
+    public static final int L2F                  = 137; // 0x89
+    public static final int L2D                  = 138; // 0x8A
+    public static final int F2I                  = 139; // 0x8B
+    public static final int F2L                  = 140; // 0x8C
+    public static final int F2D                  = 141; // 0x8D
+    public static final int D2I                  = 142; // 0x8E
+    public static final int D2L                  = 143; // 0x8F
+    public static final int D2F                  = 144; // 0x90
+    public static final int I2B                  = 145; // 0x91
+    public static final int I2C                  = 146; // 0x92
+    public static final int I2S                  = 147; // 0x93
+    public static final int LCMP                 = 148; // 0x94
+    public static final int FCMPL                = 149; // 0x95
+    public static final int FCMPG                = 150; // 0x96
+    public static final int DCMPL                = 151; // 0x97
+    public static final int DCMPG                = 152; // 0x98
+    public static final int IFEQ                 = 153; // 0x99
+    public static final int IFNE                 = 154; // 0x9A
+    public static final int IFLT                 = 155; // 0x9B
+    public static final int IFGE                 = 156; // 0x9C
+    public static final int IFGT                 = 157; // 0x9D
+    public static final int IFLE                 = 158; // 0x9E
+    public static final int IF_ICMPEQ            = 159; // 0x9F
+    public static final int IF_ICMPNE            = 160; // 0xA0
+    public static final int IF_ICMPLT            = 161; // 0xA1
+    public static final int IF_ICMPGE            = 162; // 0xA2
+    public static final int IF_ICMPGT            = 163; // 0xA3
+    public static final int IF_ICMPLE            = 164; // 0xA4
+    public static final int IF_ACMPEQ            = 165; // 0xA5
+    public static final int IF_ACMPNE            = 166; // 0xA6
+    public static final int GOTO                 = 167; // 0xA7
+    public static final int JSR                  = 168; // 0xA8
+    public static final int RET                  = 169; // 0xA9
+    public static final int TABLESWITCH          = 170; // 0xAA
+    public static final int LOOKUPSWITCH         = 171; // 0xAB
+    public static final int IRETURN              = 172; // 0xAC
+    public static final int LRETURN              = 173; // 0xAD
+    public static final int FRETURN              = 174; // 0xAE
+    public static final int DRETURN              = 175; // 0xAF
+    public static final int ARETURN              = 176; // 0xB0
+    public static final int RETURN               = 177; // 0xB1
+    public static final int GETSTATIC            = 178; // 0xB2
+    public static final int PUTSTATIC            = 179; // 0xB3
+    public static final int GETFIELD             = 180; // 0xB4
+    public static final int PUTFIELD             = 181; // 0xB5
+    public static final int INVOKEVIRTUAL        = 182; // 0xB6
+    public static final int INVOKESPECIAL        = 183; // 0xB7
+    public static final int INVOKESTATIC         = 184; // 0xB8
+    public static final int INVOKEINTERFACE      = 185; // 0xB9
+    public static final int INVOKEDYNAMIC        = 186; // 0xBA
+    public static final int NEW                  = 187; // 0xBB
+    public static final int NEWARRAY             = 188; // 0xBC
+    public static final int ANEWARRAY            = 189; // 0xBD
+    public static final int ARRAYLENGTH          = 190; // 0xBE
+    public static final int ATHROW               = 191; // 0xBF
+    public static final int CHECKCAST            = 192; // 0xC0
+    public static final int INSTANCEOF           = 193; // 0xC1
+    public static final int MONITORENTER         = 194; // 0xC2
+    public static final int MONITOREXIT          = 195; // 0xC3
+    public static final int WIDE                 = 196; // 0xC4
+    public static final int MULTIANEWARRAY       = 197; // 0xC5
+    public static final int IFNULL               = 198; // 0xC6
+    public static final int IFNONNULL            = 199; // 0xC7
+    public static final int GOTO_W               = 200; // 0xC8
+    public static final int JSR_W                = 201; // 0xC9
+    public static final int BREAKPOINT           = 202; // 0xCA
+
+    public static final int ILLEGAL = 255;
+    public static final int END = 256;
+    // @formatter:on
+
+    /**
+     * The last opcode defined by the JVM specification. To iterate over all JVM bytecodes:
+     *
+     * <pre>
+     * for (int opcode = 0; opcode &lt;= Bytecodes.LAST_JVM_OPCODE; ++opcode) {
+     *     //
+     * }
+     * </pre>
+     */
+    public static final int LAST_JVM_OPCODE = JSR_W;
+
+    /**
+     * A collection of flags describing various bytecode attributes.
+     */
+    static class Flags {
+
+        /**
+         * Denotes an instruction that ends a basic block and does not let control flow fall through
+         * to its lexical successor.
+         */
+        static final int STOP = 0x00000001;
+
+        /**
+         * Denotes an instruction that ends a basic block and may let control flow fall through to
+         * its lexical successor. In practice this means it is a conditional branch.
+         */
+        static final int FALL_THROUGH = 0x00000002;
+
+        /**
+         * Denotes an instruction that has a 2 or 4 byte operand that is an offset to another
+         * instruction in the same method. This does not include the {@link Bytecodes#TABLESWITCH}
+         * or {@link Bytecodes#LOOKUPSWITCH} instructions.
+         */
+        static final int BRANCH = 0x00000004;
+
+        /**
+         * Denotes an instruction that reads the value of a static or instance field.
+         */
+        static final int FIELD_READ = 0x00000008;
+
+        /**
+         * Denotes an instruction that writes the value of a static or instance field.
+         */
+        static final int FIELD_WRITE = 0x00000010;
+
+        /**
+         * Denotes an instruction that can cause a trap.
+         */
+        static final int TRAP = 0x00000080;
+        /**
+         * Denotes an instruction that is commutative.
+         */
+        static final int COMMUTATIVE = 0x00000100;
+        /**
+         * Denotes an instruction that is associative.
+         */
+        static final int ASSOCIATIVE = 0x00000200;
+        /**
+         * Denotes an instruction that loads an operand.
+         */
+        static final int LOAD = 0x00000400;
+        /**
+         * Denotes an instruction that stores an operand.
+         */
+        static final int STORE = 0x00000800;
+        /**
+         * Denotes the 5 INVOKE* instructions (including invokedynamic).
+         */
+        static final int INVOKE = 0x00001000;
+    }
+
+    // Performs a sanity check that none of the flags overlap.
+    static {
+        int allFlags = 0;
+        try {
+            for (Field field : Flags.class.getDeclaredFields()) {
+                int flagsFilter = Modifier.FINAL | Modifier.STATIC;
+                if ((field.getModifiers() & flagsFilter) == flagsFilter && !field.isSynthetic()) {
+                    assert field.getType() == int.class : "Field is not int : " + field;
+                    final int flag = field.getInt(null);
+                    assert flag != 0;
+                    assert (flag & allFlags) == 0 : field.getName() + " has a value conflicting with another flag";
+                    allFlags |= flag;
+                }
+            }
+        } catch (Exception e) {
+            throw new InternalError(e.toString());
+        }
+    }
+
+    /**
+     * An array that maps from a bytecode value to a {@link String} for the corresponding
+     * instruction mnemonic.
+     */
+    private static final String[] nameArray = new String[256];
+
+    /**
+     * An array that maps from a bytecode value to the set of {@link Flags} for the corresponding
+     * instruction.
+     */
+    private static final int[] flagsArray = new int[256];
+
+    /**
+     * An array that maps from a bytecode value to the length in bytes for the corresponding
+     * instruction.
+     */
+    private static final int[] lengthArray = new int[256];
+
+    /**
+     * An array that maps from a bytecode value to the signed change in operand stack depth (which
+     * may be negative) caused by the corresponding instruction.
+     */
+    private static final int[] stackEffectArray = new int[256];
+
+    // Checkstyle: stop
+    // @formatter:off
+    static {
+        def(NOP                 , "nop"             , "b"    ,  0);
+        def(ACONST_NULL         , "aconst_null"     , "b"    ,  1);
+        def(ICONST_M1           , "iconst_m1"       , "b"    ,  1);
+        def(ICONST_0            , "iconst_0"        , "b"    ,  1);
+        def(ICONST_1            , "iconst_1"        , "b"    ,  1);
+        def(ICONST_2            , "iconst_2"        , "b"    ,  1);
+        def(ICONST_3            , "iconst_3"        , "b"    ,  1);
+        def(ICONST_4            , "iconst_4"        , "b"    ,  1);
+        def(ICONST_5            , "iconst_5"        , "b"    ,  1);
+        def(LCONST_0            , "lconst_0"        , "b"    ,  2);
+        def(LCONST_1            , "lconst_1"        , "b"    ,  2);
+        def(FCONST_0            , "fconst_0"        , "b"    ,  1);
+        def(FCONST_1            , "fconst_1"        , "b"    ,  1);
+        def(FCONST_2            , "fconst_2"        , "b"    ,  1);
+        def(DCONST_0            , "dconst_0"        , "b"    ,  2);
+        def(DCONST_1            , "dconst_1"        , "b"    ,  2);
+        def(BIPUSH              , "bipush"          , "bc"   ,  1);
+        def(SIPUSH              , "sipush"          , "bcc"  ,  1);
+        def(LDC                 , "ldc"             , "bi"   ,  1, TRAP);
+        def(LDC_W               , "ldc_w"           , "bii"  ,  1, TRAP);
+        def(LDC2_W              , "ldc2_w"          , "bii"  ,  2, TRAP);
+        def(ILOAD               , "iload"           , "bi"   ,  1, LOAD);
+        def(LLOAD               , "lload"           , "bi"   ,  2, LOAD);
+        def(FLOAD               , "fload"           , "bi"   ,  1, LOAD);
+        def(DLOAD               , "dload"           , "bi"   ,  2, LOAD);
+        def(ALOAD               , "aload"           , "bi"   ,  1, LOAD);
+        def(ILOAD_0             , "iload_0"         , "b"    ,  1, LOAD);
+        def(ILOAD_1             , "iload_1"         , "b"    ,  1, LOAD);
+        def(ILOAD_2             , "iload_2"         , "b"    ,  1, LOAD);
+        def(ILOAD_3             , "iload_3"         , "b"    ,  1, LOAD);
+        def(LLOAD_0             , "lload_0"         , "b"    ,  2, LOAD);
+        def(LLOAD_1             , "lload_1"         , "b"    ,  2, LOAD);
+        def(LLOAD_2             , "lload_2"         , "b"    ,  2, LOAD);
+        def(LLOAD_3             , "lload_3"         , "b"    ,  2, LOAD);
+        def(FLOAD_0             , "fload_0"         , "b"    ,  1, LOAD);
+        def(FLOAD_1             , "fload_1"         , "b"    ,  1, LOAD);
+        def(FLOAD_2             , "fload_2"         , "b"    ,  1, LOAD);
+        def(FLOAD_3             , "fload_3"         , "b"    ,  1, LOAD);
+        def(DLOAD_0             , "dload_0"         , "b"    ,  2, LOAD);
+        def(DLOAD_1             , "dload_1"         , "b"    ,  2, LOAD);
+        def(DLOAD_2             , "dload_2"         , "b"    ,  2, LOAD);
+        def(DLOAD_3             , "dload_3"         , "b"    ,  2, LOAD);
+        def(ALOAD_0             , "aload_0"         , "b"    ,  1, LOAD);
+        def(ALOAD_1             , "aload_1"         , "b"    ,  1, LOAD);
+        def(ALOAD_2             , "aload_2"         , "b"    ,  1, LOAD);
+        def(ALOAD_3             , "aload_3"         , "b"    ,  1, LOAD);
+        def(IALOAD              , "iaload"          , "b"    , -1, TRAP);
+        def(LALOAD              , "laload"          , "b"    ,  0, TRAP);
+        def(FALOAD              , "faload"          , "b"    , -1, TRAP);
+        def(DALOAD              , "daload"          , "b"    ,  0, TRAP);
+        def(AALOAD              , "aaload"          , "b"    , -1, TRAP);
+        def(BALOAD              , "baload"          , "b"    , -1, TRAP);
+        def(CALOAD              , "caload"          , "b"    , -1, TRAP);
+        def(SALOAD              , "saload"          , "b"    , -1, TRAP);
+        def(ISTORE              , "istore"          , "bi"   , -1, STORE);
+        def(LSTORE              , "lstore"          , "bi"   , -2, STORE);
+        def(FSTORE              , "fstore"          , "bi"   , -1, STORE);
+        def(DSTORE              , "dstore"          , "bi"   , -2, STORE);
+        def(ASTORE              , "astore"          , "bi"   , -1, STORE);
+        def(ISTORE_0            , "istore_0"        , "b"    , -1, STORE);
+        def(ISTORE_1            , "istore_1"        , "b"    , -1, STORE);
+        def(ISTORE_2            , "istore_2"        , "b"    , -1, STORE);
+        def(ISTORE_3            , "istore_3"        , "b"    , -1, STORE);
+        def(LSTORE_0            , "lstore_0"        , "b"    , -2, STORE);
+        def(LSTORE_1            , "lstore_1"        , "b"    , -2, STORE);
+        def(LSTORE_2            , "lstore_2"        , "b"    , -2, STORE);
+        def(LSTORE_3            , "lstore_3"        , "b"    , -2, STORE);
+        def(FSTORE_0            , "fstore_0"        , "b"    , -1, STORE);
+        def(FSTORE_1            , "fstore_1"        , "b"    , -1, STORE);
+        def(FSTORE_2            , "fstore_2"        , "b"    , -1, STORE);
+        def(FSTORE_3            , "fstore_3"        , "b"    , -1, STORE);
+        def(DSTORE_0            , "dstore_0"        , "b"    , -2, STORE);
+        def(DSTORE_1            , "dstore_1"        , "b"    , -2, STORE);
+        def(DSTORE_2            , "dstore_2"        , "b"    , -2, STORE);
+        def(DSTORE_3            , "dstore_3"        , "b"    , -2, STORE);
+        def(ASTORE_0            , "astore_0"        , "b"    , -1, STORE);
+        def(ASTORE_1            , "astore_1"        , "b"    , -1, STORE);
+        def(ASTORE_2            , "astore_2"        , "b"    , -1, STORE);
+        def(ASTORE_3            , "astore_3"        , "b"    , -1, STORE);
+        def(IASTORE             , "iastore"         , "b"    , -3, TRAP);
+        def(LASTORE             , "lastore"         , "b"    , -4, TRAP);
+        def(FASTORE             , "fastore"         , "b"    , -3, TRAP);
+        def(DASTORE             , "dastore"         , "b"    , -4, TRAP);
+        def(AASTORE             , "aastore"         , "b"    , -3, TRAP);
+        def(BASTORE             , "bastore"         , "b"    , -3, TRAP);
+        def(CASTORE             , "castore"         , "b"    , -3, TRAP);
+        def(SASTORE             , "sastore"         , "b"    , -3, TRAP);
+        def(POP                 , "pop"             , "b"    , -1);
+        def(POP2                , "pop2"            , "b"    , -2);
+        def(DUP                 , "dup"             , "b"    ,  1);
+        def(DUP_X1              , "dup_x1"          , "b"    ,  1);
+        def(DUP_X2              , "dup_x2"          , "b"    ,  1);
+        def(DUP2                , "dup2"            , "b"    ,  2);
+        def(DUP2_X1             , "dup2_x1"         , "b"    ,  2);
+        def(DUP2_X2             , "dup2_x2"         , "b"    ,  2);
+        def(SWAP                , "swap"            , "b"    ,  0);
+        def(IADD                , "iadd"            , "b"    , -1, COMMUTATIVE | ASSOCIATIVE);
+        def(LADD                , "ladd"            , "b"    , -2, COMMUTATIVE | ASSOCIATIVE);
+        def(FADD                , "fadd"            , "b"    , -1, COMMUTATIVE | ASSOCIATIVE);
+        def(DADD                , "dadd"            , "b"    , -2, COMMUTATIVE | ASSOCIATIVE);
+        def(ISUB                , "isub"            , "b"    , -1);
+        def(LSUB                , "lsub"            , "b"    , -2);
+        def(FSUB                , "fsub"            , "b"    , -1);
+        def(DSUB                , "dsub"            , "b"    , -2);
+        def(IMUL                , "imul"            , "b"    , -1, COMMUTATIVE | ASSOCIATIVE);
+        def(LMUL                , "lmul"            , "b"    , -2, COMMUTATIVE | ASSOCIATIVE);
+        def(FMUL                , "fmul"            , "b"    , -1, COMMUTATIVE | ASSOCIATIVE);
+        def(DMUL                , "dmul"            , "b"    , -2, COMMUTATIVE | ASSOCIATIVE);
+        def(IDIV                , "idiv"            , "b"    , -1, TRAP);
+        def(LDIV                , "ldiv"            , "b"    , -2, TRAP);
+        def(FDIV                , "fdiv"            , "b"    , -1);
+        def(DDIV                , "ddiv"            , "b"    , -2);
+        def(IREM                , "irem"            , "b"    , -1, TRAP);
+        def(LREM                , "lrem"            , "b"    , -2, TRAP);
+        def(FREM                , "frem"            , "b"    , -1);
+        def(DREM                , "drem"            , "b"    , -2);
+        def(INEG                , "ineg"            , "b"    ,  0);
+        def(LNEG                , "lneg"            , "b"    ,  0);
+        def(FNEG                , "fneg"            , "b"    ,  0);
+        def(DNEG                , "dneg"            , "b"    ,  0);
+        def(ISHL                , "ishl"            , "b"    , -1);
+        def(LSHL                , "lshl"            , "b"    , -1);
+        def(ISHR                , "ishr"            , "b"    , -1);
+        def(LSHR                , "lshr"            , "b"    , -1);
+        def(IUSHR               , "iushr"           , "b"    , -1);
+        def(LUSHR               , "lushr"           , "b"    , -1);
+        def(IAND                , "iand"            , "b"    , -1, COMMUTATIVE | ASSOCIATIVE);
+        def(LAND                , "land"            , "b"    , -2, COMMUTATIVE | ASSOCIATIVE);
+        def(IOR                 , "ior"             , "b"    , -1, COMMUTATIVE | ASSOCIATIVE);
+        def(LOR                 , "lor"             , "b"    , -2, COMMUTATIVE | ASSOCIATIVE);
+        def(IXOR                , "ixor"            , "b"    , -1, COMMUTATIVE | ASSOCIATIVE);
+        def(LXOR                , "lxor"            , "b"    , -2, COMMUTATIVE | ASSOCIATIVE);
+        def(IINC                , "iinc"            , "bic"  ,  0, LOAD | STORE);
+        def(I2L                 , "i2l"             , "b"    ,  1);
+        def(I2F                 , "i2f"             , "b"    ,  0);
+        def(I2D                 , "i2d"             , "b"    ,  1);
+        def(L2I                 , "l2i"             , "b"    , -1);
+        def(L2F                 , "l2f"             , "b"    , -1);
+        def(L2D                 , "l2d"             , "b"    ,  0);
+        def(F2I                 , "f2i"             , "b"    ,  0);
+        def(F2L                 , "f2l"             , "b"    ,  1);
+        def(F2D                 , "f2d"             , "b"    ,  1);
+        def(D2I                 , "d2i"             , "b"    , -1);
+        def(D2L                 , "d2l"             , "b"    ,  0);
+        def(D2F                 , "d2f"             , "b"    , -1);
+        def(I2B                 , "i2b"             , "b"    ,  0);
+        def(I2C                 , "i2c"             , "b"    ,  0);
+        def(I2S                 , "i2s"             , "b"    ,  0);
+        def(LCMP                , "lcmp"            , "b"    , -3);
+        def(FCMPL               , "fcmpl"           , "b"    , -1);
+        def(FCMPG               , "fcmpg"           , "b"    , -1);
+        def(DCMPL               , "dcmpl"           , "b"    , -3);
+        def(DCMPG               , "dcmpg"           , "b"    , -3);
+        def(IFEQ                , "ifeq"            , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(IFNE                , "ifne"            , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(IFLT                , "iflt"            , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(IFGE                , "ifge"            , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(IFGT                , "ifgt"            , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(IFLE                , "ifle"            , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(IF_ICMPEQ           , "if_icmpeq"       , "boo"  , -2, COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(IF_ICMPNE           , "if_icmpne"       , "boo"  , -2, COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(IF_ICMPLT           , "if_icmplt"       , "boo"  , -2, FALL_THROUGH | BRANCH);
+        def(IF_ICMPGE           , "if_icmpge"       , "boo"  , -2, FALL_THROUGH | BRANCH);
+        def(IF_ICMPGT           , "if_icmpgt"       , "boo"  , -2, FALL_THROUGH | BRANCH);
+        def(IF_ICMPLE           , "if_icmple"       , "boo"  , -2, FALL_THROUGH | BRANCH);
+        def(IF_ACMPEQ           , "if_acmpeq"       , "boo"  , -2, COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(IF_ACMPNE           , "if_acmpne"       , "boo"  , -2, COMMUTATIVE | FALL_THROUGH | BRANCH);
+        def(GOTO                , "goto"            , "boo"  ,  0, STOP | BRANCH);
+        def(JSR                 , "jsr"             , "boo"  ,  0, STOP | BRANCH);
+        def(RET                 , "ret"             , "bi"   ,  0, STOP);
+        def(TABLESWITCH         , "tableswitch"     , ""     , -1, STOP);
+        def(LOOKUPSWITCH        , "lookupswitch"    , ""     , -1, STOP);
+        def(IRETURN             , "ireturn"         , "b"    , -1, TRAP | STOP);
+        def(LRETURN             , "lreturn"         , "b"    , -2, TRAP | STOP);
+        def(FRETURN             , "freturn"         , "b"    , -1, TRAP | STOP);
+        def(DRETURN             , "dreturn"         , "b"    , -2, TRAP | STOP);
+        def(ARETURN             , "areturn"         , "b"    , -1, TRAP | STOP);
+        def(RETURN              , "return"          , "b"    ,  0, TRAP | STOP);
+        def(GETSTATIC           , "getstatic"       , "bjj"  ,  1, TRAP | FIELD_READ);
+        def(PUTSTATIC           , "putstatic"       , "bjj"  , -1, TRAP | FIELD_WRITE);
+        def(GETFIELD            , "getfield"        , "bjj"  ,  0, TRAP | FIELD_READ);
+        def(PUTFIELD            , "putfield"        , "bjj"  , -2, TRAP | FIELD_WRITE);
+        def(INVOKEVIRTUAL       , "invokevirtual"   , "bjj"  , -1, TRAP | INVOKE);
+        def(INVOKESPECIAL       , "invokespecial"   , "bjj"  , -1, TRAP | INVOKE);
+        def(INVOKESTATIC        , "invokestatic"    , "bjj"  ,  0, TRAP | INVOKE);
+        def(INVOKEINTERFACE     , "invokeinterface" , "bjja_", -1, TRAP | INVOKE);
+        def(INVOKEDYNAMIC       , "invokedynamic"   , "bjjjj",  0, TRAP | INVOKE);
+        def(NEW                 , "new"             , "bii"  ,  1, TRAP);
+        def(NEWARRAY            , "newarray"        , "bc"   ,  0, TRAP);
+        def(ANEWARRAY           , "anewarray"       , "bii"  ,  0, TRAP);
+        def(ARRAYLENGTH         , "arraylength"     , "b"    ,  0, TRAP);
+        def(ATHROW              , "athrow"          , "b"    , -1, TRAP | STOP);
+        def(CHECKCAST           , "checkcast"       , "bii"  ,  0, TRAP);
+        def(INSTANCEOF          , "instanceof"      , "bii"  ,  0, TRAP);
+        def(MONITORENTER        , "monitorenter"    , "b"    , -1, TRAP);
+        def(MONITOREXIT         , "monitorexit"     , "b"    , -1, TRAP);
+        def(WIDE                , "wide"            , ""     ,  0);
+        def(MULTIANEWARRAY      , "multianewarray"  , "biic" ,  1, TRAP);
+        def(IFNULL              , "ifnull"          , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(IFNONNULL           , "ifnonnull"       , "boo"  , -1, FALL_THROUGH | BRANCH);
+        def(GOTO_W              , "goto_w"          , "boooo",  0, STOP | BRANCH);
+        def(JSR_W               , "jsr_w"           , "boooo",  0, STOP | BRANCH);
+        def(BREAKPOINT          , "breakpoint"      , "b"    ,  0, TRAP);
+    }
+    // @formatter:on
+    // Checkstyle: resume
+
+    /**
+     * Determines if an opcode is commutative.
+     *
+     * @param opcode the opcode to check
+     * @return {@code true} iff commutative
+     */
+    public static boolean isCommutative(int opcode) {
+        return (flagsArray[opcode & 0xff] & COMMUTATIVE) != 0;
+    }
+
+    /**
+     * Gets the length of an instruction denoted by a given opcode.
+     *
+     * @param opcode an instruction opcode
+     * @return the length of the instruction denoted by {@code opcode}. If {@code opcode} is an
+     *         illegal instruction or denotes a variable length instruction (e.g.
+     *         {@link #TABLESWITCH}), then 0 is returned.
+     */
+    public static int lengthOf(int opcode) {
+        return lengthArray[opcode & 0xff];
+    }
+
+    /**
+     * Gets the effect on the depth of the expression stack of an instruction denoted by a given
+     * opcode.
+     *
+     * @param opcode an instruction opcode
+     * @return the change in the stack caused by the instruction denoted by {@code opcode}. If
+     *         {@code opcode} is an illegal instruction then 0 is returned. Note that invoke
+     *         instructions may pop more arguments so this value is a minimum stack effect.
+     */
+    public static int stackEffectOf(int opcode) {
+        return stackEffectArray[opcode & 0xff];
+    }
+
+    /**
+     * Gets the lower-case mnemonic for a given opcode.
+     *
+     * @param opcode an opcode
+     * @return the mnemonic for {@code opcode} or {@code "<illegal opcode: " + opcode + ">"} if
+     *         {@code opcode} is not a legal opcode
+     */
+    public static String nameOf(int opcode) throws IllegalArgumentException {
+        String name = nameArray[opcode & 0xff];
+        if (name == null) {
+            return "<illegal opcode: " + opcode + ">";
+        }
+        return name;
+    }
+
+    /**
+     * Allocation-free version of {@linkplain #nameOf(int)}.
+     *
+     * @param opcode an opcode.
+     * @return the mnemonic for {@code opcode} or {@code "<illegal opcode>"} if {@code opcode} is
+     *         not a legal opcode.
+     */
+    public static String baseNameOf(int opcode) {
+        String name = nameArray[opcode & 0xff];
+        if (name == null) {
+            return "<illegal opcode>";
+        }
+        return name;
+    }
+
+    /**
+     * Gets the opcode corresponding to a given mnemonic.
+     *
+     * @param name an opcode mnemonic
+     * @return the opcode corresponding to {@code mnemonic}
+     * @throws IllegalArgumentException if {@code name} does not denote a valid opcode
+     */
+    public static int valueOf(String name) {
+        for (int opcode = 0; opcode < nameArray.length; ++opcode) {
+            if (name.equalsIgnoreCase(nameArray[opcode])) {
+                return opcode;
+            }
+        }
+        throw new IllegalArgumentException("No opcode for " + name);
+    }
+
+    /**
+     * Determines if a given opcode denotes an instruction that can cause an implicit exception.
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} can cause an implicit exception, {@code false}
+     *         otherwise
+     */
+    public static boolean canTrap(int opcode) {
+        return (flagsArray[opcode & 0xff] & TRAP) != 0;
+    }
+
+    /**
+     * Determines if a given opcode denotes an instruction that loads a local variable to the
+     * operand stack.
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} loads a local variable to the operand stack,
+     *         {@code false} otherwise
+     */
+    public static boolean isLoad(int opcode) {
+        return (flagsArray[opcode & 0xff] & LOAD) != 0;
+    }
+
+    /**
+     * Determines if a given opcode denotes an instruction that ends a basic block and does not let
+     * control flow fall through to its lexical successor.
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} properly ends a basic block
+     */
+    public static boolean isStop(int opcode) {
+        return (flagsArray[opcode & 0xff] & STOP) != 0;
+    }
+
+    /**
+     * Determines if a given opcode denotes one of the INVOKE* instructions that invokes a method
+     * (e.g. {@link #INVOKEVIRTUAL} or {@link #INVOKESTATIC}).
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} denotes a method invocation instruction,
+     *         {@code false} otherwise
+     */
+    public static boolean isInvoke(int opcode) {
+        return (flagsArray[opcode & 0xff] & INVOKE) != 0;
+    }
+
+    /**
+     * Determines if a given opcode denotes an instruction that stores a value to a local variable
+     * after popping it from the operand stack.
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} stores a value to a local variable, {@code false}
+     *         otherwise
+     */
+    public static boolean isStore(int opcode) {
+        return (flagsArray[opcode & 0xff] & STORE) != 0;
+    }
+
+    /**
+     * Determines if a given opcode is an instruction that delimits a basic block.
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} delimits a basic block
+     */
+    public static boolean isBlockEnd(int opcode) {
+        return (flagsArray[opcode & 0xff] & (STOP | FALL_THROUGH)) != 0;
+    }
+
+    /**
+     * Determines if a given opcode is an instruction that has a 2 or 4 byte operand that is an
+     * offset to another instruction in the same method. This does not include the
+     * {@linkplain #TABLESWITCH switch} instructions.
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} is a branch instruction with a single operand
+     */
+    public static boolean isBranch(int opcode) {
+        return (flagsArray[opcode & 0xff] & BRANCH) != 0;
+    }
+
+    /**
+     * Determines if a given opcode denotes a conditional branch.
+     *
+     * @param opcode an opcode to test
+     * @return {@code true} iff {@code opcode} is a conditional branch
+     */
+    public static boolean isConditionalBranch(int opcode) {
+        return (flagsArray[opcode & 0xff] & FALL_THROUGH) != 0;
+    }
+
+    /**
+     * Gets the arithmetic operator name for a given opcode. If {@code opcode} does not denote an
+     * arithmetic instruction, then the {@linkplain #nameOf(int) name} of the opcode is returned
+     * instead.
+     *
+     * @param op an opcode
+     * @return the arithmetic operator name
+     */
+    public static String operator(int op) {
+        // Checkstyle: stop
+        switch (op) {
+        // arithmetic ops
+            case IADD: // fall through
+            case LADD: // fall through
+            case FADD: // fall through
+            case DADD:
+                return "+";
+            case ISUB: // fall through
+            case LSUB: // fall through
+            case FSUB: // fall through
+            case DSUB:
+                return "-";
+            case IMUL: // fall through
+            case LMUL: // fall through
+            case FMUL: // fall through
+            case DMUL:
+                return "*";
+            case IDIV: // fall through
+            case LDIV: // fall through
+            case FDIV: // fall through
+            case DDIV:
+                return "/";
+            case IREM: // fall through
+            case LREM: // fall through
+            case FREM: // fall through
+            case DREM:
+                return "%";
+                // shift ops
+            case ISHL: // fall through
+            case LSHL:
+                return "<<";
+            case ISHR: // fall through
+            case LSHR:
+                return ">>";
+            case IUSHR: // fall through
+            case LUSHR:
+                return ">>>";
+                // logic ops
+            case IAND: // fall through
+            case LAND:
+                return "&";
+            case IOR: // fall through
+            case LOR:
+                return "|";
+            case IXOR: // fall through
+            case LXOR:
+                return "^";
+        }
+        // Checkstyle: resume
+        return nameOf(op);
+    }
+
+    /**
+     * Defines a bytecode by entering it into the arrays that record its name, length and flags.
+     *
+     * @param name instruction name (should be lower case)
+     * @param format encodes the length of the instruction
+     */
+    private static void def(int opcode, String name, String format, int stackEffect) {
+        def(opcode, name, format, stackEffect, 0);
+    }
+
+    /**
+     * Defines a bytecode by entering it into the arrays that record its name, length and flags.
+     *
+     * @param name instruction name (lower case)
+     * @param format encodes the length of the instruction
+     * @param flags the set of {@link Flags} associated with the instruction
+     */
+    private static void def(int opcode, String name, String format, int stackEffect, int flags) {
+        assert nameArray[opcode] == null : "opcode " + opcode + " is already bound to name " + nameArray[opcode];
+        nameArray[opcode] = name;
+        int instructionLength = format.length();
+        lengthArray[opcode] = instructionLength;
+        stackEffectArray[opcode] = stackEffect;
+        Bytecodes.flagsArray[opcode] = flags;
+
+        assert !isConditionalBranch(opcode) || isBranch(opcode) : "a conditional branch must also be a branch";
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/graal/com.oracle.graal.bytecode/src/com/oracle/graal/bytecode/Bytes.java	Thu Jun 04 13:35:47 2015 +0200
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package com.oracle.graal.bytecode;
+
+/**
+ * A collection of utility methods for dealing with bytes, particularly in byte arrays.
+ */
+public class Bytes {
+
+    /**
+     * Gets a signed 1-byte value.
+     *
+     * @param data the array containing the data
+     * @param bci the start index of the value to retrieve
+     * @return the signed 1-byte value at index {@code bci} in array {@code data}
+     */
+    public static int beS1(byte[] data, int bci) {
+        return data[bci];
+    }
+
+    /**
+     * Gets a signed 2-byte big-endian value.
+     *
+     * @param data the array containing the data
+     * @param bci the start index of the value to retrieve
+     * @return the signed 2-byte, big-endian, value at index {@code bci} in array {@code data}
+     */
+    public static int beS2(byte[] data, int bci) {
+        return (data[bci] << 8) | (data[bci + 1] & 0xff);
+    }
+
+    /**
+     * Gets an unsigned 1-byte value.
+     *
+     * @param data the array containing the data
+     * @param bci the start index of the value to retrieve
+     * @return the unsigned 1-byte value at index {@code bci} in array {@code data}
+     */
+    public static int beU1(byte[] data, int bci) {
+        return data[bci] & 0xff;
+    }
+
+    /**
+     * Gets an unsigned 2-byte big-endian value.
+     *
+     * @param data the array containing the data
+     * @param bci the start index of the value to retrieve
+     * @return the unsigned 2-byte, big-endian, value at index {@code bci} in array {@code data}
+     */
+    public static int beU2(byte[] data, int bci) {
+        return ((data[bci] & 0xff) << 8) | (data[bci + 1] & 0xff);
+    }
+
+    /**
+     * Gets a signed 4-byte big-endian value.
+     *
+     * @param data the array containing the data
+     * @param bci the start index of the value to retrieve
+     * @return the signed 4-byte, big-endian, value at index {@code bci} in array {@code data}
+     */
+    public static int beS4(byte[] data, int bci) {
+        return (data[bci] << 24) | ((data[bci + 1] & 0xff) << 16) | ((data[bci + 2] & 0xff) << 8) | (data[bci + 3] & 0xff);
+    }
+
+    /**
+     * Gets either a signed 2-byte or a signed 4-byte big-endian value.
+     *
+     * @param data the array containing the data
+     * @param bci the start index of the value to retrieve
+     * @param fourByte if true, this method will return a 4-byte value
+     * @return the signed 2 or 4-byte, big-endian, value at index {@code bci} in array {@code data}
+     */
+    public static int beSVar(byte[] data, int bci, boolean fourByte) {
+        if (fourByte) {
+            return beS4(data, bci);
+        } else {
+            return beS2(data, bci);
+        }
+    }
+}
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64LIRGenerator.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,9 +24,6 @@
 package com.oracle.graal.compiler.amd64;
 
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.*;
-import com.oracle.jvmci.asm.amd64.AMD64Address.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.ForeignCallLinkage;
@@ -43,17 +40,20 @@
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64MOp.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64RMOp.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64Shift.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64MOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64Shift.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.lir.amd64.AMD64Arithmetic.*;
 import static com.oracle.graal.lir.amd64.AMD64MathIntrinsicOp.IntrinsicOpcode.*;
 
 import java.util.*;
 
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Address.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.spi.*;
 import com.oracle.graal.compiler.common.util.*;
--- a/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.compiler.amd64/src/com/oracle/graal/compiler/amd64/AMD64NodeLIRBuilder.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,8 +24,6 @@
 package com.oracle.graal.compiler.amd64;
 
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.CallingConvention;
 import com.oracle.jvmci.meta.Kind;
 import com.oracle.jvmci.meta.JavaType;
@@ -35,10 +33,12 @@
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.LIRKind;
 
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64RMOp.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.compiler.match.*;
--- a/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.compiler.sparc/src/com/oracle/graal/compiler/sparc/SPARCLIRGenerator.java	Thu Jun 04 13:35:47 2015 +0200
@@ -29,6 +29,8 @@
 import static com.oracle.graal.lir.sparc.SPARCMathIntrinsicOp.IntrinsicOpcode.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
+import com.oracle.graal.asm.sparc.*;
+import com.oracle.graal.asm.sparc.SPARCAssembler.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.spi.*;
 import com.oracle.graal.lir.*;
@@ -58,9 +60,6 @@
 import com.oracle.graal.lir.sparc.SPARCMove.SPARCStackMove;
 import com.oracle.graal.lir.sparc.SPARCMove.StackLoadAddressOp;
 import com.oracle.graal.phases.util.*;
-import com.oracle.jvmci.asm.sparc.*;
-import com.oracle.jvmci.asm.sparc.SPARCAssembler.CC;
-import com.oracle.jvmci.asm.sparc.SPARCAssembler.ConditionFlag;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.meta.*;
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/CheckGraalInvariants.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/CheckGraalInvariants.java	Thu Jun 04 13:35:47 2015 +0200
@@ -50,18 +50,18 @@
 import com.oracle.graal.phases.verify.*;
 import com.oracle.graal.printer.*;
 import com.oracle.graal.runtime.*;
+import com.oracle.graal.test.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.code.Register.RegisterCategory;
 import com.oracle.jvmci.debug.*;
 import com.oracle.jvmci.meta.*;
-import com.oracle.jvmci.test.*;
 
 /**
  * Checks that all classes in *graal*.jar and *jvmci*.jar entries on the boot class path comply with
  * global invariants such as using {@link Object#equals(Object)} to compare certain types instead of
  * identity comparisons.
  */
-public class CheckGraalInvariants extends TestBase {
+public class CheckGraalInvariants extends GraalTest {
 
     private static boolean shouldVerifyEquals(ResolvedJavaMethod m) {
         if (m.getName().equals("identityEquals")) {
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/GraalCompilerTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,18 +22,10 @@
  */
 package com.oracle.graal.compiler.test;
 
-import com.oracle.jvmci.code.Architecture;
-import com.oracle.jvmci.code.CompilationResult;
-import com.oracle.jvmci.code.TargetDescription;
-import com.oracle.jvmci.code.InstalledCode;
-import com.oracle.jvmci.code.CallingConvention;
-import com.oracle.jvmci.code.CodeCacheProvider;
-import com.oracle.jvmci.meta.*;
-
-import static com.oracle.jvmci.code.CodeUtil.*;
 import static com.oracle.graal.compiler.GraalCompiler.*;
 import static com.oracle.graal.compiler.common.GraalOptions.*;
 import static com.oracle.graal.nodes.ConstantNode.*;
+import static com.oracle.jvmci.code.CodeUtil.*;
 
 import java.lang.reflect.*;
 import java.util.*;
@@ -43,7 +35,6 @@
 import org.junit.*;
 import org.junit.internal.*;
 
-import com.oracle.jvmci.code.CallingConvention.Type;
 import com.oracle.graal.api.directives.*;
 import com.oracle.graal.api.replacements.*;
 import com.oracle.graal.api.runtime.*;
@@ -70,11 +61,14 @@
 import com.oracle.graal.phases.util.*;
 import com.oracle.graal.printer.*;
 import com.oracle.graal.runtime.*;
+import com.oracle.graal.test.*;
+import com.oracle.jvmci.code.*;
+import com.oracle.jvmci.code.CallingConvention.Type;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.debug.*;
 import com.oracle.jvmci.debug.Debug.Scope;
+import com.oracle.jvmci.meta.*;
 import com.oracle.jvmci.options.*;
-import com.oracle.jvmci.test.*;
 
 /**
  * Base class for Graal compiler unit tests.
@@ -95,7 +89,7 @@
  * <p>
  * These tests will be run by the {@code mx unittest} command.
  */
-public abstract class GraalCompilerTest extends TestBase {
+public abstract class GraalCompilerTest extends GraalTest {
 
     private final Providers providers;
     private final Backend backend;
--- a/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/StaticInterfaceFieldTest.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.compiler.test/src/com/oracle/graal/compiler/test/StaticInterfaceFieldTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -38,15 +38,15 @@
 import com.oracle.graal.phases.tiers.*;
 import com.oracle.graal.phases.util.*;
 import com.oracle.graal.runtime.*;
+import com.oracle.graal.test.*;
 import com.oracle.jvmci.debug.*;
 import com.oracle.jvmci.meta.*;
-import com.oracle.jvmci.test.*;
 
 /**
  * Test that interfaces are correctly initialized by a static field resolution during eager graph
  * building.
  */
-public class StaticInterfaceFieldTest extends TestBase {
+public class StaticInterfaceFieldTest extends GraalTest {
 
     private interface I {
         Object CONST = new Object() {
--- a/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.compiler/src/com/oracle/graal/compiler/target/Backend.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.compiler.target;
 
+import com.oracle.graal.asm.*;
 import com.oracle.graal.compiler.common.alloc.*;
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.lir.*;
@@ -32,7 +33,6 @@
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.graal.phases.tiers.*;
 import com.oracle.graal.phases.util.*;
-import com.oracle.jvmci.asm.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.code.stack.*;
 import com.oracle.jvmci.common.*;
--- a/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/AMD64HotSpotFrameOmissionTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.hotspot.amd64.test;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.InstalledCode;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.CallingConvention;
@@ -37,6 +36,7 @@
 
 import org.junit.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.compiler.test.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/DataPatchInConstantsTest.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64.test/src/com/oracle/graal/hotspot/amd64/test/DataPatchInConstantsTest.java	Thu Jun 04 13:35:47 2015 +0200
@@ -32,6 +32,7 @@
 import org.junit.*;
 
 import com.oracle.graal.api.replacements.*;
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.graph.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.nodes.*;
@@ -45,7 +46,6 @@
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.spi.*;
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.hotspot.*;
 
 public class DataPatchInConstantsTest extends HotSpotGraalCompilerTest {
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64DeoptimizeOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64DeoptimizeOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,11 +24,11 @@
 
 import static com.oracle.graal.hotspot.HotSpotHostBackend.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
 
 @Opcode("DEOPT")
 final class AMD64DeoptimizeOp extends AMD64LIRInstruction implements BlockEndOp {
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBackend.java	Thu Jun 04 13:35:47 2015 +0200
@@ -23,9 +23,6 @@
 package com.oracle.graal.hotspot.amd64;
 
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.CompilationResult;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterConfig;
@@ -43,6 +40,9 @@
 
 import java.util.*;
 
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.compiler.common.alloc.*;
 import com.oracle.graal.compiler.gen.*;
 import com.oracle.graal.compiler.target.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBinaryConsumer.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotBinaryConsumer.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,13 +24,13 @@
 
 import com.oracle.jvmci.meta.AllocatableValue;
 
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.hotspot.*;
 
 public class AMD64HotSpotBinaryConsumer {
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallEpilogueOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallEpilogueOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,8 +22,8 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallPrologueOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCRuntimeCallPrologueOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 
 import static com.oracle.jvmci.amd64.AMD64.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableAddressOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableAddressOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Kind;
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableShiftOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCardTableShiftOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.ValueUtil;
 import com.oracle.jvmci.meta.JavaConstant;
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Kind;
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCounterOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotCounterOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.TargetDescription;
 import com.oracle.jvmci.code.StackSlotValue;
@@ -32,6 +31,7 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.jvmci.common.JVMCIError.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.lir.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotDeoptimizeCallerOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotDeoptimizeCallerOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,11 +24,11 @@
 
 import static com.oracle.graal.hotspot.HotSpotHostBackend.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.meta.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEnterUnpackFramesStackFrameOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEnterUnpackFramesStackFrameOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.RegisterSaveLayout;
@@ -33,6 +32,7 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotEpilogueOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -26,10 +26,10 @@
 import static com.oracle.jvmci.amd64.AMD64.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotJumpToExceptionHandlerInCallerOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotJumpToExceptionHandlerInCallerOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,8 +22,6 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -31,6 +29,8 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLIRGenerator.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,14 +22,15 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64RMOp.*;
+import static com.oracle.graal.asm.amd64.AMD64Assembler.OperandSize.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 import static com.oracle.jvmci.amd64.AMD64.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64RMOp.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.OperandSize.*;
 
 import java.util.*;
 
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.compiler.amd64.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.compiler.common.spi.*;
@@ -48,7 +49,6 @@
 import com.oracle.graal.lir.framemap.*;
 import com.oracle.graal.lir.gen.*;
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.debug.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveCurrentStackFrameOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveCurrentStackFrameOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,11 +24,11 @@
 
 import static com.oracle.jvmci.amd64.AMD64.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.lir.framemap.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveDeoptimizedStackFrameOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveDeoptimizedStackFrameOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -30,6 +29,7 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.asm.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveUnpackFramesStackFrameOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotLeaveUnpackFramesStackFrameOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,11 +22,11 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.RegisterConfig;
 import com.oracle.jvmci.code.RegisterSaveLayout;
 import com.oracle.jvmci.meta.Kind;
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.SaveRegistersOp;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotMove.java	Thu Jun 04 13:35:47 2015 +0200
@@ -25,6 +25,9 @@
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.compiler.common.*;
 import com.oracle.graal.hotspot.*;
 import com.oracle.graal.lir.*;
@@ -32,9 +35,6 @@
 import com.oracle.graal.lir.StandardOp.StackStoreOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.jvmci.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.common.*;
 import com.oracle.jvmci.hotspot.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotNodeLIRBuilder.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,11 +22,13 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
+import static com.oracle.graal.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
 import static com.oracle.graal.hotspot.HotSpotBackend.*;
 import static com.oracle.jvmci.amd64.AMD64.*;
-import static com.oracle.jvmci.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
+import com.oracle.graal.asm.*;
+import com.oracle.graal.asm.amd64.AMD64Assembler.*;
 import com.oracle.graal.compiler.amd64.*;
 import com.oracle.graal.compiler.common.calc.*;
 import com.oracle.graal.compiler.common.type.*;
@@ -46,8 +48,6 @@
 import com.oracle.graal.nodes.extended.*;
 import com.oracle.graal.nodes.memory.*;
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.*;
-import com.oracle.jvmci.asm.amd64.AMD64Assembler.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.debug.*;
 import com.oracle.jvmci.hotspot.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPatchReturnAddressOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPatchReturnAddressOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,13 +22,13 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.meta.AllocatableValue;
 
 import static com.oracle.jvmci.amd64.AMD64.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPushInterpreterFrameOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotPushInterpreterFrameOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.AllocatableValue;
 
@@ -30,6 +29,7 @@
 import static com.oracle.jvmci.code.ValueUtil.*;
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotReturnOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotReturnOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,10 +24,10 @@
 
 import static com.oracle.graal.lir.LIRInstruction.OperandFlag.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.hotspot.*;
 import com.oracle.jvmci.meta.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotSafepointOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotSafepointOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -22,7 +22,6 @@
  */
 package com.oracle.graal.hotspot.amd64;
 
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.RegisterValue;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.code.InfopointReason;
@@ -32,10 +31,11 @@
 import com.oracle.jvmci.meta.AllocatableValue;
 import com.oracle.jvmci.meta.Kind;
 
+import static com.oracle.graal.asm.NumUtil.*;
 import static com.oracle.graal.compiler.common.GraalOptions.*;
 import static com.oracle.jvmci.amd64.AMD64.*;
-import static com.oracle.jvmci.asm.NumUtil.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotUnwindOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotSpotUnwindOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -27,12 +27,12 @@
 import static com.oracle.jvmci.amd64.AMD64.*;
 import static com.oracle.jvmci.code.ValueUtil.*;
 
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.hotspot.stubs.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.StandardOp.BlockEndOp;
 import com.oracle.graal.lir.amd64.*;
 import com.oracle.graal.lir.asm.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.*;
 import com.oracle.jvmci.meta.*;
 
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectStaticCallOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,11 +24,11 @@
 
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.AMD64Call.DirectCallOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.hotspot.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64HotspotDirectVirtualCallOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -24,12 +24,12 @@
 
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;
+import com.oracle.graal.asm.amd64.*;
 import com.oracle.graal.lir.*;
 import com.oracle.graal.lir.amd64.AMD64Call.DirectCallOp;
 import com.oracle.graal.lir.asm.*;
 import com.oracle.graal.nodes.CallTargetNode.InvokeKind;
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.hotspot.*;
 
 /**
--- a/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64IndirectCallOp.java	Thu Jun 04 09:17:32 2015 +0200
+++ b/graal/com.oracle.graal.hotspot.amd64/src/com/oracle/graal/hotspot/amd64/AMD64IndirectCallOp.java	Thu Jun 04 13:35:47 2015 +0200
@@ -23,7 +23,6 @@
 package com.oracle.graal.hotspot.amd64;
 
 import com.oracle.jvmci.amd64.*;
-import com.oracle.jvmci.asm.amd64.*;
 import com.oracle.jvmci.code.Register;
 import com.oracle.jvmci.meta.Value;
 import com.oracle.jvmci.meta.ResolvedJavaMethod;