changeset 10507:329c22feda1f

Merge
author Christian Haeubl <haeubl@ssw.jku.at>
date Mon, 24 Jun 2013 11:56:24 +0200
parents 7344fa3e8833 5db21405c6a4
children 3e9820de1c1c f40010b67b6e
files
diffstat 8 files changed, 456 insertions(+), 33 deletions(-) [+]
line wrap: on
line diff
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/AheadOfTimeCompilationTest.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/AheadOfTimeCompilationTest.java	Mon Jun 24 11:56:24 2013 +0200
@@ -170,6 +170,7 @@
     }
 
     @Test
+    @Ignore
     public void testBoxedBooleanAOT() {
         StructuredGraph result = compile("getBoxedBoolean", true);
 
--- a/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierAdditionTest.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.hotspot.test/src/com/oracle/graal/hotspot/test/WriteBarrierAdditionTest.java	Mon Jun 24 11:56:24 2013 +0200
@@ -22,27 +22,60 @@
  */
 package com.oracle.graal.hotspot.test;
 
+import java.lang.ref.*;
+import java.lang.reflect.*;
+
 import org.junit.*;
 
 import com.oracle.graal.api.code.*;
+import com.oracle.graal.api.meta.*;
+import com.oracle.graal.api.runtime.*;
 import com.oracle.graal.compiler.test.*;
 import com.oracle.graal.debug.*;
+import com.oracle.graal.hotspot.meta.*;
 import com.oracle.graal.hotspot.phases.*;
+import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.HeapAccess.WriteBarrierType;
-import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.extended.*;
 import com.oracle.graal.nodes.spi.Lowerable.LoweringType;
+import com.oracle.graal.phases.*;
 import com.oracle.graal.phases.common.*;
+import com.oracle.graal.phases.common.InliningUtil.InlineInfo;
+import com.oracle.graal.phases.common.InliningUtil.InliningPolicy;
 import com.oracle.graal.phases.tiers.*;
 
+/**
+ * The following unit tests assert the presence of write barriers for both Serial and G1 GCs.
+ * Normally, the tests check for compile-time inserted barriers. However, there are cases of
+ * unsafe loads of the java.lang.ref.Reference.referent field where runtime checks also have to
+ * be performed. For those cases, the unit tests check the presence of the compile-time inserted
+ * barriers. Concerning the runtime checks, the results of variable inputs (object types and
+ * offsets) passed as input parameters can be checked against printed output from the G1 write
+ * barrier snippets. The runtime checks have been validated offline.
+ */
+
 public class WriteBarrierAdditionTest extends GraalCompilerTest {
 
+    private final MetaAccessProvider metaAccessProvider;
+
+    public WriteBarrierAdditionTest() {
+        this.metaAccessProvider = Graal.getRequiredCapability(MetaAccessProvider.class);
+    }
+
     public static class Container {
 
         public Container a;
         public Container b;
     }
 
+    /**
+     * Expected 2 barriers for the Serial GC and 4 for G1 (2 pre + 2 post).
+     */
+    @Test
+    public void test1() throws Exception {
+        test("test1Snippet", ((HotSpotRuntime) runtime()).config.useG1GC ? 4 : 2);
+    }
+
     public static void test1Snippet() {
         Container main = new Container();
         Container temp1 = new Container();
@@ -51,6 +84,14 @@
         main.b = temp2;
     }
 
+    /**
+     * Expected 4 barriers for the Serial GC and 8 for G1 (4 pre + 4 post).
+     */
+    @Test
+    public void test2() throws Exception {
+        test("test2Snippet", ((HotSpotRuntime) runtime()).config.useG1GC ? 8 : 4);
+    }
+
     public static void test2Snippet(boolean test) {
         Container main = new Container();
         Container temp1 = new Container();
@@ -66,6 +107,14 @@
         }
     }
 
+    /**
+     * Expected 4 barriers for the Serial GC and 8 for G1 (4 pre + 4 post).
+     */
+    @Test
+    public void test3() throws Exception {
+        test("test3Snippet", ((HotSpotRuntime) runtime()).config.useG1GC ? 8 : 4);
+    }
+
     public static void test3Snippet() {
         Container[] main = new Container[10];
         Container temp1 = new Container();
@@ -77,42 +126,144 @@
         for (int i = 0; i < 10; i++) {
             main[i].a = main[i].b = temp2;
         }
-
     }
 
+    /**
+     * Expected 2 barriers for the Serial GC and 5 for G1 (3 pre + 2 post) The (2 or 4) barriers are
+     * emitted while initializing the fields of the WeakReference instance. The extra pre barrier of
+     * G1 concerns the read of the referent field.
+     */
     @Test
-    public void test1() {
-        test("test1Snippet", 2);
+    public void test4() throws Exception {
+        test("test4Snippet", ((HotSpotRuntime) runtime()).config.useG1GC ? 5 : 2);
     }
 
-    @Test
-    public void test2() {
-        test("test2Snippet", 4);
+    public static Object test4Snippet() {
+        WeakReference<Object> weakRef = new WeakReference<>(new Object());
+        return weakRef.get();
     }
 
+    static WeakReference<Object> wr = new WeakReference<>(new Object());
+    static Container con = new Container();
+
+    /**
+     * Expected 4 barriers for the Serial GC and 9 for G1 (5 pre + 4 post). In this test, we load
+     * the correct offset of the WeakReference object so naturally we assert the presence of the pre
+     * barrier.
+     */
     @Test
-    public void test3() {
-        test("test3Snippet", 4);
+    public void test5() throws Exception {
+        test("test5Snippet", ((HotSpotRuntime) runtime()).config.useG1GC ? 9 : 4);
     }
 
-    private void test(final String snippet, final int expectedBarriers) {
+    public static Object test5Snippet() throws Exception {
+        return UnsafeLoadNode.load(wr, 0, 16, Kind.Object);
+    }
+
+    /**
+     * The following test concerns the runtime checks of the unsafe loads. In this test, we unsafely
+     * load the java.lang.ref.Reference.referent field so the pre barrier has to be executed.
+     */
+    @Test
+    public void test6() throws Exception {
+        test2("test6Snippet", wr, new Long(HotSpotRuntime.referentOffset()), null);
+    }
+
+    /**
+     * The following test concerns the runtime checks of the unsafe loads. In this test, we unsafely
+     * load a matching offset of a wrong object so the pre barrier must not be executed.
+     */
+    @Test
+    public void test7() throws Exception {
+        test2("test6Snippet", con, new Long(HotSpotRuntime.referentOffset()), null);
+    }
+
+    /**
+     * The following test concerns the runtime checks of the unsafe loads. In this test, we unsafely
+     * load a non-matching offset field of the java.lang.ref.Reference object so the pre barrier must
+     * not be executed.
+     */
+    @Test
+    public void test8() throws Exception {
+        test2("test6Snippet", wr, new Long(32), null);
+    }
+
+    @SuppressWarnings("unused")
+    public static Object test6Snippet(Object a, Object b, Object c) throws Exception {
+        return UnsafeLoadNode.load(a, 0, ((Long) b).longValue(), Kind.Object);
+    }
+
+    private HotSpotInstalledCode getInstalledCode(String name) throws Exception {
+        final Method method = WriteBarrierAdditionTest.class.getMethod(name, Object.class, Object.class, Object.class);
+        final HotSpotResolvedJavaMethod javaMethod = (HotSpotResolvedJavaMethod) metaAccessProvider.lookupJavaMethod(method);
+        final HotSpotInstalledCode installedBenchmarkCode = (HotSpotInstalledCode) getCode(javaMethod, parse(method));
+        return installedBenchmarkCode;
+    }
+
+    private void test(final String snippet, final int expectedBarriers) throws Exception, SecurityException {
         Debug.scope("WriteBarrierAditionTest", new DebugDumpScope(snippet), new Runnable() {
 
             public void run() {
                 StructuredGraph graph = parse(snippet);
                 HighTierContext context = new HighTierContext(runtime(), new Assumptions(false), replacements);
+                new InliningPhase(runtime(), replacements, context.getAssumptions(), null, getDefaultPhasePlan(), OptimisticOptimizations.ALL, new InlineAllPolicy()).apply(graph);
                 new LoweringPhase(LoweringType.BEFORE_GUARDS).apply(graph, context);
                 new WriteBarrierAdditionPhase().apply(graph);
                 Debug.dump(graph, "After Write Barrier Addition");
-                final int barriers = graph.getNodes(SerialWriteBarrier.class).count();
+
+                int barriers = 0;
+                if (((HotSpotRuntime) runtime()).config.useG1GC) {
+                    barriers = graph.getNodes(G1PreWriteBarrier.class).count() + graph.getNodes(G1PostWriteBarrier.class).count();
+                } else {
+                    barriers = graph.getNodes(SerialWriteBarrier.class).count();
+                }
                 Assert.assertTrue(barriers == expectedBarriers);
                 for (WriteNode write : graph.getNodes(WriteNode.class)) {
-                    if (write.getWriteBarrierType() != WriteBarrierType.NONE) {
-                        Assert.assertTrue(write.successors().count() == 1);
-                        Assert.assertTrue(write.next() instanceof SerialWriteBarrier);
+                    if (((HotSpotRuntime) runtime()).config.useG1GC) {
+                        if (write.getWriteBarrierType() != WriteBarrierType.NONE) {
+                            Assert.assertTrue(write.successors().count() == 1);
+                            Assert.assertTrue(write.next() instanceof G1PostWriteBarrier);
+                            Assert.assertTrue(write.predecessor() instanceof G1PreWriteBarrier);
+                        }
+                    } else {
+                        if (write.getWriteBarrierType() != WriteBarrierType.NONE) {
+                            Assert.assertTrue(write.successors().count() == 1);
+                            Assert.assertTrue(write.next() instanceof SerialWriteBarrier);
+                        }
+                    }
+                }
+
+                for (ReadNode read : graph.getNodes(ReadNode.class)) {
+                    if (read.getWriteBarrierType() != WriteBarrierType.NONE) {
+                        if (read.location() instanceof ConstantLocationNode) {
+                            Assert.assertTrue(((ConstantLocationNode) (read.location())).getDisplacement() == HotSpotRuntime.referentOffset());
+                        } else {
+                            Assert.assertTrue(((IndexedLocationNode) (read.location())).getDisplacement() == HotSpotRuntime.referentOffset());
+                        }
+                        Assert.assertTrue(((HotSpotRuntime) runtime()).config.useG1GC);
+                        Assert.assertTrue(read.getWriteBarrierType() == WriteBarrierType.PRECISE);
+                        Assert.assertTrue(read.next() instanceof G1PreWriteBarrier);
                     }
                 }
             }
         });
     }
+
+    private void test2(final String snippet, Object a, Object b, Object c) throws Exception {
+        HotSpotInstalledCode code = getInstalledCode(snippet);
+        code.execute(a, b, c);
+    }
+
+    final class InlineAllPolicy implements InliningPolicy {
+
+        @Override
+        public boolean continueInlining(StructuredGraph graph) {
+            return true;
+        }
+
+        @Override
+        public boolean isWorthInlining(InlineInfo info, int inliningDepth, double probability, double relevance, boolean fullyProcessed) {
+            return true;
+        }
+    }
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/meta/HotSpotRuntime.java	Mon Jun 24 11:56:24 2013 +0200
@@ -32,6 +32,8 @@
 import static com.oracle.graal.hotspot.HotSpotForeignCallLinkage.RegisterEffect.*;
 import static com.oracle.graal.hotspot.HotSpotForeignCallLinkage.Transition.*;
 import static com.oracle.graal.hotspot.HotSpotGraalRuntime.*;
+import static com.oracle.graal.hotspot.nodes.G1PostWriteBarrierStubCall.*;
+import static com.oracle.graal.hotspot.nodes.G1PreWriteBarrierStubCall.*;
 import static com.oracle.graal.hotspot.nodes.MonitorExitStubCall.*;
 import static com.oracle.graal.hotspot.nodes.NewArrayStubCall.*;
 import static com.oracle.graal.hotspot.nodes.NewInstanceStubCall.*;
@@ -51,8 +53,6 @@
 import static com.oracle.graal.phases.GraalOptions.*;
 import static com.oracle.graal.replacements.Log.*;
 import static com.oracle.graal.replacements.MathSubstitutionsX86.*;
-import static com.oracle.graal.hotspot.nodes.G1PostWriteBarrierStubCall.*;
-import static com.oracle.graal.hotspot.nodes.G1PreWriteBarrierStubCall.*;
 
 import java.lang.reflect.*;
 import java.util.*;
@@ -534,7 +534,8 @@
             HotSpotResolvedJavaField field = (HotSpotResolvedJavaField) loadField.field();
             ValueNode object = loadField.isStatic() ? ConstantNode.forObject(field.getDeclaringClass().mirror(), this, graph) : loadField.object();
             assert loadField.kind() != Kind.Illegal;
-            ReadNode memoryRead = graph.add(new ReadNode(object, createFieldLocation(graph, field), loadField.stamp(), WriteBarrierType.NONE, (loadField.kind() == Kind.Object)));
+            WriteBarrierType barrierType = getFieldLoadBarrierType(field);
+            ReadNode memoryRead = graph.add(new ReadNode(object, createFieldLocation(graph, field), loadField.stamp(), barrierType, (loadField.kind() == Kind.Object)));
             tool.createNullCheckGuard(memoryRead, object);
 
             graph.replaceFixedWithFixed(loadField, memoryRead);
@@ -616,15 +617,7 @@
         } else if (n instanceof UnsafeLoadNode) {
             UnsafeLoadNode load = (UnsafeLoadNode) n;
             assert load.kind() != Kind.Illegal;
-            IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, load.accessKind(), load.displacement(), load.offset(), graph, 1);
-            // Unsafe Accesses to the metaspace or to any
-            // absolute address do not perform uncompression.
-            boolean compress = (!load.object().isNullConstant() && load.accessKind() == Kind.Object);
-            ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), WriteBarrierType.NONE, compress));
-            // An unsafe read must not floating outside its block as may float above an explicit
-            // null check on its object.
-            memoryRead.setGuard(AbstractBeginNode.prevBegin(load));
-            graph.replaceFixedWithFixed(load, memoryRead);
+            lowerUnsafeLoad(load);
         } else if (n instanceof UnsafeStoreNode) {
             UnsafeStoreNode store = (UnsafeStoreNode) n;
             IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, store.accessKind(), store.displacement(), store.offset(), graph, 1);
@@ -781,6 +774,10 @@
             if (tool.getLoweringType() == LoweringType.AFTER_GUARDS) {
                 monitorSnippets.lower((MonitorExitNode) n, tool);
             }
+        } else if (n instanceof G1PreWriteBarrier) {
+            writeBarrierSnippets.lower((G1PreWriteBarrier) n, tool);
+        } else if (n instanceof G1PostWriteBarrier) {
+            writeBarrierSnippets.lower((G1PostWriteBarrier) n, tool);
         } else if (n instanceof SerialWriteBarrier) {
             writeBarrierSnippets.lower((SerialWriteBarrier) n, tool);
         } else if (n instanceof SerialArrayRangeWriteBarrier) {
@@ -828,6 +825,107 @@
         return hub;
     }
 
+    public static long referentOffset() {
+        try {
+            return unsafe.objectFieldOffset(java.lang.ref.Reference.class.getDeclaredField("referent"));
+        } catch (Exception e) {
+            throw new GraalInternalError(e);
+        }
+    }
+
+    /**
+     * The following method lowers the unsafe load node. If any GC besides G1 is used, the unsafe
+ * load is lowered normally to a read node. However, if G1 is used and the unsafe load could
+ * not be canonicalized to a load field, a runtime check has to be inserted in order to add a
+     * g1-pre barrier if the loaded field is the referent field of the java.lang.ref.Reference
+     * class. The following code constructs the runtime check:
+     * 
+     * <pre>
+     * if (offset == referentOffset() && type == java.lang.ref.Reference) {
+     *     read;
+     *     G1PreWriteBarrier(read);
+     * } else {
+     *     read;
+     * }
+     * 
+     * </pre>
+     * 
+     * TODO (ck): Replace the code below with a snippet.
+     * 
+     */
+    private void lowerUnsafeLoad(UnsafeLoadNode load) {
+        StructuredGraph graph = load.graph();
+        boolean compress = (!load.object().isNullConstant() && load.accessKind() == Kind.Object);
+        if (config().useG1GC && load.object().kind() == Kind.Object && load.accessKind() == Kind.Object && !load.object().objectStamp().alwaysNull() && load.object().objectStamp().type() != null &&
+                        !(load.object().objectStamp().type().isArray())) {
+            IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, load.accessKind(), load.displacement(), load.offset(), graph, 1);
+            // Calculate offset+displacement
+            IntegerAddNode addNode = graph.add(new IntegerAddNode(Kind.Long, load.offset(), ConstantNode.forInt(load.displacement(), graph)));
+            // Compare previous result with referent offset (16)
+            CompareNode offsetCondition = CompareNode.createCompareNode(Condition.EQ, addNode, ConstantNode.forLong(referentOffset(), graph));
+            // Instance of unsafe load is java.lang.ref.Reference
+            InstanceOfNode instanceOfNode = graph.add(new InstanceOfNode(lookupJavaType(java.lang.ref.Reference.class), load.object(), null));
+            // The two barriers
+            ReadNode memoryReadNoBarrier = graph.add(new ReadNode(load.object(), location, load.stamp(), WriteBarrierType.NONE, compress));
+            ReadNode memoryReadBarrier = graph.add(new ReadNode(load.object(), location, load.stamp(), WriteBarrierType.PRECISE, compress));
+
+            // EndNodes
+            EndNode leftTrue = graph.add(new EndNode());
+            EndNode leftFalse = graph.add(new EndNode());
+            EndNode rightFirst = graph.add(new EndNode());
+            EndNode rightSecond = graph.add(new EndNode());
+
+            // MergeNodes
+            MergeNode mergeNoBarrier = graph.add(new MergeNode());
+            MergeNode mergeFinal = graph.add(new MergeNode());
+
+            // IfNodes
+            IfNode ifNodeType = graph.add(new IfNode(instanceOfNode, memoryReadBarrier, leftFalse, 0.1));
+            IfNode ifNodeOffset = graph.add(new IfNode(offsetCondition, ifNodeType, rightFirst, 0.1));
+
+            // Both branches are true (i.e. Add the barrier)
+            memoryReadBarrier.setNext(leftTrue);
+            mergeNoBarrier.addForwardEnd(rightFirst);
+            mergeNoBarrier.addForwardEnd(leftFalse);
+            mergeNoBarrier.setNext(memoryReadNoBarrier);
+            memoryReadNoBarrier.setNext(rightSecond);
+            mergeFinal.addForwardEnd(leftTrue);
+            mergeFinal.addForwardEnd(rightSecond);
+
+            PhiNode phiNode = graph.add(new PhiNode(load.accessKind(), mergeFinal));
+            phiNode.addInput(memoryReadBarrier);
+            phiNode.addInput(memoryReadNoBarrier);
+
+            // An unsafe read must not float outside its block as it may float above an explicit
+            // null check on its object.
+            memoryReadNoBarrier.setGuard(AbstractBeginNode.prevBegin(memoryReadNoBarrier));
+            memoryReadBarrier.setGuard(AbstractBeginNode.prevBegin(memoryReadBarrier));
+
+            assert load.successors().count() == 1;
+            Node next = load.successors().first();
+            load.replaceAndDelete(ifNodeOffset);
+            mergeFinal.setNext((FixedNode) next);
+            ifNodeOffset.replaceAtUsages(phiNode);
+        } else {
+            IndexedLocationNode location = IndexedLocationNode.create(ANY_LOCATION, load.accessKind(), load.displacement(), load.offset(), graph, 1);
+            // Unsafe Accesses to the metaspace or to any
+            // absolute address do not perform uncompression.
+            ReadNode memoryRead = graph.add(new ReadNode(load.object(), location, load.stamp(), WriteBarrierType.NONE, compress));
+            // An unsafe read must not float outside its block as it may float above an explicit
+            // null check on its object.
+            memoryRead.setGuard(AbstractBeginNode.prevBegin(load));
+            graph.replaceFixedWithFixed(load, memoryRead);
+        }
+    }
+
+    private static WriteBarrierType getFieldLoadBarrierType(HotSpotResolvedJavaField loadField) {
+        WriteBarrierType barrierType = WriteBarrierType.NONE;
+        if (config().useG1GC && loadField.getKind() == Kind.Object && loadField.getDeclaringClass().mirror() == java.lang.ref.Reference.class && loadField.getName().equals("referent")) {
+            barrierType = WriteBarrierType.PRECISE;
+        }
+        return barrierType;
+    }
+
     private static WriteBarrierType getFieldStoreBarrierType(StoreFieldNode storeField) {
         WriteBarrierType barrierType = WriteBarrierType.NONE;
         if (storeField.field().getKind() == Kind.Object && !storeField.value().objectStamp().alwaysNull()) {
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/WriteBarrierAdditionPhase.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/phases/WriteBarrierAdditionPhase.java	Mon Jun 24 11:56:24 2013 +0200
@@ -22,6 +22,8 @@
  */
 package com.oracle.graal.hotspot.phases;
 
+import com.oracle.graal.graph.*;
+import com.oracle.graal.hotspot.replacements.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.HeapAccess.WriteBarrierType;
 import com.oracle.graal.nodes.extended.*;
@@ -35,6 +37,11 @@
 
     @Override
     protected void run(StructuredGraph graph) {
+        for (ReadNode node : graph.getNodes(ReadNode.class)) {
+            if (node.getWriteBarrierType() == WriteBarrierType.PRECISE) {
+                addReadNodeBarriers(node, graph);
+            }
+        }
         for (WriteNode node : graph.getNodes(WriteNode.class)) {
             addWriteNodeBarriers(node, graph);
         }
@@ -48,12 +55,27 @@
         }
     }
 
+    private static void addReadNodeBarriers(ReadNode node, StructuredGraph graph) {
+        assert HotSpotReplacementsUtil.useG1GC();
+        graph.addAfterFixed(node, graph.add(new G1PreWriteBarrier(node.object(), node, node.location(), false)));
+    }
+
     private static void addWriteNodeBarriers(WriteNode node, StructuredGraph graph) {
         WriteBarrierType barrierType = node.getWriteBarrierType();
         if (barrierType == WriteBarrierType.PRECISE) {
-            graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.location(), true)));
+            if (HotSpotReplacementsUtil.useG1GC()) {
+                graph.addBeforeFixed(node, graph.add(new G1PreWriteBarrier(node.object(), null, node.location(), true)));
+                graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.value(), node.location(), true)));
+            } else {
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.location(), true)));
+            }
         } else if (barrierType == WriteBarrierType.IMPRECISE) {
-            graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.location(), false)));
+            if (HotSpotReplacementsUtil.useG1GC()) {
+                graph.addBeforeFixed(node, graph.add(new G1PreWriteBarrier(node.object(), null, node.location(), true)));
+                graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.value(), node.location(), false)));
+            } else {
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.location(), false)));
+            }
         } else {
             assert barrierType == WriteBarrierType.NONE;
         }
@@ -63,17 +85,31 @@
     private static void addCASBarriers(CompareAndSwapNode node, StructuredGraph graph) {
         WriteBarrierType barrierType = node.getWriteBarrierType();
         if (barrierType == WriteBarrierType.PRECISE) {
-            graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getLocation(), true)));
+            if (HotSpotReplacementsUtil.useG1GC()) {
+                graph.addBeforeFixed(node, graph.add(new G1PreWriteBarrier(node.object(), node.expected(), node.getLocation(), false)));
+                graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.newValue(), node.getLocation(), true)));
+            } else {
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getLocation(), true)));
+            }
         } else if (barrierType == WriteBarrierType.IMPRECISE) {
-            graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getLocation(), false)));
+            if (HotSpotReplacementsUtil.useG1GC()) {
+                graph.addBeforeFixed(node, graph.add(new G1PreWriteBarrier(node.object(), node.expected(), node.getLocation(), false)));
+                graph.addAfterFixed(node, graph.add(new G1PostWriteBarrier(node.object(), node.newValue(), node.getLocation(), false)));
+            } else {
+                graph.addAfterFixed(node, graph.add(new SerialWriteBarrier(node.object(), node.getLocation(), false)));
+            }
         } else {
             assert barrierType == WriteBarrierType.NONE;
         }
     }
 
     private static void addArrayRangeBarriers(ArrayRangeWriteNode node, StructuredGraph graph) {
-        SerialArrayRangeWriteBarrier serialArrayRangeWriteBarrier = graph.add(new SerialArrayRangeWriteBarrier(node.getArray(), node.getIndex(), node.getLength()));
-        graph.addAfterFixed(node, serialArrayRangeWriteBarrier);
+        if (HotSpotReplacementsUtil.useG1GC()) {
+            throw new GraalInternalError("G1 does not yet suppot barriers for ArrayCopy Intrinsics. Run with -G:-IntrinsifyArrayCopy");
+        } else {
+            SerialArrayRangeWriteBarrier serialArrayRangeWriteBarrier = graph.add(new SerialArrayRangeWriteBarrier(node.getArray(), node.getIndex(), node.getLength()));
+            graph.addAfterFixed(node, serialArrayRangeWriteBarrier);
+        }
     }
 
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/replacements/WriteBarrierSnippets.java	Mon Jun 24 11:56:24 2013 +0200
@@ -83,10 +83,114 @@
         }
     }
 
+    /**
+     * Log method for debugging purposes.
+     */
+    static void log(boolean enabled, String format, WordBase value) {
+        if (enabled) {
+            Log.printf(format, value.rawValue());
+        }
+    }
+
+    @Snippet
+    public static void g1PreWriteBarrier(Object object, Object expectedObject, Object location, @ConstantParameter boolean doLoad) {
+        Word thread = thread();
+        Object fixedObject = FixedValueAnchorNode.getObject(object);
+        Object fixedExpectedObject = FixedValueAnchorNode.getObject(expectedObject);
+        Word field = (Word) Word.fromArray(fixedObject, location);
+        Word previousOop = (Word) Word.fromObject(fixedExpectedObject);
+        byte markingValue = thread.readByte(HotSpotReplacementsUtil.g1SATBQueueMarkingOffset());
+        Word bufferAddress = thread.readWord(HotSpotReplacementsUtil.g1SATBQueueBufferOffset());
+        Word indexAddress = thread.add(HotSpotReplacementsUtil.g1SATBQueueIndexOffset());
+        Word indexValue = indexAddress.readWord(0);
+
+        // If the concurrent marker is enabled, the barrier is issued.
+        if (markingValue != (byte) 0) {
+            // If the previous value has to be loaded (before the write), the load is issued.
+            // The load is always issued except the cases of CAS and referent field.
+            if (doLoad) {
+                previousOop = (Word) Word.fromObject(field.readObject(0));
+            }
+            // If the previous value is null the barrier should not be issued.
+            if (previousOop.notEqual(0)) {
+                // If the thread-local SATB buffer is full issue a native call which will
+                // initialize a new one and add the entry.
+                if (indexValue.notEqual(0)) {
+                    Word nextIndex = indexValue.subtract(HotSpotReplacementsUtil.wordSize());
+                    Word logAddress = bufferAddress.add(nextIndex);
+                    // Log the object to be marked as well as update the SATB's buffer next index.
+                    logAddress.writeWord(0, previousOop);
+                    indexAddress.writeWord(0, nextIndex);
+                } else {
+                    G1PreWriteBarrierStubCall.call(previousOop.toObject());
+                }
+            }
+        }
+    }
+
+    @Snippet
+    public static void g1PostWriteBarrier(Object object, Object value, Object location, @ConstantParameter boolean usePrecise) {
+        Word thread = thread();
+        Object fixedObject = FixedValueAnchorNode.getObject(object);
+        Object fixedValue = FixedValueAnchorNode.getObject(value);
+        Word oop = (Word) Word.fromObject(fixedObject);
+        Word field;
+        if (usePrecise) {
+            field = (Word) Word.fromArray(fixedObject, location);
+        } else {
+            field = oop;
+        }
+
+        Word writtenValue = (Word) Word.fromObject(fixedValue);
+        Word bufferAddress = thread.readWord(HotSpotReplacementsUtil.g1CardQueueBufferOffset());
+        Word indexAddress = thread.add(HotSpotReplacementsUtil.g1CardQueueIndexOffset());
+        Word indexValue = thread.readWord(HotSpotReplacementsUtil.g1CardQueueIndexOffset());
+        // The result of the xor reveals whether the installed pointer crosses heap regions.
+        // In case it does the write barrier has to be issued.
+        Word xorResult = (field.xor(writtenValue)).unsignedShiftRight(HotSpotReplacementsUtil.logOfHRGrainBytes());
+
+        // Calculate the address of the card to be enqueued to the
+        // thread local card queue.
+        Word cardBase = field.unsignedShiftRight(cardTableShift());
+        long startAddress = cardTableStart();
+        int displacement = 0;
+        if (((int) startAddress) == startAddress) {
+            displacement = (int) startAddress;
+        } else {
+            cardBase = cardBase.add(Word.unsigned(cardTableStart()));
+        }
+        Word cardAddress = cardBase.add(displacement);
+
+        if (xorResult.notEqual(0)) {
+            // If the written value is not null continue with the barrier addition.
+            if (writtenValue.notEqual(0)) {
+                byte cardByte = cardAddress.readByte(0);
+                // If the card is already dirty, (hence already enqueued) skip the insertion.
+                if (cardByte != (byte) 0) {
+                    cardAddress.writeByte(0, (byte) 0);
+                    // If the thread local card queue is full, issue a native call which will
+                    // initialize a new one and add the card entry.
+                    if (indexValue.notEqual(0)) {
+                        Word nextIndex = indexValue.subtract(HotSpotReplacementsUtil.wordSize());
+                        Word logAddress = bufferAddress.add(nextIndex);
+                        // Log the object to be scanned as well as update
+                        // the card queue's next index.
+                        logAddress.writeWord(0, cardAddress);
+                        indexAddress.writeWord(0, nextIndex);
+                    } else {
+                        G1PostWriteBarrierStubCall.call(cardAddress);
+                    }
+                }
+            }
+        }
+    }
+
     public static class Templates extends AbstractTemplates {
 
         private final SnippetInfo serialArrayWriteBarrier = snippet(WriteBarrierSnippets.class, "serialArrayWriteBarrier");
         private final SnippetInfo serialArrayRangeWriteBarrier = snippet(WriteBarrierSnippets.class, "serialArrayRangeWriteBarrier");
+        private final SnippetInfo g1PreWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PreWriteBarrier");
+        private final SnippetInfo g1PostWriteBarrier = snippet(WriteBarrierSnippets.class, "g1PostWriteBarrier");
 
         public Templates(CodeCacheProvider runtime, Replacements replacements, TargetDescription target) {
             super(runtime, replacements, target);
@@ -107,5 +211,23 @@
             args.add("length", arrayRangeWriteBarrier.getLength());
             template(args).instantiate(runtime, arrayRangeWriteBarrier, DEFAULT_REPLACER, args);
         }
+
+        public void lower(G1PreWriteBarrier writeBarrierPre, @SuppressWarnings("unused") LoweringTool tool) {
+            Arguments args = new Arguments(g1PreWriteBarrier);
+            args.add("object", writeBarrierPre.getObject());
+            args.add("expectedObject", writeBarrierPre.getExpectedObject());
+            args.add("location", writeBarrierPre.getLocation());
+            args.addConst("doLoad", writeBarrierPre.doLoad());
+            template(args).instantiate(runtime, writeBarrierPre, DEFAULT_REPLACER, args);
+        }
+
+        public void lower(G1PostWriteBarrier writeBarrierPost, @SuppressWarnings("unused") LoweringTool tool) {
+            Arguments args = new Arguments(g1PostWriteBarrier);
+            args.add("object", writeBarrierPost.getObject());
+            args.add("value", writeBarrierPost.getValue());
+            args.add("location", writeBarrierPost.getLocation());
+            args.addConst("usePrecise", writeBarrierPost.usePrecise());
+            template(args).instantiate(runtime, writeBarrierPost, DEFAULT_REPLACER, args);
+        }
     }
 }
--- a/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.hotspot/src/com/oracle/graal/hotspot/stubs/NewInstanceStub.java	Mon Jun 24 11:56:24 2013 +0200
@@ -121,6 +121,11 @@
      *         operation was unsuccessful
      */
     static Word refillAllocate(Word intArrayHub, int sizeInBytes, boolean log) {
+        // If G1 is enabled, the "eden" allocation space is not always the same,
+        // so we have to take the slow path to allocate a new TLAB.
+        if (HotSpotReplacementsUtil.useG1GC()) {
+            return Word.zero();
+        }
         if (!useTLAB()) {
             return edenAllocate(Word.unsigned(sizeInBytes), log);
         }
--- a/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/FloatableAccessNode.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.nodes/src/com/oracle/graal/nodes/extended/FloatableAccessNode.java	Mon Jun 24 11:56:24 2013 +0200
@@ -22,6 +22,7 @@
  */
 package com.oracle.graal.nodes.extended;
 
+import com.oracle.graal.api.meta.*;
 import com.oracle.graal.nodes.*;
 import com.oracle.graal.nodes.type.*;
 
@@ -43,4 +44,13 @@
     }
 
     public abstract FloatingAccessNode asFloatingNode(ValueNode lastLocationAccess);
+
+    /**
+     * AccessNodes can float only if their location identities are not ANY_LOCATION. Furthermore,
+     * when G1 is enabled, any access (read) of the java.lang.ref.Reference.referent field that has
+     * an attached write barrier with pre-semantics cannot float either.
+     */
+    public boolean canFloat() {
+        return location().getLocationIdentity() != LocationIdentity.ANY_LOCATION && getWriteBarrierType() == WriteBarrierType.NONE;
+    }
 }
--- a/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/FloatingReadPhase.java	Mon Jun 24 11:43:48 2013 +0200
+++ b/graal/com.oracle.graal.phases.common/src/com/oracle/graal/phases/common/FloatingReadPhase.java	Mon Jun 24 11:56:24 2013 +0200
@@ -175,7 +175,7 @@
             StructuredGraph graph = accessNode.graph();
             assert accessNode.getNullCheck() == false;
             LocationIdentity locationIdentity = accessNode.location().getLocationIdentity();
-            if (locationIdentity != ANY_LOCATION) {
+            if (accessNode.canFloat()) {
                 ValueNode lastLocationAccess = state.getLastLocationAccess(locationIdentity);
                 FloatingAccessNode floatingNode = accessNode.asFloatingNode(lastLocationAccess);
                 floatingNode.setNullCheck(accessNode.getNullCheck());