OpenJDK / portola / portola
changeset 49493:1f9dd2360b17
Merge
author | jwilhelm |
date | Sat, 24 Mar 2018 01:08:35 +0100 |
parents | a11d3a5ca20b bde392011cd8 |
children | f5e614a1ed98 |
files | make/autoconf/hotspot.m4 src/hotspot/share/gc/cms/cmsCardTable.cpp src/hotspot/share/gc/cms/parCardTableModRefBS.cpp src/hotspot/share/gc/g1/g1BarrierSet.cpp src/hotspot/share/gc/g1/g1BarrierSet.hpp src/hotspot/share/gc/g1/g1Policy.hpp src/hotspot/share/gc/parallel/psCardTable.cpp src/hotspot/share/gc/shared/barrierSet.inline.hpp src/hotspot/share/gc/shared/cardTable.hpp src/hotspot/share/gc/shared/cardTableBarrierSet.cpp src/hotspot/share/gc/shared/cardTableBarrierSet.hpp src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp src/hotspot/share/gc/shared/cardTableModRefBS.cpp src/hotspot/share/gc/shared/cardTableModRefBS.hpp src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp src/hotspot/share/prims/jvmtiEnter.hpp src/hotspot/share/prims/jvmtiEnter.inline.hpp src/hotspot/share/runtime/interfaceSupport.hpp src/hotspot/share/runtime/interfaceSupport.inline.hpp src/hotspot/share/services/serviceUtil.hpp src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/TraceInliningMode.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotArithmeticLIRGenerator.java |
diffstat | 737 files changed, 13751 insertions(+), 6426 deletions(-) [+] |
--- a/make/CompileToolsHotspot.gmk Thu Mar 29 20:12:02 2018 +0100 +++ b/make/CompileToolsHotspot.gmk Sat Mar 24 01:08:35 2018 +0100 @@ -120,6 +120,7 @@ SRC := \ $(SRC_DIR)/org.graalvm.word/src \ $(SRC_DIR)/org.graalvm.collections/src \ + $(SRC_DIR)/org.graalvm.compiler.bytecode/src \ $(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \ $(SRC_DIR)/org.graalvm.compiler.api.replacements/src \ $(SRC_DIR)/org.graalvm.compiler.code/src \
--- a/make/autoconf/hotspot.m4 Thu Mar 29 20:12:02 2018 +0100 +++ b/make/autoconf/hotspot.m4 Sat Mar 24 01:08:35 2018 +0100 @@ -343,11 +343,10 @@ fi INCLUDE_GRAAL="true" else - # By default enable graal build on linux-x64 or where AOT is available. + # By default enable graal build on x64 or where AOT is available. # graal build requires jvmci. if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \ - (test "x$OPENJDK_TARGET_CPU" = "xx86_64" && \ - test "x$OPENJDK_TARGET_OS" = "xlinux" || \ + (test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \ test "x$ENABLE_AOT" = "xtrue") ; then AC_MSG_RESULT([yes]) JVM_FEATURES_graal="graal"
--- a/make/nb_native/nbproject/configurations.xml Thu Mar 29 20:12:02 2018 +0100 +++ b/make/nb_native/nbproject/configurations.xml Sat Mar 24 01:08:35 2018 +0100 @@ -2480,7 +2480,7 @@ <in>jvmtiClassFileReconstituter.hpp</in> <in>jvmtiCodeBlobEvents.cpp</in> <in>jvmtiCodeBlobEvents.hpp</in> - <in>jvmtiEnter.hpp</in> + <in>jvmtiEnter.inline.hpp</in> <in>jvmtiEnv.cpp</in> <in>jvmtiEnvBase.cpp</in> <in>jvmtiEnvBase.hpp</in> @@ -13398,7 +13398,7 @@ tool="3" flavor2="0"> </item> - <item path="../../src/hotspot/share/prims/jvmtiEnter.hpp" + <item path="../../src/hotspot/share/prims/jvmtiEnter.inline.hpp" ex="false" tool="3" flavor2="0"> @@ -27175,7 +27175,7 @@ tool="3" flavor2="0"> </item> - <item path="../../src/hotspot/share/prims/jvmtiEnter.hpp" + <item path="../../src/hotspot/share/prims/jvmtiEnter.inline.hpp" ex="false" tool="3" flavor2="0">
--- a/src/hotspot/cpu/aarch64/aarch64.ad Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/aarch64.ad Sat Mar 24 01:08:35 2018 +0100 @@ -996,7 +996,7 @@ source_hpp %{ #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "opto/addnode.hpp" class CallStubImpl { @@ -5845,8 +5845,8 @@ operand immByteMapBase() %{ // Get base of card map - predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) && - (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base()); + predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableBarrierSet) && + (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base()); match(ConP); op_cost(0);
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -36,7 +36,7 @@ #include "compiler/disassembler.hpp" #include "memory/resourceArea.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" // for the moment we reuse the logical/floating point immediate encode
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -34,10 +34,11 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_aarch64.hpp" #include "oops/objArrayKlass.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_aarch64.inline.hpp" @@ -2174,8 +2175,8 @@ __ stp(length, src_pos, Address(sp, 2*BytesPerWord)); __ str(src, Address(sp, 4*BytesPerWord)); - address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); address copyfunc_addr = StubRoutines::generic_arraycopy(); + assert(copyfunc_addr != NULL, "generic arraycopy stub required"); // The arguments are in java calling convention so we shift them // to C convention @@ -2188,17 +2189,12 @@ assert_different_registers(c_rarg3, j_rarg4); __ mov(c_rarg3, j_rarg3); __ mov(c_rarg4, j_rarg4); - if (copyfunc_addr == NULL) { // Use C version if stub was not generated - __ mov(rscratch1, RuntimeAddress(C_entry)); - __ blrt(rscratch1, 5, 0, 1); - } else { #ifndef PRODUCT - if (PrintC1Statistics) { - __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); - } + if (PrintC1Statistics) { + __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } #endif - __ far_call(RuntimeAddress(copyfunc_addr)); - } + __ far_call(RuntimeAddress(copyfunc_addr)); __ cbz(r0, *stub->continuation()); @@ -2208,14 +2204,12 @@ __ ldp(length, src_pos, Address(sp, 2*BytesPerWord)); __ ldr(src, Address(sp, 4*BytesPerWord)); - if (copyfunc_addr != NULL) { - // r0 is -1^K where K == partial copied count - __ eonw(rscratch1, r0, 0); - // adjust length down and src/end pos up by partial copied count - __ subw(length, length, rscratch1); - __ addw(src_pos, src_pos, rscratch1); - __ addw(dst_pos, dst_pos, rscratch1); - } + // r0 is -1^K where K == partial copied count + __ eonw(rscratch1, r0, 0); + // adjust length down and src/end pos up by partial copied count + __ subw(length, length, rscratch1); + __ addw(src_pos, src_pos, rscratch1); + __ addw(dst_pos, dst_pos, rscratch1); __ b(*stub->entry()); __ bind(*stub->continuation());
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,7 +31,7 @@ #include "c1/c1_Runtime1.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_aarch64.hpp" #include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -65,7 +65,7 @@ // Peephole and CISC spilling both break the graph, and so makes the // scheduler sick. define_pd_global(bool, OptoPeephole, false); -define_pd_global(bool, UseCISCSpill, true); +define_pd_global(bool, UseCISCSpill, false); define_pd_global(bool, OptoScheduling, false); define_pd_global(bool, OptoBundling, false); define_pd_global(bool, OptoRegScheduling, false);
--- a/src/hotspot/cpu/aarch64/frame_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/frame_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -158,4 +158,6 @@ // deoptimization support void interpreter_frame_set_last_sp(intptr_t* sp); + static jint interpreter_frame_expression_stack_direction() { return -1; } + #endif // CPU_AARCH64_VM_FRAME_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -227,9 +227,6 @@ } -inline jint frame::interpreter_frame_expression_stack_direction() { return -1; } - - // Entry frames inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/g1/g1BarrierSet.hpp" +#include "gc/g1/g1CardTable.hpp" +#include "gc/g1/g1BarrierSetAssembler.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "runtime/thread.hpp" +#include "interpreter/interp_masm.hpp" + +#define __ masm-> + +void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, RegSet saved_regs) { + bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0; + if (!dest_uninitialized) { + __ push(saved_regs, sp); + if (count == c_rarg0) { + if (addr == c_rarg1) { + // exactly backwards!! + __ mov(rscratch1, c_rarg0); + __ mov(c_rarg0, c_rarg1); + __ mov(c_rarg1, rscratch1); + } else { + __ mov(c_rarg1, count); + __ mov(c_rarg0, addr); + } + } else { + __ mov(c_rarg0, addr); + __ mov(c_rarg1, count); + } + if (UseCompressedOops) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2); + } + __ pop(saved_regs, sp); + } +} + +void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register start, Register end, Register scratch, RegSet saved_regs) { + __ push(saved_regs, sp); + // must compute element count unless barrier set interface is changed (other platforms supply count) + assert_different_registers(start, end, scratch); + __ lea(scratch, Address(end, BytesPerHeapOop)); + __ sub(scratch, scratch, start); // subtract start to get #bytes + __ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count + __ mov(c_rarg0, start); + __ mov(c_rarg1, scratch); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2); + __ pop(saved_regs, sp); +}
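Editor's note: the three-way mov sequence at the top of the pre-barrier above ("exactly backwards!!") handles an argument-marshalling hazard: if the live values already sit in the outgoing argument registers but in swapped positions, copying one first would clobber the other, so the code routes through rscratch1. A minimal standalone C++ sketch of the same ordering rule, using a toy register file; the names c_rarg0/c_rarg1/rscratch1 mirror the diff, everything else is illustrative only:

    #include <cstdio>

    // Tiny register-file model: slots 0 and 1 stand for the outgoing argument
    // registers (c_rarg0, c_rarg1); addr_reg/count_reg say where the values
    // currently live. The branch structure mirrors gen_write_ref_array_pre_barrier.
    void marshal(long regs[], int addr_reg, int count_reg) {
      const int c_rarg0 = 0, c_rarg1 = 1, rscratch1 = 2;
      if (count_reg == c_rarg0) {
        if (addr_reg == c_rarg1) {
          // exactly backwards: a plain two-mov copy would destroy one value,
          // so swap through the scratch slot.
          regs[rscratch1] = regs[c_rarg0];
          regs[c_rarg0]   = regs[c_rarg1];
          regs[c_rarg1]   = regs[rscratch1];
        } else {
          regs[c_rarg1] = regs[count_reg];  // move count out of c_rarg0's way first
          regs[c_rarg0] = regs[addr_reg];
        }
      } else {
        regs[c_rarg0] = regs[addr_reg];     // safe order: count is not in c_rarg0
        regs[c_rarg1] = regs[count_reg];
      }
    }

    int main() {
      long regs[4] = {111 /* count in c_rarg0 */, 222 /* addr in c_rarg1 */, 0, 0};
      marshal(regs, /*addr_reg=*/1, /*count_reg=*/0);
      std::printf("c_rarg0=%ld (addr), c_rarg1=%ld (count)\n", regs[0], regs[1]);
      return 0;
    }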
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP +#define CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, RegSet saved_regs); + void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register start, Register end, Register tmp, RegSet saved_regs); +}; + +#endif // CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP +#define CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP + +#include "asm/macroAssembler.hpp" +#include "memory/allocation.hpp" +#include "oops/access.hpp" + +class BarrierSetAssembler: public CHeapObj<mtGC> { +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register addr, Register count, RegSet saved_regs) {} + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register start, Register end, Register tmp, RegSet saved_regs) {} +}; + +#endif // CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
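Editor's note: the empty virtual hooks above are the point of this refactoring; the stubGenerator_aarch64.cpp hunk further down fetches the heap's BarrierSetAssembler and brackets the copy loop with arraycopy_prologue/arraycopy_epilogue, so each GC overrides only the barriers it needs instead of the stub switching on bs->kind(). A minimal standalone C++ model of that dispatch, with hypothetical names (BarrierAsmModel, CardTableAsmModel) standing in for the real HotSpot types:

    #include <cstdio>

    // Simplified stand-ins for HotSpot's DecoratorSet flags (values illustrative).
    typedef unsigned long DecoratorSet;
    const DecoratorSet ARRAYCOPY_DISJOINT      = 1UL << 0;
    const DecoratorSet AS_DEST_NOT_INITIALIZED = 1UL << 1;

    // Base class: default hooks emit nothing, mirroring BarrierSetAssembler.
    class BarrierAsmModel {
    public:
      virtual ~BarrierAsmModel() {}
      virtual void arraycopy_prologue(DecoratorSet, bool is_oop) {}
      virtual void arraycopy_epilogue(DecoratorSet, bool is_oop) {}
    };

    // A card-table-style subclass only needs a post barrier, and only for oop arrays.
    class CardTableAsmModel : public BarrierAsmModel {
    public:
      virtual void arraycopy_epilogue(DecoratorSet, bool is_oop) {
        if (is_oop) {
          std::printf("  emit: dirty cards covering the destination range\n");
        }
      }
    };

    // Stub-generator side: barrier emission is a virtual call on whatever
    // assembler the active barrier set supplies, not a switch on its kind.
    void generate_arraycopy_stub(BarrierAsmModel* bs, DecoratorSet decorators, bool is_oop) {
      bs->arraycopy_prologue(decorators, is_oop);
      std::printf("  emit: copy loop\n");
      bs->arraycopy_epilogue(decorators, is_oop);
    }

    int main() {
      CardTableAsmModel ct;
      std::printf("oop arraycopy:\n");
      generate_arraycopy_stub(&ct, ARRAYCOPY_DISJOINT, true);
      std::printf("primitive arraycopy:\n");
      generate_arraycopy_stub(&ct, ARRAYCOPY_DISJOINT, false);
      return 0;
    }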
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" +#include "gc/shared/cardTableBarrierSetAssembler.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "interpreter/interp_masm.hpp" + +#define __ masm-> + +void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register start, Register end, Register scratch, RegSet saved_regs) { + + BarrierSet* bs = Universe::heap()->barrier_set(); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + + Label L_loop; + + __ lsr(start, start, CardTable::card_shift); + __ lsr(end, end, CardTable::card_shift); + __ sub(end, end, start); // number of bytes to copy + + const Register count = end; // 'end' register contains bytes count now + __ load_byte_map_base(scratch); + __ add(start, start, scratch); + if (UseConcMarkSweepGC) { + __ membar(__ StoreStore); + } + __ bind(L_loop); + __ strb(zr, Address(start, count)); + __ subs(count, count, 1); + __ br(Assembler::GE, L_loop); +}
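Editor's note: the arithmetic behind the loop above: a card covers 2^card_shift bytes of heap (512 bytes with HotSpot's default shift of 9), the card for an address is the byte at byte_map_base plus (address >> card_shift), and the stub stores zero (the dirty value) into every card spanned by the inclusive range [start, end]. A small host-side sketch of the same index math with made-up addresses; note the real byte_map_base is pre-biased so the generated code shifts the raw address directly instead of subtracting a heap base:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Assumed defaults: 512-byte cards (card_shift == 9), dirty value 0.
    const int     card_shift = 9;
    const uint8_t dirty_card = 0;
    const uint8_t clean_card = 0xff;

    int main() {
      // Pretend heap range covered by the table: [heap_base, heap_base + heap_size).
      const uintptr_t heap_base = 0x100000;
      const size_t    heap_size = 1 << 20;                       // 1 MiB -> 2048 cards
      std::vector<uint8_t> byte_map(heap_size >> card_shift, clean_card);

      // Inclusive destination range of an oop arraycopy (arbitrary example values).
      uintptr_t start = heap_base + 0x1234;
      uintptr_t end   = heap_base + 0x1a00;                      // address of last oop written

      // Same steps as the generated code: shift both bounds down to card indices,
      // then dirty every card in [start_card, end_card].
      size_t start_card = (start - heap_base) >> card_shift;
      size_t end_card   = (end   - heap_base) >> card_shift;
      for (size_t i = start_card; i <= end_card; i++) {
        byte_map[i] = dirty_card;
      }

      std::printf("dirtied cards %zu..%zu (%zu cards)\n",
                  start_card, end_card, end_card - start_card + 1);
      return 0;
    }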
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP +#define CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register start, Register end, Register tmp, RegSet saved_regs); +}; + +#endif // #ifndef CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +#define __ masm-> + +void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register addr, Register count, RegSet saved_regs) { + + if (is_oop) { + gen_write_ref_array_pre_barrier(masm, decorators, addr, count, saved_regs); + } +} + +void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register start, Register end, Register tmp, + RegSet saved_regs) { + if (is_oop) { + gen_write_ref_array_post_barrier(masm, decorators, start, end, tmp, saved_regs); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP +#define CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/barrierSetAssembler.hpp" + +class ModRefBarrierSetAssembler: public BarrierSetAssembler { +protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, RegSet saved_regs) {} + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register start, Register end, Register tmp, RegSet saved_regs) {} + +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register addr, Register count, RegSet saved_regs); + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register start, Register end, Register tmp, RegSet saved_regs); +}; + +#endif // CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -36,6 +36,7 @@ #include "prims/jvmtiThreadState.hpp" #include "runtime/basicLock.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -24,6 +24,7 @@ */ #include "precompiled.hpp" +#include "interpreter/interp_masm.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -32,7 +33,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" #define __ _masm-> @@ -42,6 +43,14 @@ Register InterpreterRuntime::SignatureHandlerGenerator::to() { return sp; } Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; } +InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator( + const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { + _masm = new MacroAssembler(buffer); + _num_int_args = (method->is_static() ? 1 : 0); + _num_fp_args = 0; + _stack_offset = 0; +} + void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,8 +26,8 @@ #ifndef CPU_AARCH64_VM_INTERPRETERRT_AARCH64_HPP #define CPU_AARCH64_VM_INTERPRETERRT_AARCH64_HPP -#include "asm/macroAssembler.hpp" -#include "memory/allocation.hpp" +// This is included in the middle of class Interpreter. +// Do not include files here. // native method calls @@ -47,12 +47,7 @@ public: // Creation - SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { - _masm = new MacroAssembler(buffer); - _num_int_args = (method->is_static() ? 1 : 0); - _num_fp_args = 0; - _stack_offset = 0; - } + SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); // Code generation void generate(uint64_t fingerprint);
--- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "jvmci/jvmciCompilerToVM.hpp" #include "jvmci/jvmciJavaClasses.hpp" #include "oops/oop.inline.hpp" +#include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_aarch64.inline.hpp"
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -30,7 +30,7 @@ #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "compiler/disassembler.hpp" #include "memory/resourceArea.hpp" @@ -42,7 +42,7 @@ #include "opto/node.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.hpp" @@ -3618,10 +3618,10 @@ // register obj is destroyed afterwards. BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); @@ -4129,7 +4129,7 @@ DirtyCardQueue::byte_offset_of_buf())); BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); @@ -4515,7 +4515,7 @@ void MacroAssembler::load_byte_map_base(Register reg) { jbyte *byte_map_base = - ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base(); + ((CardTableBarrierSet*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base(); if (is_valid_AArch64_address((address)byte_map_base)) { // Strictly speaking the byte_map_base isn't an address at all,
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -79,8 +79,8 @@ void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); - // Maximum size of class area in Metaspace when compressed - uint64_t use_XOR_for_compressed_class_base; + // True if an XOR can be used to expand narrow klass references. + bool use_XOR_for_compressed_class_base; public: MacroAssembler(CodeBuffer* code) : Assembler(code) { @@ -88,7 +88,7 @@ = (operand_valid_for_logical_immediate(false /*is32*/, (uint64_t)Universe::narrow_klass_base()) && ((uint64_t)Universe::narrow_klass_base() - > (1u << log2_intptr(CompressedClassSpaceSize)))); + > (1UL << log2_intptr(Universe::narrow_klass_range())))); } // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
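Editor's note: besides turning the misdescribed uint64_t field into a bool, the hunk above switches the shifted literal from 1u to 1UL. Presumably the motivation is literal width: once log2_intptr(...) reaches 32, shifting a 32-bit unsigned literal is undefined behaviour (and at best a truncated value), while the long literal keeps the comparison in 64 bits on LP64 targets. A two-line illustration under that assumption, with a well-defined shift count:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int n = 35;                       // a plausible log2 of a multi-gigabyte range
      // uint64_t bad = 1u << n;        // shifts a 32-bit value by >= 32: undefined
      uint64_t good = 1UL << n;         // 64-bit literal on LP64: well-defined
      std::printf("1UL << %d = 0x%llx\n", n, (unsigned long long)good);
      return 0;
    }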
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,6 +30,7 @@ #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" #define __ _masm->
--- a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,7 +31,7 @@ #include "code/vmreg.hpp" #include "interpreter/interpreter.hpp" #include "opto/runtime.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -34,6 +34,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp"
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -26,8 +26,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_aarch64.hpp" #include "oops/instanceOop.hpp" @@ -620,111 +620,6 @@ void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); } - // Generate code for an array write pre barrier - // - // addr - starting address - // count - element count - // tmp - scratch register - // saved_regs - registers to be saved before calling static_write_ref_array_pre - // - // Callers must specify which registers to preserve in saved_regs. - // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs. - // - void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) { - BarrierSet* bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - // With G1, don't generate the call if we statically know that the target in uninitialized - if (!dest_uninitialized) { - __ push(saved_regs, sp); - if (count == c_rarg0) { - if (addr == c_rarg1) { - // exactly backwards!! - __ mov(rscratch1, c_rarg0); - __ mov(c_rarg0, c_rarg1); - __ mov(c_rarg1, rscratch1); - } else { - __ mov(c_rarg1, count); - __ mov(c_rarg0, addr); - } - } else { - __ mov(c_rarg0, addr); - __ mov(c_rarg1, count); - } - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); - __ pop(saved_regs, sp); - break; - case BarrierSet::CardTableModRef: - break; - default: - ShouldNotReachHere(); - - } - } - } - - // - // Generate code for an array write post barrier - // - // Input: - // start - register containing starting address of destination array - // end - register containing ending address of destination array - // scratch - scratch register - // saved_regs - registers to be saved before calling static_write_ref_array_post - // - // The input registers are overwritten. - // The ending address is inclusive. - // Callers must specify which registers to preserve in saved_regs. - // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs. 
- void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch, RegSet saved_regs) { - assert_different_registers(start, end, scratch); - BarrierSet* bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - - { - __ push(saved_regs, sp); - // must compute element count unless barrier set interface is changed (other platforms supply count) - assert_different_registers(start, end, scratch); - __ lea(scratch, Address(end, BytesPerHeapOop)); - __ sub(scratch, scratch, start); // subtract start to get #bytes - __ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count - __ mov(c_rarg0, start); - __ mov(c_rarg1, scratch); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); - __ pop(saved_regs, sp); - } - break; - case BarrierSet::CardTableModRef: - { - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); - CardTable* ct = ctbs->card_table(); - assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); - - Label L_loop; - - __ lsr(start, start, CardTable::card_shift); - __ lsr(end, end, CardTable::card_shift); - __ sub(end, end, start); // number of bytes to copy - - const Register count = end; // 'end' register contains bytes count now - __ load_byte_map_base(scratch); - __ add(start, start, scratch); - if (UseConcMarkSweepGC) { - __ membar(__ StoreStore); - } - __ BIND(L_loop); - __ strb(zr, Address(start, count)); - __ subs(count, count, 1); - __ br(Assembler::GE, L_loop); - } - break; - default: - ShouldNotReachHere(); - - } - } - // The inner part of zero_words(). This is the bulk operation, // zeroing words in blocks, possibly using DC ZVA to do it. The // caller is responsible for zeroing the last few words. 
@@ -1456,20 +1351,33 @@ BLOCK_COMMENT("Entry:"); } + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_reg); + if (is_oop) { - gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg); // save regs before copy_memory __ push(RegSet::of(d, count), sp); } copy_memory(aligned, s, d, count, rscratch1, size); + if (is_oop) { __ pop(RegSet::of(d, count), sp); if (VerifyOops) verify_oop_array(size, d, count, r16); __ sub(count, count, 1); // make an inclusive end pointer __ lea(count, Address(d, count, Address::lsl(exact_log2(size)))); - gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet()); } + + bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet()); + __ leave(); __ mov(r0, zr); // return 0 __ ret(lr); @@ -1517,8 +1425,18 @@ __ cmp(rscratch1, count, Assembler::LSL, exact_log2(size)); __ br(Assembler::HS, nooverlap_target); + DecoratorSet decorators = 0; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_regs); + if (is_oop) { - gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_regs); // save regs before copy_memory __ push(RegSet::of(d, count), sp); } @@ -1529,8 +1447,8 @@ verify_oop_array(size, d, count, r16); __ sub(count, count, 1); // make an inclusive end pointer __ lea(count, Address(d, count, Address::lsl(exact_log2(size)))); - gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet()); } + bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet()); __ leave(); __ mov(r0, zr); // return 0 __ ret(lr); @@ -1871,7 +1789,14 @@ } #endif //ASSERT - gen_write_ref_array_pre_barrier(to, count, dest_uninitialized, wb_pre_saved_regs); + DecoratorSet decorators = ARRAYCOPY_CHECKCAST; + bool is_oop = true; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, is_oop, to, count, wb_pre_saved_regs); // save the original count __ mov(count_save, count); @@ -1915,7 +1840,7 @@ __ BIND(L_do_card_marks); __ add(to, to, -heapOopSize); // make an inclusive end pointer - gen_write_ref_array_post_barrier(start_to, to, rscratch1, wb_post_saved_regs); + bs->arraycopy_epilogue(_masm, decorators, is_oop, start_to, to, rscratch1, wb_post_saved_regs); __ bind(L_done_pop); __ pop(RegSet::of(r18, r19, r20, r21), sp);
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -35,6 +35,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" @@ -184,7 +185,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (val == noreg) { __ store_heap_oop_null(obj); @@ -1904,7 +1905,8 @@ in_bytes(InvocationCounter::counter_offset())); const Address mask(r1, in_bytes(MethodData::backedge_mask_offset())); __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, - r0, rscratch1, false, Assembler::EQ, &backedge_counter_overflow); + r0, rscratch1, false, Assembler::EQ, + UseOnStackReplacement ? &backedge_counter_overflow : &dispatch); __ b(dispatch); } __ bind(no_mdo); @@ -1912,7 +1914,8 @@ __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset())); const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset())); __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask, - r0, rscratch2, false, Assembler::EQ, &backedge_counter_overflow); + r0, rscratch2, false, Assembler::EQ, + UseOnStackReplacement ? &backedge_counter_overflow : &dispatch); } else { // not TieredCompilation // increment counter __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset())); @@ -1960,8 +1963,8 @@ } } } + __ bind(dispatch); } - __ bind(dispatch); // Pre-load the next target bytecode into rscratch1 __ load_unsigned_byte(rscratch1, Address(rbcp, 0)); @@ -1981,7 +1984,7 @@ __ b(dispatch); } - if (TieredCompilation || UseOnStackReplacement) { + if (UseOnStackReplacement) { // invocation counter overflow __ bind(backedge_counter_overflow); __ neg(r2, r2); @@ -1991,11 +1994,6 @@ CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), r2); - if (!UseOnStackReplacement) - __ b(dispatch); - } - - if (UseOnStackReplacement) { __ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode // r0: osr nmethod (osr ok) or NULL (osr not possible)
--- a/src/hotspot/cpu/arm/assembler_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/assembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -26,7 +26,7 @@ #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "ci/ciEnv.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" @@ -35,7 +35,7 @@ #include "prims/jvm_misc.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/arm/assembler_arm_32.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/assembler_arm_32.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -26,7 +26,7 @@ #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "ci/ciEnv.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" @@ -35,7 +35,7 @@ #include "prims/jvm_misc.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/arm/assembler_arm_64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/assembler_arm_64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -26,7 +26,7 @@ #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" #include "ci/ciEnv.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" @@ -35,7 +35,7 @@ #include "prims/jvm_misc.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/arm/c1_Defs_arm.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_Defs_arm.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -79,7 +79,7 @@ #else #define PATCHED_ADDR (204) #endif -#define CARDTABLEMODREF_POST_BARRIER_HELPER +#define CARDTABLEBARRIERSET_POST_BARRIER_HELPER #define GENERATE_ADDRESS_IS_PREFERRED #endif // CPU_ARM_VM_C1_DEFS_ARM_HPP
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,10 +31,11 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_arm.hpp" #include "oops/objArrayKlass.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_arm.inline.hpp" @@ -2777,17 +2778,14 @@ #endif // AARCH64 address copyfunc_addr = StubRoutines::generic_arraycopy(); - if (copyfunc_addr == NULL) { // Use C version if stub was not generated - __ call(CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); - } else { + assert(copyfunc_addr != NULL, "generic arraycopy stub required"); #ifndef PRODUCT - if (PrintC1Statistics) { - __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2); - } + if (PrintC1Statistics) { + __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2); + } #endif // !PRODUCT - // the stub is in the code cache so close enough - __ call(copyfunc_addr, relocInfo::runtime_call_type); - } + // the stub is in the code cache so close enough + __ call(copyfunc_addr, relocInfo::runtime_call_type); #ifdef AARCH64 __ raw_pop(length, ZR); @@ -2797,15 +2795,11 @@ __ cbz_32(R0, *stub->continuation()); - if (copyfunc_addr != NULL) { - __ mvn_32(tmp, R0); - restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only - __ sub_32(length, length, tmp); - __ add_32(src_pos, src_pos, tmp); - __ add_32(dst_pos, dst_pos, tmp); - } else { - restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only - } + __ mvn_32(tmp, R0); + restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only + __ sub_32(length, length, tmp); + __ add_32(src_pos, src_pos, tmp); + __ add_32(dst_pos, dst_pos, tmp); __ b(*stub->entry());
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -35,7 +35,7 @@ #include "ci/ciTypeArrayKlass.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "vmreg_arm.inline.hpp" @@ -497,7 +497,7 @@ #endif // AARCH64 } -void LIRGenerator::CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) { +void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) { assert(addr->is_register(), "must be a register at this point"); LIR_Opr tmp = FrameMap::LR_ptr_opr;
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -30,7 +30,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_arm.hpp" #include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/arm/frame_arm.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/frame_arm.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -135,4 +135,6 @@ // helper to update a map with callee-saved FP static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr); + static jint interpreter_frame_expression_stack_direction() { return -1; } + #endif // CPU_ARM_VM_FRAME_ARM_HPP
--- a/src/hotspot/cpu/arm/frame_arm.inline.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/frame_arm.inline.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -218,9 +218,6 @@ } -inline jint frame::interpreter_frame_expression_stack_direction() { return -1; } - - // Entry frames inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/g1/g1BarrierSet.hpp" +#include "gc/g1/g1BarrierSetAssembler.hpp" +#include "gc/g1/g1CardTable.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "interpreter/interp_masm.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/thread.hpp" +#include "utilities/macros.hpp" + +#define __ masm-> + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + +void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, int callee_saved_regs) { + bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0; + if (!dest_uninitialized) { + assert( addr->encoding() < callee_saved_regs, "addr must be saved"); + assert(count->encoding() < callee_saved_regs, "count must be saved"); + + BLOCK_COMMENT("PreBarrier"); + +#ifdef AARCH64 + callee_saved_regs = align_up(callee_saved_regs, 2); + for (int i = 0; i < callee_saved_regs; i += 2) { + __ raw_push(as_Register(i), as_Register(i+1)); + } +#else + RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1)); + __ push(saved_regs | R9ifScratched); +#endif // AARCH64 + + if (addr != R0) { + assert_different_registers(count, R0); + __ mov(R0, addr); + } +#ifdef AARCH64 + __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t +#else + if (count != R1) { + __ mov(R1, count); + } +#endif // AARCH64 + + if (UseCompressedOops) { + __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry)); + } else { + __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry)); + } + +#ifdef AARCH64 + for (int i = callee_saved_regs - 2; i >= 0; i -= 2) { + __ raw_pop(as_Register(i), as_Register(i+1)); + } +#else + __ pop(saved_regs | R9ifScratched); +#endif // AARCH64 + } +} + +void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp) { + + BLOCK_COMMENT("G1PostBarrier"); + if (addr != R0) { + assert_different_registers(count, R0); + __ mov(R0, 
addr); + } +#ifdef AARCH64 + __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t +#else + if (count != R1) { + __ mov(R1, count); + } +#if R9_IS_SCRATCHED + // Safer to save R9 here since callers may have been written + // assuming R9 survives. This is suboptimal but is not in + // general worth optimizing for the few platforms where R9 + // is scratched. Note that the optimization might not be to + // difficult for this particular call site. + __ push(R9); +#endif // !R9_IS_SCRATCHED +#endif // !AARCH64 + __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry)); +#ifndef AARCH64 +#if R9_IS_SCRATCHED + __ pop(R9); +#endif // !R9_IS_SCRATCHED +#endif // !AARCH64 +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP +#define CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, int callee_saved_regs); + void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp); +}; + +#endif // CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP +#define CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP + +#include "asm/macroAssembler.hpp" +#include "memory/allocation.hpp" +#include "oops/access.hpp" + +class BarrierSetAssembler: public CHeapObj<mtGC> { +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register addr, Register count, int callee_saved_regs) {} + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register addr, Register count, Register tmp) {} +}; + +#endif // CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp	Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "runtime/globals.hpp"
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+                                                                    Register addr, Register count, Register tmp) {
+  BLOCK_COMMENT("CardTablePostBarrier");
+  BarrierSet* bs = Universe::heap()->barrier_set();
+  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+  CardTable* ct = ctbs->card_table();
+  assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+  Label L_cardtable_loop, L_done;
+
+  __ cbz_32(count, L_done); // zero count - nothing to do
+
+  __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
+  __ sub(count, count, BytesPerHeapOop);                            // last addr
+
+  __ logical_shift_right(addr, addr, CardTable::card_shift);
+  __ logical_shift_right(count, count, CardTable::card_shift);
+  __ sub(count, count, addr); // nb of cards
+
+  // warning: Rthread has not been preserved
+  __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
+  __ add(addr, tmp, addr);
+
+  Register zero = __ zero_register(tmp);
+
+  __ BIND(L_cardtable_loop);
+  __ strb(zero, Address(addr, 1, post_indexed));
+  __ subs(count, count, 1);
+  __ b(L_cardtable_loop, ge);
+  __ BIND(L_done);
+}
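The loop generated above dirties one card-table byte per card covered by the copied oop range. As a rough, standalone illustration of the arithmetic that code emits (shift both ends of the range down by the card shift, take the difference as the card count, store a dirty byte per card), here is a minimal C++ sketch; the card size, the byte map, and the function name are simplified stand-ins for illustration, not HotSpot code.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Simplified stand-ins for the CardTable constants (illustration only).
  static const int kCardShift       = 9;   // 512-byte cards, like CardTable::card_shift
  static const int kBytesPerHeapOop = 8;

  // Dirty every card covered by [addr, addr + count oops), mirroring the generated
  // loop: compute the last element address, shift both ends down to card indices,
  // then store a "dirty" byte per card.
  static void write_ref_array_post_barrier(std::vector<uint8_t>& byte_map,
                                           uintptr_t addr, size_t count) {
    if (count == 0) return;                                  // cbz_32(count, L_done)
    uintptr_t last = addr + count * kBytesPerHeapOop - kBytesPerHeapOop;
    uintptr_t first_card = addr >> kCardShift;               // logical_shift_right
    uintptr_t last_card  = last >> kCardShift;
    for (uintptr_t card = first_card; card <= last_card; ++card) {
      byte_map[card] = 0;                                    // strb(zero, ...)
    }
  }

  int main() {
    std::vector<uint8_t> byte_map(1024, 0xff);               // 0xff = clean
    write_ref_array_post_barrier(byte_map, 0x1fe0, 8);       // 8 oops straddling a card boundary
    size_t dirty = 0;
    for (uint8_t b : byte_map) if (b == 0) ++dirty;
    std::printf("dirty cards: %zu\n", dirty);                // expect 2
    return 0;
  }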
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP +#define CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp); +}; + +#endif // #ifndef CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +#define __ masm-> + +void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register addr, Register count, int callee_saved_regs) { + + if (is_oop) { + gen_write_ref_array_pre_barrier(masm, decorators, addr, count, callee_saved_regs); + } +} + +void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, + Register addr, Register count, Register tmp) { + if (is_oop) { + gen_write_ref_array_post_barrier(masm, decorators, addr, count, tmp); + } +}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp	Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
+#define CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+  virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+                                               Register addr, Register count, int callee_saved_regs) {}
+  virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+                                                Register addr, Register count, Register tmp) {}
+
+public:
+  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+                                  Register addr, Register count, int callee_saved_regs);
+  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+                                  Register addr, Register count, Register tmp);
+};
+
+#endif // CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
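Taken together, the new ARM files introduce a small hierarchy: BarrierSetAssembler declares empty arraycopy hooks, ModRefBarrierSetAssembler filters on is_oop and defers to pre/post-barrier hooks, and the concrete GC assemblers override those hooks. A minimal, self-contained C++ model of that template-method dispatch is sketched below; class and method names are simplified, and the printf calls merely stand in for the code each assembler would actually emit.

  #include <cstdio>

  // Simplified model of the new hierarchy: the stub generator calls only the
  // virtual prologue/epilogue hooks; each GC's assembler decides what to emit.
  struct BarrierSetAssembler {
    virtual ~BarrierSetAssembler() {}
    virtual void arraycopy_prologue(bool is_oop) {}   // default: no barrier
    virtual void arraycopy_epilogue(bool is_oop) {}
  };

  struct ModRefBarrierSetAssembler : BarrierSetAssembler {
    // Template methods: filter on is_oop, defer to the pre/post hooks.
    void arraycopy_prologue(bool is_oop) override { if (is_oop) pre_barrier(); }
    void arraycopy_epilogue(bool is_oop) override { if (is_oop) post_barrier(); }
  protected:
    virtual void pre_barrier()  {}
    virtual void post_barrier() {}
  };

  struct CardTableBarrierSetAssembler : ModRefBarrierSetAssembler {
  protected:
    void post_barrier() override { std::printf("emit card-mark loop\n"); }
  };

  struct G1BarrierSetAssembler : ModRefBarrierSetAssembler {
  protected:
    void pre_barrier()  override { std::printf("emit SATB pre-barrier call\n"); }
    void post_barrier() override { std::printf("emit post-barrier call\n"); }
  };

  int main() {
    // The stub generator is now GC-agnostic: it asks the active barrier set for
    // its assembler and invokes the hooks around the copy loop.
    G1BarrierSetAssembler g1;
    CardTableBarrierSetAssembler ct;
    BarrierSetAssembler* bs = &g1;        // would come from the active barrier set
    bs->arraycopy_prologue(/*is_oop*/ true);
    std::printf("...copy loop...\n");
    bs->arraycopy_epilogue(/*is_oop*/ true);
    (void)ct;
    return 0;
  }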
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -24,9 +24,9 @@ #include "precompiled.hpp" #include "jvm.h" -#include "gc/shared/barrierSet.inline.hpp" +#include "gc/shared/barrierSet.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.inline.hpp" +#include "gc/shared/cardTableBarrierSet.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "interp_masm_arm.hpp" #include "interpreter/interpreter.hpp" @@ -40,6 +40,7 @@ #include "prims/jvmtiThreadState.hpp" #include "runtime/basicLock.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/sharedRuntime.hpp" #if INCLUDE_ALL_GCS @@ -411,10 +412,10 @@ void InterpreterMacroAssembler::store_check_part1(Register card_table_base) { // Check barrier set type (should be card table) and element size BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code"); @@ -473,7 +474,7 @@ #ifdef AARCH64 strb(ZR, card_table_addr); #else - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); CardTable* ct = ctbs->card_table(); if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) { // Card table is aligned so the lowest byte of the table address base is zero.
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "interpreter/interp_masm.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -31,11 +32,26 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" #define __ _masm-> +Interpreter::SignatureHandlerGenerator::SignatureHandlerGenerator( + const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { + _masm = new MacroAssembler(buffer); + _abi_offset = 0; + _ireg = is_static() ? 2 : 1; +#ifdef __ABI_HARD__ +#ifdef AARCH64 + _freg = 0; +#else + _fp_slot = 0; + _single_fpr_slot = 0; +#endif +#endif +} + #ifdef SHARING_FAST_NATIVE_FINGERPRINTS // mapping from SignatureIterator param to (common) type of parsing static const u1 shared_type[] = {
--- a/src/hotspot/cpu/arm/interpreterRT_arm.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/interpreterRT_arm.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,8 +25,6 @@ #ifndef CPU_ARM_VM_INTERPRETERRT_ARM_HPP #define CPU_ARM_VM_INTERPRETERRT_ARM_HPP -#include "memory/allocation.hpp" - // native method calls class SignatureHandlerGenerator: public NativeSignatureIterator { @@ -56,23 +54,10 @@ #endif public: // Creation - SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { - _masm = new MacroAssembler(buffer); - _abi_offset = 0; - _ireg = is_static() ? 2 : 1; -#ifdef __ABI_HARD__ -#ifdef AARCH64 - _freg = 0; -#else - _fp_slot = 0; - _single_fpr_slot = 0; -#endif -#endif - } + SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); // Code generation void generate(uint64_t fingerprint); - }; #ifndef AARCH64
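The constructor is moved out of interpreterRT_arm.hpp because, as the PPC counterpart later in this change notes, the header is textually included in the middle of another class and must not pull in macroAssembler.hpp or allocation.hpp. A minimal sketch of the underlying pattern, defining the constructor out of line so the "header" part only needs a forward declaration, follows; the class names are illustrative stand-ins, not the real HotSpot types, and the two "files" are collapsed into one translation unit so the example compiles on its own.

  #include <cstdio>

  // --- "header": only a forward declaration is needed here -----------------
  class MacroAsm;                      // incomplete type; no heavy include required

  class SignatureHandlerGen {
  public:
    SignatureHandlerGen();             // defined out of line, in the "source" part
    ~SignatureHandlerGen();
    void generate();
  private:
    MacroAsm* _masm;                   // pointer member is fine with an incomplete type
    int _abi_offset;
  };

  // --- "source": the full definition is visible only here ------------------
  class MacroAsm {                     // stands in for the real assembler class
  public:
    void emit() { std::printf("emit signature handler code\n"); }
  };

  SignatureHandlerGen::SignatureHandlerGen() : _masm(new MacroAsm()), _abi_offset(0) {}
  SignatureHandlerGen::~SignatureHandlerGen() { delete _masm; }
  void SignatureHandlerGen::generate() { _masm->emit(); }

  int main() {
    SignatureHandlerGen gen;
    gen.generate();
    return 0;
  }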
--- a/src/hotspot/cpu/arm/jvmciCodeInstaller_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/jvmciCodeInstaller_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "jvmci/jvmciCompilerToVM.hpp" #include "jvmci/jvmciJavaClasses.hpp" #include "oops/oop.inline.hpp" +#include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_arm.inline.hpp"
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -30,14 +30,14 @@ #include "code/nativeInst.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "oops/klass.inline.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp" @@ -2267,7 +2267,7 @@ DirtyCardQueue::byte_offset_of_buf())); BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); Label done; Label runtime;
--- a/src/hotspot/cpu/arm/methodHandles_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,6 +34,7 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" #define __ _masm->
--- a/src/hotspot/cpu/arm/runtime_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/runtime_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -32,7 +32,7 @@ #include "memory/resourceArea.hpp" #include "nativeInst_arm.hpp" #include "opto/runtime.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,8 +25,8 @@ #include "precompiled.hpp" #include "asm/assembler.hpp" #include "assembler_arm.inline.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_arm.hpp" #include "oops/instanceOop.hpp" @@ -2855,148 +2855,6 @@ return start; } -#if INCLUDE_ALL_GCS - // - // Generate pre-write barrier for array. - // - // Input: - // addr - register containing starting address - // count - register containing element count, 32-bit int - // callee_saved_regs - - // the call must preserve this number of registers: R0, R1, ..., R[callee_saved_regs-1] - // - // callee_saved_regs must include addr and count - // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) except for callee_saved_regs. - void gen_write_ref_array_pre_barrier(Register addr, Register count, int callee_saved_regs) { - BarrierSet* bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - { - assert( addr->encoding() < callee_saved_regs, "addr must be saved"); - assert(count->encoding() < callee_saved_regs, "count must be saved"); - - BLOCK_COMMENT("PreBarrier"); - -#ifdef AARCH64 - callee_saved_regs = align_up(callee_saved_regs, 2); - for (int i = 0; i < callee_saved_regs; i += 2) { - __ raw_push(as_Register(i), as_Register(i+1)); - } -#else - RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1)); - __ push(saved_regs | R9ifScratched); -#endif // AARCH64 - - if (addr != R0) { - assert_different_registers(count, R0); - __ mov(R0, addr); - } -#ifdef AARCH64 - __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t -#else - if (count != R1) { - __ mov(R1, count); - } -#endif // AARCH64 - - __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); - -#ifdef AARCH64 - for (int i = callee_saved_regs - 2; i >= 0; i -= 2) { - __ raw_pop(as_Register(i), as_Register(i+1)); - } -#else - __ pop(saved_regs | R9ifScratched); -#endif // AARCH64 - } - case BarrierSet::CardTableModRef: - break; - default: - ShouldNotReachHere(); - } - } -#endif // INCLUDE_ALL_GCS - - // - // Generate post-write barrier for array. - // - // Input: - // addr - register containing starting address (can be scratched) - // count - register containing element count, 32-bit int (can be scratched) - // tmp - scratch register - // - // Note: LR can be scratched but might be equal to addr, count or tmp - // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR). - void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp) { - assert_different_registers(addr, count, tmp); - BarrierSet* bs = Universe::heap()->barrier_set(); - - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - { - BLOCK_COMMENT("G1PostBarrier"); - if (addr != R0) { - assert_different_registers(count, R0); - __ mov(R0, addr); - } -#ifdef AARCH64 - __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_post takes size_t -#else - if (count != R1) { - __ mov(R1, count); - } -#if R9_IS_SCRATCHED - // Safer to save R9 here since callers may have been written - // assuming R9 survives. 
This is suboptimal but is not in - // general worth optimizing for the few platforms where R9 - // is scratched. Note that the optimization might not be to - // difficult for this particular call site. - __ push(R9); -#endif -#endif // !AARCH64 - __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)); -#ifndef AARCH64 -#if R9_IS_SCRATCHED - __ pop(R9); -#endif -#endif // !AARCH64 - } - break; - case BarrierSet::CardTableModRef: - { - BLOCK_COMMENT("CardTablePostBarrier"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); - CardTable* ct = ctbs->card_table(); - assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); - - Label L_cardtable_loop, L_done; - - __ cbz_32(count, L_done); // zero count - nothing to do - - __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop); - __ sub(count, count, BytesPerHeapOop); // last addr - - __ logical_shift_right(addr, addr, CardTable::card_shift); - __ logical_shift_right(count, count, CardTable::card_shift); - __ sub(count, count, addr); // nb of cards - - // warning: Rthread has not been preserved - __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference); - __ add(addr,tmp, addr); - - Register zero = __ zero_register(tmp); - - __ BIND(L_cardtable_loop); - __ strb(zero, Address(addr, 1, post_indexed)); - __ subs(count, count, 1); - __ b(L_cardtable_loop, ge); - __ BIND(L_done); - } - break; - default: - ShouldNotReachHere(); - } - } // Generates pattern of code to be placed after raw data copying in generate_oop_copy // Includes return from arraycopy stub. @@ -3007,7 +2865,7 @@ // count: total number of copied elements, 32-bit int // // Blows all volatile (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) and 'to', 'count', 'tmp' registers. 
- void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward) { + void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward, DecoratorSet decorators) { assert_different_registers(to, count, tmp); if (forward) { @@ -3018,7 +2876,8 @@ // 'to' is the beginning of the region - gen_write_ref_array_post_barrier(to, count, tmp); + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); + bs->arraycopy_epilogue(this, decorators, true, to, count, tmp); if (status) { __ mov(R0, 0); // OK @@ -3086,9 +2945,16 @@ __ push(LR); #endif // AARCH64 -#if INCLUDE_ALL_GCS - gen_write_ref_array_pre_barrier(to, count, callee_saved_regs); -#endif // INCLUDE_ALL_GCS + DecoratorSet decorators = 0; + if (disjoint) { + decorators |= ARRAYCOPY_DISJOINT; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs); // save arguments for barrier generation (after the pre barrier) __ mov(saved_count, count); @@ -3146,12 +3012,12 @@ } assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count"); - oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward); + oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators); { copy_small_array(from, to, count, tmp1, noreg, bytes_per_count, forward, L_small_array); - oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward); + oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators); } if (!to_is_aligned) { @@ -3165,7 +3031,7 @@ int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward); assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count"); - oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward); + oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators); } return start; @@ -3336,7 +3202,7 @@ const int callee_saved_regs = AARCH64_ONLY(5) NOT_AARCH64(4); // LR saved differently - Label load_element, store_element, do_card_marks, fail; + Label load_element, store_element, do_epilogue, fail; BLOCK_COMMENT("Entry:"); @@ -3351,9 +3217,10 @@ pushed+=1; #endif // AARCH64 -#if INCLUDE_ALL_GCS - gen_write_ref_array_pre_barrier(to, count, callee_saved_regs); -#endif // INCLUDE_ALL_GCS + DecoratorSet decorators = ARRAYCOPY_CHECKCAST; + + BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs); #ifndef AARCH64 const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11; @@ -3399,7 +3266,7 @@ __ subs_32(count,count,1); __ str(R5, Address(to, BytesPerHeapOop, post_indexed)); // store the oop } - __ b(do_card_marks, eq); // count exhausted + __ b(do_epilogue, eq); // count exhausted // ======== loop entry is here ======== __ BIND(load_element); @@ -3421,7 +3288,7 @@ // Note: fail marked by the fact that count differs from saved_count - __ BIND(do_card_marks); + __ BIND(do_epilogue); Register copied = AARCH64_ONLY(R20) NOT_AARCH64(R4); // saved Label L_not_copied; @@ -3431,7 +3298,7 @@ __ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial 
to value __ mov(R12, copied); // count arg scratched by post barrier - gen_write_ref_array_post_barrier(to, R12, R3); + bs->arraycopy_epilogue(this, decorators, true, to, R12, R3); assert_different_registers(R3,R12,LR,copied,saved_count); inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
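With this change the ARM stub generator no longer switches on the barrier-set kind: it describes the copy with a DecoratorSet bitmask (disjoint, aligned, checkcast) and hands that to the active barrier set's assembler. The sketch below only illustrates how such a bitmask is composed and tested; the bit values and the helper are placeholders for illustration, the real constants live in the shared access framework.

  #include <cstdint>
  #include <cstdio>

  // Placeholder bit values; the real DecoratorSet constants are defined elsewhere.
  typedef uint64_t DecoratorSet;
  const DecoratorSet ARRAYCOPY_DISJOINT  = 1u << 0;
  const DecoratorSet ARRAYCOPY_ALIGNED   = 1u << 1;
  const DecoratorSet ARRAYCOPY_CHECKCAST = 1u << 2;

  // The epilogue helper only needs the bitmask to know, e.g., whether this was a
  // checkcast copy; it no longer cares which GC's barrier set is active.
  static void arraycopy_epilogue(DecoratorSet decorators) {
    bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
    std::printf("epilogue: checkcast=%d\n", checkcast ? 1 : 0);
  }

  int main() {
    bool disjoint = true, aligned = false;
    DecoratorSet decorators = 0;
    if (disjoint) decorators |= ARRAYCOPY_DISJOINT;   // as in generate_oop_copy
    if (aligned)  decorators |= ARRAYCOPY_ALIGNED;
    arraycopy_epilogue(decorators);
    arraycopy_epilogue(ARRAYCOPY_CHECKCAST);          // as in generate_checkcast_copy
    return 0;
  }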
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -34,6 +34,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" @@ -228,7 +229,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (is_null) { __ store_heap_oop_null(new_val, obj);
--- a/src/hotspot/cpu/ppc/assembler_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/assembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,13 +25,13 @@ #include "precompiled.hpp" #include "asm/assembler.inline.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -33,9 +33,10 @@ #include "ci/ciInstance.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "nativeInst_ppc.hpp" #include "oops/objArrayKlass.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" @@ -1858,34 +1859,31 @@ if (op->expected_type() == NULL) { assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() && length->is_nonvolatile(), "must preserve"); + address copyfunc_addr = StubRoutines::generic_arraycopy(); + assert(copyfunc_addr != NULL, "generic arraycopy stub required"); + // 3 parms are int. Convert to long. __ mr(R3_ARG1, src); __ extsw(R4_ARG2, src_pos); __ mr(R5_ARG3, dst); __ extsw(R6_ARG4, dst_pos); __ extsw(R7_ARG5, length); - address copyfunc_addr = StubRoutines::generic_arraycopy(); - - if (copyfunc_addr == NULL) { // Use C version if stub was not generated. - address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); - __ call_c_with_frame_resize(entry, frame_resize); - } else { + #ifndef PRODUCT - if (PrintC1Statistics) { - address counter = (address)&Runtime1::_generic_arraycopystub_cnt; - int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); - __ lwz(R11_scratch1, simm16_offs, tmp); - __ addi(R11_scratch1, R11_scratch1, 1); - __ stw(R11_scratch1, simm16_offs, tmp); - } + if (PrintC1Statistics) { + address counter = (address)&Runtime1::_generic_arraycopystub_cnt; + int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); + __ lwz(R11_scratch1, simm16_offs, tmp); + __ addi(R11_scratch1, R11_scratch1, 1); + __ stw(R11_scratch1, simm16_offs, tmp); + } #endif - __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0); - - __ nand(tmp, R3_RET, R3_RET); - __ subf(length, tmp, length); - __ add(src_pos, tmp, src_pos); - __ add(dst_pos, tmp, dst_pos); - } + __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0); + + __ nand(tmp, R3_RET, R3_RET); + __ subf(length, tmp, length); + __ add(src_pos, tmp, src_pos); + __ add(dst_pos, tmp, dst_pos); __ cmpwi(CCR0, R3_RET, 0); __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
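The C1 change above drops the fallback to Runtime1::arraycopy and relies on the generic arraycopy stub always existing; the nand/subf/add sequence then recovers how many elements were copied before a failed copy bailed out. Below is a small C++ sketch of that return convention, assuming the stub returns 0 on success and the bitwise complement of the number of copied elements otherwise; the array contents and the bail-out condition are invented purely for illustration.

  #include <cstdio>

  // Illustrative model of the return convention the adjustment code above relies
  // on: 0 on success, otherwise ~(number of elements copied before bailing out).
  static int generic_arraycopy(const int* src, int src_pos,
                               int* dst, int dst_pos, int length) {
    for (int i = 0; i < length; ++i) {
      if (src[src_pos + i] < 0) {       // pretend a negative element forces a bail-out
        return ~i;                      // i elements were copied so far
      }
      dst[dst_pos + i] = src[src_pos + i];
    }
    return 0;
  }

  int main() {
    int src[] = { 1, 2, 3, -1, 5 };
    int dst[5] = { 0 };
    int src_pos = 0, dst_pos = 0, length = 5;

    int ret = generic_arraycopy(src, src_pos, dst, dst_pos, length);
    int copied = ~ret;                  // mirrors: nand(tmp, R3_RET, R3_RET)
    length  -= copied;                  // subf(length, tmp, length)
    src_pos += copied;                  // add(src_pos, tmp, src_pos)
    dst_pos += copied;                  // add(dst_pos, tmp, dst_pos)

    if (ret < 0) {
      // C1 would now branch to the slow path with the adjusted arguments.
      std::printf("partial copy: %d done, %d left at pos %d\n", copied, length, src_pos);
    }
    return 0;
  }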
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -29,7 +29,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_ppc.hpp" #include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/ppc/frame_ppc.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/frame_ppc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -425,4 +425,6 @@ pc_return_offset = 0 }; + static jint interpreter_frame_expression_stack_direction() { return -1; } + #endif // CPU_PPC_VM_FRAME_PPC_HPP
--- a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -179,10 +179,6 @@ return (intptr_t*)interpreter_frame_monitor_end() - 1; } -inline jint frame::interpreter_frame_expression_stack_direction() { - return -1; -} - // top of expression stack inline intptr_t* frame::interpreter_frame_tos_address() const { return ((intptr_t*) get_ijava_state()->esp) + Interpreter::stackElementWords;
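interpreter_frame_expression_stack_direction() now lives in frame_ppc.hpp as a static member returning -1, i.e. the interpreter's expression stack grows towards lower addresses on PPC. The sketch below only illustrates what shared code does with such a direction value when addressing expression-stack slots; the slot layout and constants are simplified assumptions, not the real frame layout.

  #include <cstdint>
  #include <cstdio>

  // A stack "direction" of -1 means slot i sits i words *below* the stack base.
  static const int kStackDirection = -1;   // as returned on ppc

  static intptr_t* expression_stack_slot(intptr_t* stack_base, int index) {
    return stack_base + index * kStackDirection;
  }

  int main() {
    intptr_t frame_words[8] = { 0 };
    intptr_t* base = &frame_words[7];      // pretend this is the expression stack base
    *expression_stack_slot(base, 0) = 42;  // slot 0 at the base
    *expression_stack_slot(base, 1) = 43;  // slot 1 one word below
    std::printf("slot0=%ld slot1=%ld\n", (long)frame_words[7], (long)frame_words[6]);
    return 0;
  }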
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/g1/g1BarrierSet.hpp" +#include "gc/g1/g1CardTable.hpp" +#include "gc/g1/g1BarrierSetAssembler.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "runtime/thread.hpp" +#include "interpreter/interp_masm.hpp" + +#define __ masm-> + +void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register from, Register to, Register count, + Register preserve1, Register preserve2) { + bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0; + // With G1, don't generate the call if we statically know that the target in uninitialized + if (!dest_uninitialized) { + int spill_slots = 3; + if (preserve1 != noreg) { spill_slots++; } + if (preserve2 != noreg) { spill_slots++; } + const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); + Label filtered; + + // Is marking active? 
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { + __ lwz(R0, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread); + } else { + guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); + __ lbz(R0, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread); + } + __ cmpdi(CCR0, R0, 0); + __ beq(CCR0, filtered); + + __ save_LR_CR(R0); + __ push_frame(frame_size, R0); + int slot_nr = 0; + __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP); + __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP); + __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP); + if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); } + if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); } + + if (UseCompressedOops) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), to, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), to, count); + } + + slot_nr = 0; + __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP); + __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP); + __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP); + if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); } + if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); } + __ addi(R1_SP, R1_SP, frame_size); // pop_frame() + __ restore_LR_CR(R0); + + __ bind(filtered); + } +} + +void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register preserve) { + int spill_slots = (preserve != noreg) ? 1 : 0; + const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); + + __ save_LR_CR(R0); + __ push_frame(frame_size, R0); + if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); } + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), addr, count); + if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); } + __ addi(R1_SP, R1_SP, frame_size); // pop_frame(); + __ restore_LR_CR(R0); +}
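The PPC G1 pre-barrier above is filtered twice: it is skipped entirely when the destination is statically known to be uninitialized, and at run time it only calls into the runtime when the thread's SATB "marking active" byte is set. A minimal C++ model of that filtering decision follows; the thread structure and field name are stand-ins for illustration, not the real SATBMarkQueue layout.

  #include <cstdio>

  // Simplified model of the decision the generated pre-barrier makes: the runtime
  // call that records the old oops is only reached when the destination may hold
  // initialized oops and SATB marking is currently active.
  struct FakeThread {
    unsigned char satb_mark_queue_active;  // stands in for the per-thread active flag
  };

  static void write_ref_array_pre_barrier(FakeThread* thread, bool dest_uninitialized) {
    if (dest_uninitialized) {
      return;                              // statically filtered, no code emitted
    }
    if (thread->satb_mark_queue_active == 0) {
      return;                              // "Is marking active?" check -> filtered
    }
    // Here the stub spills its live registers and calls the
    // G1BarrierSet::write_ref_array_pre_*_entry(dst, count) runtime entry.
    std::printf("enqueue old values for SATB marking\n");
  }

  int main() {
    FakeThread t = { 1 };                  // concurrent marking in progress
    write_ref_array_pre_barrier(&t, /*dest_uninitialized*/ false);  // calls runtime
    t.satb_mark_queue_active = 0;
    write_ref_array_pre_barrier(&t, /*dest_uninitialized*/ false);  // filtered out
    return 0;
  }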
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP +#define CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register from, Register to, Register count, + Register preserve1, Register preserve2); + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register preserve); +}; + +#endif // CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP +#define CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP + +#include "asm/macroAssembler.hpp" +#include "memory/allocation.hpp" +#include "oops/access.hpp" + +class InterpreterMacroAssembler; + +class BarrierSetAssembler: public CHeapObj<mtGC> { +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count, Register preserve1, Register preserve2) {} + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Register count, Register preserve) {} +}; + +#endif // CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" +#include "gc/shared/cardTableBarrierSetAssembler.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "interpreter/interp_masm.hpp" + +#define __ masm-> + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + +void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, + Register count, Register preserve) { + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + assert_different_registers(addr, count, R0); + + Label Lskip_loop, Lstore_loop; + + if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); } + + __ sldi_(count, count, LogBytesPerHeapOop); + __ beq(CCR0, Lskip_loop); // zero length + __ addi(count, count, -BytesPerHeapOop); + __ add(count, addr, count); + // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) + __ srdi(addr, addr, CardTable::card_shift); + __ srdi(count, count, CardTable::card_shift); + __ subf(count, addr, count); + __ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0); + __ addi(count, count, 1); + __ li(R0, 0); + __ mtctr(count); + // Byte store loop + __ bind(Lstore_loop); + __ stb(R0, 0, addr); + __ addi(addr, addr, 1); + __ bdnz(Lstore_loop); + __ bind(Lskip_loop); +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP +#define CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, + Register count, Register preserve); +}; + +#endif // CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +#define __ masm-> + +void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count, Register preserve1, Register preserve2) { + if (type == T_OBJECT) { + gen_write_ref_array_pre_barrier(masm, decorators, src, dst, count, preserve1, preserve2); + + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + if (!checkcast) { + assert_different_registers(dst, count, R9_ARG7, R10_ARG8); + // Save some arguments for epilogue, e.g. disjoint_long_copy_core destroys them. + __ mr(R9_ARG7, dst); + __ mr(R10_ARG8, count); + } + } +} + +void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Register count, Register preserve) { + if (type == T_OBJECT) { + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + if (!checkcast) { + gen_write_ref_array_post_barrier(masm, decorators, R9_ARG7, R10_ARG8, preserve); + } else { + gen_write_ref_array_post_barrier(masm, decorators, dst, count, preserve); + } + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP +#define CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/barrierSetAssembler.hpp" + +class ModRefBarrierSetAssembler: public BarrierSetAssembler { +protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register from, Register to, Register count, + Register preserve1, Register preserve2) {} + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register preserve) {} + +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count, Register preserve1, Register preserve2); + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Register count, Register preserve); +}; + +#endif // CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -29,6 +29,7 @@ #include "interp_masm_ppc.hpp" #include "interpreter/interpreterRuntime.hpp" #include "prims/jvmtiThreadState.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "asm/assembler.inline.hpp" +#include "interpreter/interp_masm.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -33,7 +34,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" #define __ _masm-> @@ -46,6 +47,12 @@ // Implementation of SignatureHandlerGenerator +InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator( + const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { + _masm = new MacroAssembler(buffer); + _num_used_fp_arg_regs = 0; +} + void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { Argument jni_arg(jni_offset()); Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;
--- a/src/hotspot/cpu/ppc/interpreterRT_ppc.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/interpreterRT_ppc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2014 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,8 +26,8 @@ #ifndef CPU_PPC_VM_INTERPRETERRT_PPC_HPP #define CPU_PPC_VM_INTERPRETERRT_PPC_HPP -#include "asm/macroAssembler.hpp" -#include "memory/allocation.hpp" +// This is included in the middle of class Interpreter. +// Do not include files here. // native method calls @@ -45,10 +45,7 @@ public: // Creation - SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { - _masm = new MacroAssembler(buffer); - _num_used_fp_arg_regs = 0; - } + SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); // Code generation void generate(uint64_t fingerprint);
--- a/src/hotspot/cpu/ppc/jvmciCodeInstaller_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/jvmciCodeInstaller_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "jvmci/jvmciCompilerToVM.hpp" #include "jvmci/jvmciJavaClasses.hpp" #include "oops/oop.inline.hpp" +#include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_ppc.inline.hpp"
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -27,7 +27,7 @@ #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" @@ -35,7 +35,7 @@ #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" @@ -3036,9 +3036,9 @@ // Write the card table byte if needed. void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) { - CardTableModRefBS* bs = - barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); - assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + CardTableBarrierSet* bs = + barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); CardTable* ct = bs->card_table(); #ifdef ASSERT cmpdi(CCR0, Rnew_val, 0);
--- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2017 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -31,6 +31,8 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "utilities/preserveException.hpp" #define __ _masm->
--- a/src/hotspot/cpu/ppc/ppc.ad Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/ppc.ad Sat Mar 24 01:08:35 2018 +0100 @@ -1274,12 +1274,12 @@ return offsets; } const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr); - + // Emit the trampoline stub which will be related to the branch-and-link below. CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset); if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full. __ relocate(rtype); - + // Note: At this point we do not have the address of the trampoline // stub, and the entry point might be too far away for bl, so __ pc() // serves as dummy and the bl will be patched later. @@ -1526,7 +1526,7 @@ // Save return pc. ___(std) std(return_pc, _abi(lr), callers_sp); } - + C->set_frame_complete(cbuf.insts_size()); } #undef ___ @@ -2695,13 +2695,13 @@ ciEnv::current()->record_out_of_memory_failure(); return; } - + // Get the constant's TOC offset. toc_offset = __ offset_to_method_toc(const_toc_addr); - + // Keep the current instruction offset in mind. ((loadConLNode*)this)->_cbuf_insts_offset = __ offset(); - + __ ld($dst$$Register, toc_offset, $toc$$Register); %} @@ -2819,7 +2819,7 @@ MachNode *_last; } loadConLReplicatedNodesTuple; -loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc, +loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc, vecXOper *dst, immI_0Oper *zero, OptoReg::Name reg_second, OptoReg::Name reg_first, OptoReg::Name reg_vec_second, OptoReg::Name reg_vec_first) { @@ -3158,7 +3158,7 @@ Label skip_storestore; #if 0 // TODO: PPC port - // Check CMSCollectorCardTableModRefBSExt::_requires_release and do the + // Check CMSCollectorCardTableBarrierSetBSExt::_requires_release and do the // StoreStore barrier conditionally. __ lwz(R0, 0, $releaseFieldAddr$$Register); __ cmpwi($crx$$CondRegister, R0, 0); @@ -6852,7 +6852,7 @@ // Card-mark for CMS garbage collection. // This cardmark does an optimization so that it must not always // do a releasing store. For this, it gets the address of -// CMSCollectorCardTableModRefBSExt::_requires_release as input. +// CMSCollectorCardTableBarrierSetBSExt::_requires_release as input. // (Using releaseFieldAddr in the match rule is a hack.) instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{ match(Set mem (StoreCM mem releaseFieldAddr)); @@ -6871,7 +6871,7 @@ // Card-mark for CMS garbage collection. // This cardmark does an optimization so that it must not always // do a releasing store. For this, it needs the constant address of -// CMSCollectorCardTableModRefBSExt::_requires_release. +// CMSCollectorCardTableBarrierSetBSExt::_requires_release. // This constant address is split off here by expand so we can use // adlc / matcher functionality to load it from the constant section. 
instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{ @@ -6879,7 +6879,7 @@ predicate(UseConcMarkSweepGC); expand %{ - immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableModRefBSExt::requires_release_address() */ %} + immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %} iRegLdst releaseFieldAddress; flagsReg crx; loadConL_Ex(releaseFieldAddress, baseImm); @@ -13665,7 +13665,7 @@ instruct mtvsrwz(vecX temp1, iRegIsrc src) %{ effect(DEF temp1, USE src); - + size(4); ins_encode %{ __ mtvsrwz($temp1$$VectorSRegister, $src$$Register); @@ -13678,7 +13678,7 @@ size(4); ins_encode %{ - __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant); + __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant); %} ins_pipe(pipe_class_default); %} @@ -13843,7 +13843,7 @@ expand %{ iRegLdst tmpL; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveReg(tmpL, src); repl48(tmpL); repl32(tmpL); @@ -13915,10 +13915,10 @@ predicate(n->as_Vector()->length() == 4); ins_cost(2 * DEFAULT_COST); - expand %{ + expand %{ iRegLdst tmpL; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveReg(tmpL, src); repl32(tmpL); mtvsrd(tmpV, tmpL); @@ -14057,7 +14057,7 @@ iRegIdst tmpI; iRegLdst tmpL; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveF2I_reg_stack(tmpS, src); // Move float to stack. moveF2I_stack_reg(tmpI, tmpS); // Move stack to int reg. @@ -14096,7 +14096,7 @@ iRegLdst tmpL; iRegLdst tmp; vecX tmpV; - immI8 zero %{ (int) 0 %} + immI8 zero %{ (int) 0 %} moveD2L_reg_stack(tmpS, src); moveD2L_stack_reg(tmpL, tmpS); mtvsrd(tmpV, tmpL); @@ -14132,7 +14132,7 @@ predicate(false); effect(DEF dst, USE src); - format %{ "MTVSRD $dst, $src \t// Move to 16-byte register"%} + format %{ "MTVSRD $dst, $src \t// Move to 16-byte register"%} size(4); ins_encode %{ __ mtvsrd($dst$$VectorSRegister, $src$$Register); @@ -14147,7 +14147,7 @@ size(4); ins_encode %{ __ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant); - %} + %} ins_pipe(pipe_class_default); %} @@ -14158,7 +14158,7 @@ size(4); ins_encode %{ __ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant); - %} + %} ins_pipe(pipe_class_default); %} @@ -14167,8 +14167,8 @@ predicate(n->as_Vector()->length() == 2); expand %{ vecX tmpV; - immI8 zero %{ (int) 0 %} - mtvsrd(tmpV, src); + immI8 zero %{ (int) 0 %} + mtvsrd(tmpV, src); xxpermdi(dst, tmpV, tmpV, zero); %} %}
--- a/src/hotspot/cpu/ppc/runtime_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/runtime_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -33,7 +33,7 @@ #include "memory/resourceArea.hpp" #include "nativeInst_ppc.hpp" #include "opto/runtime.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -29,10 +29,12 @@ #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" #include "frame_ppc.hpp" +#include "gc/shared/gcLocker.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp"
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,8 +25,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_ppc.hpp" #include "oops/instanceOop.hpp" @@ -612,137 +612,6 @@ #undef __ #define __ _masm-> - // Generate G1 pre-write barrier for array. - // - // Input: - // from - register containing src address (only needed for spilling) - // to - register containing starting address - // count - register containing element count - // tmp - scratch register - // - // Kills: - // nothing - // - void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1, - Register preserve1 = noreg, Register preserve2 = noreg) { - BarrierSet* const bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - // With G1, don't generate the call if we statically know that the target in uninitialized - if (!dest_uninitialized) { - int spill_slots = 3; - if (preserve1 != noreg) { spill_slots++; } - if (preserve2 != noreg) { spill_slots++; } - const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); - Label filtered; - - // Is marking active? - if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { - __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread); - } else { - guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); - __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread); - } - __ cmpdi(CCR0, Rtmp1, 0); - __ beq(CCR0, filtered); - - __ save_LR_CR(R0); - __ push_frame(frame_size, R0); - int slot_nr = 0; - __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP); - __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP); - __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP); - if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); } - if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); } - - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count); - - slot_nr = 0; - __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP); - __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP); - __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP); - if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); } - if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); } - __ addi(R1_SP, R1_SP, frame_size); // pop_frame() - __ restore_LR_CR(R0); - - __ bind(filtered); - } - break; - case BarrierSet::CardTableModRef: - break; - default: - ShouldNotReachHere(); - } - } - - // Generate CMS/G1 post-write barrier for array. - // - // Input: - // addr - register containing starting address - // count - register containing element count - // tmp - scratch register - // - // The input registers and R0 are overwritten. 
- // - void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) { - BarrierSet* const bs = Universe::heap()->barrier_set(); - - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - { - int spill_slots = (preserve != noreg) ? 1 : 0; - const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes); - - __ save_LR_CR(R0); - __ push_frame(frame_size, R0); - if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); } - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count); - if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); } - __ addi(R1_SP, R1_SP, frame_size); // pop_frame(); - __ restore_LR_CR(R0); - } - break; - case BarrierSet::CardTableModRef: - { - Label Lskip_loop, Lstore_loop; - if (UseConcMarkSweepGC) { - // TODO PPC port: contribute optimization / requires shared changes - __ release(); - } - - CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs); - CardTable* const ct = ctbs->card_table(); - assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); - assert_different_registers(addr, count, tmp); - - __ sldi(count, count, LogBytesPerHeapOop); - __ addi(count, count, -BytesPerHeapOop); - __ add(count, addr, count); - // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) - __ srdi(addr, addr, CardTable::card_shift); - __ srdi(count, count, CardTable::card_shift); - __ subf(count, addr, count); - assert_different_registers(R0, addr, count, tmp); - __ load_const(tmp, (address)ct->byte_map_base()); - __ addic_(count, count, 1); - __ beq(CCR0, Lskip_loop); - __ li(R0, 0); - __ mtctr(count); - // Byte store loop - __ bind(Lstore_loop); - __ stbx(R0, tmp, addr); - __ addi(addr, addr, 1); - __ bdnz(Lstore_loop); - __ bind(Lskip_loop); - } - break; - case BarrierSet::ModRef: - break; - default: - ShouldNotReachHere(); - } - } // Support for void zero_words_aligned8(HeapWord* to, size_t count) // @@ -2155,11 +2024,16 @@ STUB_ENTRY(arrayof_oop_disjoint_arraycopy) : STUB_ENTRY(oop_disjoint_arraycopy); - gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7); - - // Save arguments. - __ mr(R9_ARG7, R4_ARG2); - __ mr(R10_ARG8, R5_ARG3); + DecoratorSet decorators = 0; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg); if (UseCompressedOops) { array_overlap_test(nooverlap_target, 2); @@ -2169,7 +2043,7 @@ generate_conjoint_long_copy_core(aligned); } - gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1); + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg); __ li(R3_RET, 0); // return 0 __ blr(); return start; @@ -2188,12 +2062,17 @@ StubCodeMark mark(this, "StubRoutines", name); address start = __ function_entry(); assert_positive_int(R5_ARG3); - gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7); - - // save some arguments, disjoint_long_copy_core destroys them. 
- // needed for post barrier - __ mr(R9_ARG7, R4_ARG2); - __ mr(R10_ARG8, R5_ARG3); + + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg); if (UseCompressedOops) { generate_disjoint_int_copy_core(aligned); @@ -2201,7 +2080,7 @@ generate_disjoint_long_copy_core(aligned); } - gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1); + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg); __ li(R3_RET, 0); // return 0 __ blr(); @@ -2280,11 +2159,17 @@ } #endif - gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval); + DecoratorSet decorators = ARRAYCOPY_CHECKCAST; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval); //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET); - Label load_element, store_element, store_null, success, do_card_marks; + Label load_element, store_element, store_null, success, do_epilogue; __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it. __ li(R8_offset, 0); // Offset from start of arrays. __ li(R2_minus1, -1); @@ -2328,15 +2213,15 @@ // and report their number to the caller. __ subf_(R5_count, R9_remain, R5_count); __ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller - __ bne(CCR0, do_card_marks); + __ bne(CCR0, do_epilogue); __ blr(); __ bind(success); __ li(R3_RET, 0); - __ bind(do_card_marks); - // Store check on R4_to[0..R5_count-1]. - gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET); + __ bind(do_epilogue); + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET); + __ blr(); return start; }
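The PPC arraycopy stubs above no longer open-code GC write barriers. Instead they describe the copy with a DecoratorSet and delegate to the active heap's BarrierSetAssembler, which emits whatever prologue and epilogue the collector needs. A minimal sketch of that call pattern, pieced together from the hunks above (register names and the copy loop are placeholders, not the exact PPC code):

  DecoratorSet decorators = ARRAYCOPY_DISJOINT;   // 0 for the conjoint variant
  if (dest_uninitialized) {
    decorators |= AS_DEST_NOT_INITIALIZED;        // lets G1 skip its pre barrier
  }
  if (aligned) {
    decorators |= ARRAYCOPY_ALIGNED;
  }

  BarrierSetAssembler* bs = Universe::heap()->barrier_set()->barrier_set_assembler();
  bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count, noreg, noreg);
  // ... emit the raw copy loop (generate_disjoint_long_copy_core, etc.) ...
  bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, to, count, noreg);

The same decorators reach both hooks, so the epilogue can stay consistent with whatever the prologue decided to emit.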
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -34,6 +34,8 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" @@ -103,7 +105,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { Label Lnull, Ldone; if (Rval != noreg) {
--- a/src/hotspot/cpu/s390/assembler_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/assembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -28,11 +28,11 @@ #include "compiler/disassembler.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -33,9 +33,10 @@ #include "ci/ciInstance.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "nativeInst_s390.hpp" #include "oops/objArrayKlass.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_s390.inline.hpp" @@ -631,7 +632,7 @@ }; // Index register is normally not supported, but for - // LIRGenerator::CardTableModRef_post_barrier we make an exception. + // LIRGenerator::CardTableBarrierSet_post_barrier we make an exception. if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) { __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint())); store_offset = __ offset(); @@ -1895,6 +1896,15 @@ // If we don't know anything, just go through the generic arraycopy. if (default_type == NULL) { + address copyfunc_addr = StubRoutines::generic_arraycopy(); + + if (copyfunc_addr == NULL) { + // Take a slow path for generic arraycopy. + __ branch_optimized(Assembler::bcondAlways, *stub->entry()); + __ bind(*stub->continuation()); + return; + } + Label done; // Save outgoing arguments in callee saved registers (C convention) in case // a call to System.arraycopy is needed. @@ -1915,10 +1925,6 @@ __ z_lgfr(dst_pos, dst_pos); __ z_lgfr(length, length); - address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); - - address copyfunc_addr = StubRoutines::generic_arraycopy(); - // Pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint. // The arguments are in the corresponding registers. @@ -1927,25 +1933,19 @@ assert(Z_ARG3 == dst, "assumption"); assert(Z_ARG4 == dst_pos, "assumption"); assert(Z_ARG5 == length, "assumption"); - if (copyfunc_addr == NULL) { // Use C version if stub was not generated. - emit_call_c(C_entry); - } else { #ifndef PRODUCT - if (PrintC1Statistics) { - __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt); - __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); - } + if (PrintC1Statistics) { + __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt); + __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); + } #endif - emit_call_c(copyfunc_addr); - } + emit_call_c(copyfunc_addr); CHECK_BAILOUT(); __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); - if (copyfunc_addr != NULL) { - __ z_lgr(tmp, Z_RET); - __ z_xilf(tmp, -1); - } + __ z_lgr(tmp, Z_RET); + __ z_xilf(tmp, -1); // Restore values from callee saved registers so they are where the stub // expects them. @@ -1955,11 +1955,9 @@ __ lgr_if_needed(dst_pos, callee_saved_dst_pos); __ lgr_if_needed(length, callee_saved_length); - if (copyfunc_addr != NULL) { - __ z_sr(length, tmp); - __ z_ar(src_pos, tmp); - __ z_ar(dst_pos, tmp); - } + __ z_sr(length, tmp); + __ z_ar(src_pos, tmp); + __ z_ar(dst_pos, tmp); __ branch_optimized(Assembler::bcondAlways, *stub->entry()); __ bind(*stub->continuation());
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -29,7 +29,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_s390.hpp" #include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/s390/frame_s390.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/frame_s390.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -549,4 +549,6 @@ pc_return_offset = 0, }; + static jint interpreter_frame_expression_stack_direction() { return -1; } + #endif // CPU_S390_VM_FRAME_S390_HPP
--- a/src/hotspot/cpu/s390/frame_s390.inline.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -175,10 +175,6 @@ return (intptr_t*)interpreter_frame_monitor_end() - 1; } -inline jint frame::interpreter_frame_expression_stack_direction() { - return -1; -} - inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const { return &interpreter_frame_tos_address()[offset]; }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "registerSaver_s390.hpp" +#include "gc/g1/g1CardTable.hpp" +#include "gc/g1/g1BarrierSet.hpp" +#include "gc/g1/g1BarrierSetAssembler.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "runtime/thread.hpp" +#include "interpreter/interp_masm.hpp" + +#define __ masm-> + +#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str) + +void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count) { + bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0; + + // With G1, don't generate the call if we statically know that the target is uninitialized. + if (!dest_uninitialized) { + // Is marking active? + Label filtered; + assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame() + assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame() + Register Rtmp1 = Z_R0_scratch; + const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + + SATBMarkQueue::byte_offset_of_active()); + if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { + __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset)); + } else { + guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); + __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset)); + } + __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently. + + RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame. 
+ + if (UseCompressedOops) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), addr, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), addr, count); + } + + RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers); + + __ bind(filtered); + } +} + +void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, bool do_return) { + address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry); + if (!do_return) { + assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame() + assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame() + RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame. + __ call_VM_leaf(entry_point, addr, count); + RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers); + } else { + // Tail call: call c and return to stub caller. + __ lgr_if_needed(Z_ARG1, addr); + __ lgr_if_needed(Z_ARG2, count); + __ load_const(Z_R1, entry_point); + __ z_br(Z_R1); // Branch without linking, callee will return to stub caller. + } +}
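The new s390 G1 assembler keeps the old stub logic but moves it behind the BarrierSetAssembler interface. Logically, the emitted pre barrier only calls into the runtime when SATB marking is active and the destination may already hold visible oops. A rough C++ rendering of that decision (a sketch only; the thread-local "active" flag accessor is an assumption here, the generated code tests the flag directly in assembly):

  // Pseudo-equivalent of the generated pre-barrier path.
  if (!dest_uninitialized && thread->satb_mark_queue().is_active()) {
    if (UseCompressedOops) {
      G1BarrierSet::write_ref_array_pre_narrow_oop_entry((narrowOop*)dst, count);
    } else {
      G1BarrierSet::write_ref_array_pre_oop_entry((oop*)dst, count);
    }
  }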
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP +#define CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { + protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count); + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, bool do_return); +}; + +#endif // CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp	Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
+#define CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
+class InterpreterMacroAssembler;
+
+class BarrierSetAssembler: public CHeapObj<mtGC> {
+public:
+  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                  Register src, Register dst, Register count) {}
+  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                  Register dst, Register count, bool do_return = false);
+};
+
+#endif // CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" +#include "gc/shared/cardTableBarrierSetAssembler.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "interpreter/interp_masm.hpp" + +#define __ masm-> + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + +#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) + +void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, + bool do_return) { + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + + NearLabel doXC, done; + assert_different_registers(Z_R0, Z_R1, addr, count); + + // Nothing to do if count <= 0. + if (!do_return) { + __ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done); + } else { + __ z_ltgr(count, count); + __ z_bcr(Assembler::bcondNotPositive, Z_R14); + } + + // Note: We can't combine the shifts. We could lose a carry + // from calculating the array end address. + // count = (count-1)*BytesPerHeapOop + addr + // Count holds addr of last oop in array then. + __ z_sllg(count, count, LogBytesPerHeapOop); + __ add2reg_with_index(count, -BytesPerHeapOop, count, addr); + + // Get base address of card table. + __ load_const_optimized(Z_R1, (address)ct->byte_map_base()); + + // count = (count>>shift) - (addr>>shift) + __ z_srlg(addr, addr, CardTable::card_shift); + __ z_srlg(count, count, CardTable::card_shift); + + // Prefetch first elements of card table for update. + if (VM_Version::has_Prefetch()) { + __ z_pfd(0x02, 0, addr, Z_R1); + } + + // Special case: clear just one byte. + __ clear_reg(Z_R0, true, false); // Used for doOneByte. + __ z_sgr(count, addr); // Count = n-1 now, CC used for brc below. + __ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr. 
+ if (!do_return) { + __ z_brz(done); + } else { + __ z_bcr(Assembler::bcondZero, Z_R14); + } + + __ z_cghi(count, 255); + __ z_brnh(doXC); + + // MVCLE: clear a long area. + // Start addr of card table range = base + addr. + // # bytes in card table range = (count + 1) + __ add2reg_with_index(Z_R0, 0, Z_R1, addr); + __ add2reg(Z_R1, 1, count); + + // dirty hack: + // There are just two callers. Both pass + // count in Z_ARG3 = Z_R4 + // addr in Z_ARG2 = Z_R3 + // ==> use Z_ARG2 as src len reg = 0 + // Z_ARG1 as src addr (ignored) + assert(count == Z_ARG3, "count: unexpected register number"); + assert(addr == Z_ARG2, "addr: unexpected register number"); + __ clear_reg(Z_ARG2, true, false); + + __ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0); + + if (!do_return) { + __ z_bru(done); + } else { + __ z_bcr(Assembler::bcondAlways, Z_R14); + } + + // XC: clear a short area. + Label XC_template; // Instr template, never exec directly! + __ bind(XC_template); + __ z_xc(0, 0, addr, 0, addr); + + __ bind(doXC); + // start addr of card table range = base + addr + // end addr of card table range = base + addr + count + __ add2reg_with_index(addr, 0, Z_R1, addr); + + if (VM_Version::has_ExecuteExtensions()) { + __ z_exrl(count, XC_template); // Execute XC with var. len. + } else { + __ z_larl(Z_R1, XC_template); + __ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len. + } + if (do_return) { + __ z_br(Z_R14); + } + + __ bind(done); +}
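All of the MVCLE/XC selection above serves one purpose: mark every card spanned by the stored oop range as dirty. In plain C terms (a sketch assuming count > 0, matching the early exit above, and a dirty value of 0, which is what the stores of the cleared Z_R0 imply):

  // byte_map_base is pre-biased so that (address >> card_shift) indexes it directly.
  void dirty_card_range(jbyte* byte_map_base, uintptr_t addr, size_t count) {
    uintptr_t first = addr >> CardTable::card_shift;
    uintptr_t last  = (addr + count * BytesPerHeapOop - BytesPerHeapOop) >> CardTable::card_shift;
    for (uintptr_t card = first; card <= last; card++) {
      byte_map_base[card] = 0;  // dirty card
    }
  }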
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.hpp	Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
+#define CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+  virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
+                                                bool do_return);
+};
+
+#endif // CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/s390/gc/shared/modRefBarrierSetAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +#define __ masm-> + +void ModRefBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, + bool do_return) { + if (do_return) { __ z_br(Z_R14); } +} + +void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) { + if (type == T_OBJECT || type == T_ARRAY) { + gen_write_ref_array_pre_barrier(masm, decorators, dst, count); + } +} + +void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register dst, Register count, bool do_return) { + if (type == T_OBJECT || type == T_ARRAY) { + gen_write_ref_array_post_barrier(masm, decorators, dst, count, do_return); + } else { + if (do_return) { __ z_br(Z_R14); } + } +}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/modRefBarrierSetAssembler_s390.hpp	Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
+#define CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+  virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {}
+  virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
+                                                bool do_return);
+
+public:
+  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                  Register src, Register dst, Register count);
+  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+                                  Register dst, Register count, bool do_return = false);
+};
+
+#endif // CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
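ModRefBarrierSetAssembler narrows the generic arraycopy hooks down to oop arrays and leaves the actual barrier emission to its subclasses, so a collector only has to override the two gen_write_ref_array_* methods. A hypothetical subclass for a collector that needs no barriers (the class name is illustrative, not part of this change):

  class NoBarrierSetAssembler : public ModRefBarrierSetAssembler {
   protected:
    virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                 Register addr, Register count) {
      // nothing to publish before the copy
    }
    virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                  Register addr, Register count, bool do_return) {
      // nothing to record, but honor the tail-call contract the s390 stubs rely on
      if (do_return) { masm->z_br(Z_R14); }
    }
  };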
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -36,6 +36,7 @@ #include "prims/jvmtiThreadState.hpp" #include "runtime/basicLock.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/s390/interpreterRT_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/interpreterRT_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" +#include "interpreter/interp_masm.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -32,7 +33,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" // Access macros for Java and C arguments. @@ -64,6 +65,11 @@ } // Implementation of SignatureHandlerGenerator +InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator( + const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { + _masm = new MacroAssembler(buffer); + _fp_arg_nr = 0; +} void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { int int_arg_nr = jni_offset() - _fp_arg_nr;
--- a/src/hotspot/cpu/s390/interpreterRT_s390.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/interpreterRT_s390.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,7 +26,8 @@ #ifndef CPU_S390_VM_INTERPRETERRT_S390_HPP #define CPU_S390_VM_INTERPRETERRT_S390_HPP -#include "memory/allocation.hpp" +// This is included in the middle of class Interpreter. +// Do not include files here. static int binary_search(int key, LookupswitchPair* array, int n); @@ -51,10 +52,7 @@ public: // creation - SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { - _masm = new MacroAssembler(buffer); - _fp_arg_nr = 0; - } + SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); // code generation void generate(uint64_t fingerprint);
--- a/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -34,6 +34,7 @@ #include "jvmci/jvmciCompilerToVM.hpp" #include "jvmci/jvmciJavaClasses.hpp" #include "oops/oop.inline.hpp" +#include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_s390.inline.hpp"
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -30,7 +30,7 @@ #include "gc/shared/cardTable.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/klass.inline.hpp" @@ -41,7 +41,7 @@ #include "registerSaver_s390.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" @@ -3505,9 +3505,9 @@ // Write to card table for modification at store_addr - register is destroyed afterwards. void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) { BarrierSet* bs = Universe::heap()->barrier_set(); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); - assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); assert_different_registers(store_addr, tmp); z_srlg(store_addr, store_addr, CardTable::card_shift); load_absolute_address(tmp, (address)ct->byte_map_base());
--- a/src/hotspot/cpu/s390/methodHandles_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/methodHandles_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2017, SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -31,6 +31,8 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "utilities/preserveException.hpp" #ifdef PRODUCT #define __ _masm->
--- a/src/hotspot/cpu/s390/runtime_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/runtime_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -32,7 +32,7 @@ #include "memory/resourceArea.hpp" #include "nativeInst_s390.hpp" #include "opto/runtime.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -28,11 +28,13 @@ #include "code/debugInfoRec.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" +#include "gc/shared/gcLocker.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" #include "registerSaver_s390.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp"
--- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -26,8 +26,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" #include "registerSaver_s390.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interp_masm.hpp" #include "nativeInst_s390.hpp" @@ -686,188 +686,6 @@ return start; } - // Generate pre-write barrier for array. - // - // Input: - // addr - register containing starting address - // count - register containing element count - // - // The input registers are overwritten. - void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { - - BarrierSet* const bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - // With G1, don't generate the call if we statically know that the target is uninitialized. - if (!dest_uninitialized) { - // Is marking active? - Label filtered; - assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame() - assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame() - Register Rtmp1 = Z_R0_scratch; - const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() + - SATBMarkQueue::byte_offset_of_active()); - if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { - __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset)); - } else { - guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); - __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset)); - } - __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently. - - // __ push_frame_abi160(0); // implicitly done in save_live_registers() - (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), addr, count); - (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers); - // __ pop_frame(); // implicitly done in restore_live_registers() - - __ bind(filtered); - } - break; - case BarrierSet::CardTableModRef: - case BarrierSet::ModRef: - break; - default: - ShouldNotReachHere(); - } - } - - // Generate post-write barrier for array. - // - // Input: - // addr - register containing starting address - // count - register containing element count - // - // The input registers are overwritten. - void gen_write_ref_array_post_barrier(Register addr, Register count, bool branchToEnd) { - BarrierSet* const bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - { - if (branchToEnd) { - assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame() - assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame() - // __ push_frame_abi160(0); // implicitly done in save_live_registers() - (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count); - (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers); - // __ pop_frame(); // implicitly done in restore_live_registers() - } else { - // Tail call: call c and return to stub caller. 
- address entry_point = CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post); - __ lgr_if_needed(Z_ARG1, addr); - __ lgr_if_needed(Z_ARG2, count); - __ load_const(Z_R1, entry_point); - __ z_br(Z_R1); // Branch without linking, callee will return to stub caller. - } - } - break; - case BarrierSet::CardTableModRef: - // These cases formerly known as - // void array_store_check(Register addr, Register count, bool branchToEnd). - { - NearLabel doXC, done; - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); - CardTable* ct = ctbs->card_table(); - assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); - assert_different_registers(Z_R0, Z_R1, addr, count); - - // Nothing to do if count <= 0. - if (branchToEnd) { - __ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done); - } else { - __ z_ltgr(count, count); - __ z_bcr(Assembler::bcondNotPositive, Z_R14); - } - - // Note: We can't combine the shifts. We could lose a carry - // from calculating the array end address. - // count = (count-1)*BytesPerHeapOop + addr - // Count holds addr of last oop in array then. - __ z_sllg(count, count, LogBytesPerHeapOop); - __ add2reg_with_index(count, -BytesPerHeapOop, count, addr); - - // Get base address of card table. - __ load_const_optimized(Z_R1, (address)ct->byte_map_base()); - - // count = (count>>shift) - (addr>>shift) - __ z_srlg(addr, addr, CardTable::card_shift); - __ z_srlg(count, count, CardTable::card_shift); - - // Prefetch first elements of card table for update. - if (VM_Version::has_Prefetch()) { - __ z_pfd(0x02, 0, addr, Z_R1); - } - - // Special case: clear just one byte. - __ clear_reg(Z_R0, true, false); // Used for doOneByte. - __ z_sgr(count, addr); // Count = n-1 now, CC used for brc below. - __ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr. - if (branchToEnd) { - __ z_brz(done); - } else { - __ z_bcr(Assembler::bcondZero, Z_R14); - } - - __ z_cghi(count, 255); - __ z_brnh(doXC); - - // MVCLE: clear a long area. - // Start addr of card table range = base + addr. - // # bytes in card table range = (count + 1) - __ add2reg_with_index(Z_R0, 0, Z_R1, addr); - __ add2reg(Z_R1, 1, count); - - // dirty hack: - // There are just two callers. Both pass - // count in Z_ARG3 = Z_R4 - // addr in Z_ARG2 = Z_R3 - // ==> use Z_ARG2 as src len reg = 0 - // Z_ARG1 as src addr (ignored) - assert(count == Z_ARG3, "count: unexpected register number"); - assert(addr == Z_ARG2, "addr: unexpected register number"); - __ clear_reg(Z_ARG2, true, false); - - __ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0); - - if (branchToEnd) { - __ z_bru(done); - } else { - __ z_bcr(Assembler::bcondAlways, Z_R14); - } - - // XC: clear a short area. - Label XC_template; // Instr template, never exec directly! - __ bind(XC_template); - __ z_xc(0, 0, addr, 0, addr); - - __ bind(doXC); - // start addr of card table range = base + addr - // end addr of card table range = base + addr + count - __ add2reg_with_index(addr, 0, Z_R1, addr); - - if (VM_Version::has_ExecuteExtensions()) { - __ z_exrl(count, XC_template); // Execute XC with var. len. - } else { - __ z_larl(Z_R1, XC_template); - __ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len. - } - if (!branchToEnd) { - __ z_br(Z_R14); - } - - __ bind(done); - } - break; - case BarrierSet::ModRef: - if (!branchToEnd) { __ z_br(Z_R14); } - break; - default: - ShouldNotReachHere(); - } - } - - // This is to test that the count register contains a positive int value. 
// Required because C2 does not respect int to long conversion for stub calls. void assert_positive_int(Register count) { @@ -1482,11 +1300,20 @@ unsigned int start_off = __ offset(); // Remember stub start address (is rtn value). unsigned int size = UseCompressedOops ? 4 : 8; - gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized); + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3); generate_disjoint_copy(aligned, size, true, true); - gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false); + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true); return __ addr_at(start_off); } @@ -1565,11 +1392,20 @@ // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier. array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint. - gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized); + DecoratorSet decorators = 0; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3); generate_conjoint_copy(aligned, size, true); // Must preserve ARG2, ARG3. - gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false); + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true); return __ addr_at(start_off); }
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -33,6 +33,8 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" @@ -260,7 +262,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (val_is_null) { __ store_heap_oop_null(val, offset, base);
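On the interpreter side the only change is the renamed barrier-set kind: the oop-store helpers still switch over BarrierSet::Name, with BarrierSet::CardTableModRef becoming BarrierSet::CardTableBarrierSet. Schematically (a simplified sketch of the dispatch, not the exact s390 code):

  switch (Universe::heap()->barrier_set()->kind()) {
    case BarrierSet::G1BarrierSet:
      // SATB pre barrier, store, then remembered-set post barrier
      break;
    case BarrierSet::CardTableBarrierSet:   // formerly BarrierSet::CardTableModRef
      // plain store followed by a card mark
      break;
    case BarrierSet::ModRef:
      // plain store, no barrier
      break;
    default:
      ShouldNotReachHere();
  }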
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,10 +31,12 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_sparc.hpp" #include "oops/objArrayKlass.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" @@ -1878,29 +1880,21 @@ __ mov(dst_pos, O3); __ mov(length, O4); address copyfunc_addr = StubRoutines::generic_arraycopy(); - - if (copyfunc_addr == NULL) { // Use C version if stub was not generated - __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); - } else { + assert(copyfunc_addr != NULL, "generic arraycopy stub required"); + #ifndef PRODUCT - if (PrintC1Statistics) { - address counter = (address)&Runtime1::_generic_arraycopystub_cnt; - __ inc_counter(counter, G1, G3); - } + if (PrintC1Statistics) { + address counter = (address)&Runtime1::_generic_arraycopystub_cnt; + __ inc_counter(counter, G1, G3); + } #endif - __ call_VM_leaf(tmp, copyfunc_addr); - } - - if (copyfunc_addr != NULL) { - __ xor3(O0, -1, tmp); - __ sub(length, tmp, length); - __ add(src_pos, tmp, src_pos); - __ cmp_zero_and_br(Assembler::less, O0, *stub->entry()); - __ delayed()->add(dst_pos, tmp, dst_pos); - } else { - __ cmp_zero_and_br(Assembler::less, O0, *stub->entry()); - __ delayed()->nop(); - } + __ call_VM_leaf(tmp, copyfunc_addr); + + __ xor3(O0, -1, tmp); + __ sub(length, tmp, length); + __ add(src_pos, tmp, src_pos); + __ cmp_zero_and_br(Assembler::less, O0, *stub->entry()); + __ delayed()->add(dst_pos, tmp, dst_pos); __ bind(*stub->continuation()); return; }
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -28,7 +28,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_sparc.hpp" #include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/sparc/frame_sparc.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/frame_sparc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -240,4 +240,6 @@ void interpreter_frame_set_monitors(BasicObjectLock* monitors); public: + static jint interpreter_frame_expression_stack_direction() { return -1; } + #endif // CPU_SPARC_VM_FRAME_SPARC_HPP
--- a/src/hotspot/cpu/sparc/frame_sparc.inline.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/frame_sparc.inline.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -99,8 +99,6 @@ return (intptr_t*) sp_addr_at( ImethodDataPtr->sp_offset_in_saved_window()); } -inline jint frame::interpreter_frame_expression_stack_direction() { return -1; } - // bottom(base) of the expression stack (highest address) inline intptr_t* frame::interpreter_frame_expression_stack() const { return (intptr_t*)interpreter_frame_monitors() - 1;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/g1/g1BarrierSet.hpp" +#include "gc/g1/g1CardTable.hpp" +#include "gc/g1/g1BarrierSetAssembler.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "interpreter/interp_masm.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/thread.hpp" +#include "utilities/macros.hpp" + +#define __ masm-> + +void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count) { + bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0; + // With G1, don't generate the call if we statically know that the target in uninitialized + if (!dest_uninitialized) { + Register tmp = O5; + assert_different_registers(addr, count, tmp); + Label filtered; + // Is marking active? + if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { + __ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp); + } else { + guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, + "Assumption"); + __ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp); + } + // Is marking active? + __ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); + + __ save_frame(0); + // Save the necessary global regs... will be used after. + if (addr->is_global()) { + __ mov(addr, L0); + } + if (count->is_global()) { + __ mov(count, L1); + } + __ mov(addr->after_save(), O0); + // Get the count into O1 + address slowpath = UseCompressedOops ? CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry) + : CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry); + __ call(slowpath); + __ delayed()->mov(count->after_save(), O1); + if (addr->is_global()) { + __ mov(L0, addr); + } + if (count->is_global()) { + __ mov(L1, count); + } + __ restore(); + + __ bind(filtered); + DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp + } +} + +void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp) { + // Get some new fresh output registers. 
+ __ save_frame(0); + __ mov(addr->after_save(), O0); + __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry)); + __ delayed()->mov(count->after_save(), O1); + __ restore(); +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP +#define CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count); + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp); +}; + +#endif // CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP +#define CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP + +#include "asm/macroAssembler.hpp" +#include "memory/allocation.hpp" +#include "oops/access.hpp" + +class InterpreterMacroAssembler; + +class BarrierSetAssembler: public CHeapObj<mtGC> { +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) {} + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) {} +}; + +#endif // CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP
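The base class above gives every barrier set empty arraycopy hooks, so collectors with no arraycopy work emit no code and the stub generators no longer switch on BarrierSet::kind(). The stubs reach the concrete assembler through barrier_set()->barrier_set_assembler(); only the accessor name appears in these hunks, so the following is just a plausible sketch that assumes the barrier set owns its assembler (the field name is an assumption):

  class BarrierSet /* : ... */ {
    BarrierSetAssembler* _barrier_set_assembler;   // assumed field, filled in by the concrete barrier set
  public:
    BarrierSetAssembler* barrier_set_assembler() const {
      return _barrier_set_assembler;
    }
    // remaining members unchanged
  };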
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/sparc/gc/shared/cardTableBarrierSetAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,72 @@ + +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" +#include "gc/shared/cardTableBarrierSetAssembler.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "interpreter/interp_masm.hpp" + +#define __ masm-> + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + +void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp) { + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + assert_different_registers(addr, count, tmp); + + Label L_loop, L_done; + + __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do + + __ sll_ptr(count, LogBytesPerHeapOop, count); + __ sub(count, BytesPerHeapOop, count); + __ add(count, addr, count); + // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) + __ srl_ptr(addr, CardTable::card_shift, addr); + __ srl_ptr(count, CardTable::card_shift, count); + __ sub(count, addr, count); + AddressLiteral rs(ct->byte_map_base()); + __ set(rs, tmp); + __ BIND(L_loop); + __ stb(G0, tmp, addr); + __ subcc(count, 1, count); + __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); + __ delayed()->add(addr, 1, addr); + + __ BIND(L_done); +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/sparc/gc/shared/cardTableBarrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP +#define CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp); +}; + +#endif // CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/sparc/gc/shared/modRefBarrierSetAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +#define __ masm-> + +void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) { + if (type == T_OBJECT) { + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + if (!checkcast) { + // save arguments for barrier generation + __ mov(dst, G1); + __ mov(count, G5); + gen_write_ref_array_pre_barrier(masm, decorators, G1, G5); + } else { + gen_write_ref_array_pre_barrier(masm, decorators, dst, count); + } + } +} + +void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) { + if (type == T_OBJECT) { + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + if (!checkcast) { + // O0 is used as temp register + gen_write_ref_array_post_barrier(masm, decorators, G1, G5, O0); + } else { + gen_write_ref_array_post_barrier(masm, decorators, dst, count, O3); + } + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/sparc/gc/shared/modRefBarrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP +#define CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/barrierSetAssembler.hpp" + +class ModRefBarrierSetAssembler: public BarrierSetAssembler { +protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {} + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp) {} + +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count); + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count); +}; + +#endif // CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP
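Taken together, the new SPARC files form a small hierarchy that replaces the switch on bs->kind() removed from stubGenerator_sparc.cpp further down: BarrierSetAssembler supplies empty hooks, ModRefBarrierSetAssembler does the argument shuffling shared by modifying-reference barrier sets, and CardTableBarrierSetAssembler and G1BarrierSetAssembler emit the actual barriers. A hypothetical collector needing only a post barrier would plug in the same way (the class name below is illustrative and not part of this changeset):

  class MyBarrierSetAssembler: public ModRefBarrierSetAssembler {
  protected:
    virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                  Register addr, Register count, Register tmp) {
      // emit the collector-specific post barrier over [addr, addr + count oops)
    }
  };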
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ #include "prims/jvmtiThreadState.hpp" #include "runtime/basicLock.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" +#include "interpreter/interp_masm.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -32,7 +33,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" @@ -40,6 +41,10 @@ // Implementation of SignatureHandlerGenerator +InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator( + const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { + _masm = new MacroAssembler(buffer); +} void InterpreterRuntime::SignatureHandlerGenerator::pass_word(int size_of_arg, int offset_in_arg) { Argument jni_arg(jni_offset() + offset_in_arg, false);
--- a/src/hotspot/cpu/sparc/interpreterRT_sparc.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/interpreterRT_sparc.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,9 @@ #ifndef CPU_SPARC_VM_INTERPRETERRT_SPARC_HPP #define CPU_SPARC_VM_INTERPRETERRT_SPARC_HPP -#include "memory/allocation.hpp" +// This is included in the middle of class Interpreter. +// Do not include files here. + static int binary_search(int key, LookupswitchPair* array, int n); @@ -52,9 +54,7 @@ public: // Creation - SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { - _masm = new MacroAssembler(buffer); - } + SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); // Code generation void generate( uint64_t fingerprint );
--- a/src/hotspot/cpu/sparc/jvmciCodeInstaller_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/jvmciCodeInstaller_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ #include "jvmci/jvmciCompilerToVM.hpp" #include "jvmci/jvmciJavaClasses.hpp" #include "oops/oop.inline.hpp" +#include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "utilities/align.hpp" #include "vmreg_sparc.inline.hpp"
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -27,7 +27,7 @@ #include "asm/macroAssembler.inline.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" @@ -35,7 +35,7 @@ #include "oops/klass.inline.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.inline.hpp" @@ -3729,11 +3729,11 @@ void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) { // If we're writing constant NULL, we can skip the write barrier. if (new_val == G0) return; - CardTableModRefBS* bs = - barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); + CardTableBarrierSet* bs = + barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); CardTable* ct = bs->card_table(); - assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier"); + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier"); card_table_write(ct->byte_map_base(), tmp, store_addr); }
--- a/src/hotspot/cpu/sparc/methodHandles_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/methodHandles_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,8 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "utilities/preserveException.hpp" #define __ _masm->
--- a/src/hotspot/cpu/sparc/runtime_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/runtime_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,7 +31,7 @@ #include "memory/resourceArea.hpp" #include "nativeInst_sparc.hpp" #include "opto/runtime.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -27,10 +27,12 @@ #include "code/debugInfoRec.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" +#include "gc/shared/gcLocker.hpp" #include "interpreter/interpreter.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp"
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -24,8 +24,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.inline.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_sparc.hpp" #include "oops/instanceOop.hpp" @@ -823,125 +823,6 @@ __ delayed()->nop(); } - // - // Generate pre-write barrier for array. - // - // Input: - // addr - register containing starting address - // count - register containing element count - // tmp - scratch register - // - // The input registers are overwritten. - // - void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { - BarrierSet* bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - // With G1, don't generate the call if we statically know that the target in uninitialized - if (!dest_uninitialized) { - Register tmp = O5; - assert_different_registers(addr, count, tmp); - Label filtered; - // Is marking active? - if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { - __ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp); - } else { - guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, - "Assumption"); - __ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp); - } - // Is marking active? - __ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); - - __ save_frame(0); - // Save the necessary global regs... will be used after. - if (addr->is_global()) { - __ mov(addr, L0); - } - if (count->is_global()) { - __ mov(count, L1); - } - __ mov(addr->after_save(), O0); - // Get the count into O1 - __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); - __ delayed()->mov(count->after_save(), O1); - if (addr->is_global()) { - __ mov(L0, addr); - } - if (count->is_global()) { - __ mov(L1, count); - } - __ restore(); - - __ bind(filtered); - DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp - } - break; - case BarrierSet::CardTableModRef: - break; - default: - ShouldNotReachHere(); - } - } - // - // Generate post-write barrier for array. - // - // Input: - // addr - register containing starting address - // count - register containing element count - // tmp - scratch register - // - // The input registers are overwritten. - // - void gen_write_ref_array_post_barrier(Register addr, Register count, - Register tmp) { - BarrierSet* bs = Universe::heap()->barrier_set(); - - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - { - // Get some new fresh output registers. 
- __ save_frame(0); - __ mov(addr->after_save(), O0); - __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)); - __ delayed()->mov(count->after_save(), O1); - __ restore(); - } - break; - case BarrierSet::CardTableModRef: - { - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); - CardTable* ct = ctbs->card_table(); - assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); - assert_different_registers(addr, count, tmp); - - Label L_loop, L_done; - - __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do - - __ sll_ptr(count, LogBytesPerHeapOop, count); - __ sub(count, BytesPerHeapOop, count); - __ add(count, addr, count); - // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.) - __ srl_ptr(addr, CardTable::card_shift, addr); - __ srl_ptr(count, CardTable::card_shift, count); - __ sub(count, addr, count); - AddressLiteral rs(ct->byte_map_base()); - __ set(rs, tmp); - __ BIND(L_loop); - __ stb(G0, tmp, addr); - __ subcc(count, 1, count); - __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop); - __ delayed()->add(addr, 1, addr); - __ BIND(L_done); - } - break; - case BarrierSet::ModRef: - break; - default: - ShouldNotReachHere(); - } - } // // Generate main code for disjoint arraycopy @@ -2388,18 +2269,25 @@ BLOCK_COMMENT("Entry:"); } - // save arguments for barrier generation - __ mov(to, G1); - __ mov(count, G5); - gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count); + assert_clean_int(count, O3); // Make sure 'count' is clean int. 
if (UseCompressedOops) { generate_disjoint_int_copy_core(aligned); } else { generate_disjoint_long_copy_core(aligned); } - // O0 is used as temp register - gen_write_ref_array_post_barrier(G1, G5, O0); + + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count); // O3, O4 are used as temp registers inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); @@ -2438,10 +2326,16 @@ array_overlap_test(nooverlap_target, LogBytesPerHeapOop); - // save arguments for barrier generation - __ mov(to, G1); - __ mov(count, G5); - gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); + DecoratorSet decorators = 0; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count); if (UseCompressedOops) { generate_conjoint_int_copy_core(aligned); @@ -2449,8 +2343,7 @@ generate_conjoint_long_copy_core(aligned); } - // O0 is used as temp register - gen_write_ref_array_post_barrier(G1, G5, O0); + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count); // O3, O4 are used as temp registers inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4); @@ -2552,9 +2445,16 @@ // caller can pass a 64-bit byte count here (from generic stub) BLOCK_COMMENT("Entry:"); } - gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized); - - Label load_element, store_element, do_card_marks, fail, done; + + DecoratorSet decorators = ARRAYCOPY_CHECKCAST; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count); + + Label load_element, store_element, do_epilogue, fail, done; __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it __ brx(Assembler::notZero, false, Assembler::pt, load_element); __ delayed()->mov(G0, O5_offset); // offset from start of arrays @@ -2576,7 +2476,7 @@ __ deccc(G1_remain); // decrement the count __ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop __ inc(O5_offset, heapOopSize); // step to next offset - __ brx(Assembler::zero, true, Assembler::pt, do_card_marks); + __ brx(Assembler::zero, true, Assembler::pt, do_epilogue); __ delayed()->set(0, O0); // return -1 on success // ======== loop entry is here ======== @@ -2600,8 +2500,8 @@ __ brx(Assembler::zero, false, Assembler::pt, done); __ delayed()->not1(O2_count, O0); // report (-1^K) to caller - __ BIND(do_card_marks); - gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2] + __ BIND(do_epilogue); + bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count); __ BIND(done); inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
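Each of the three converted SPARC stubs selects its decorators slightly differently: the disjoint stub adds ARRAYCOPY_DISJOINT, the conjoint stub starts from an empty set, and the checkcast stub uses ARRAYCOPY_CHECKCAST and never ARRAYCOPY_ALIGNED; AS_DEST_NOT_INITIALIZED is added whenever dest_uninitialized is set. Condensed into one sketch, where 'kind' is an illustrative variable rather than HotSpot code:

  DecoratorSet decorators = 0;
  if (kind == disjoint_oop_copy)  decorators |= ARRAYCOPY_DISJOINT;
  if (kind == checkcast_oop_copy) decorators |= ARRAYCOPY_CHECKCAST;
  if (dest_uninitialized)         decorators |= AS_DEST_NOT_INITIALIZED;
  if (aligned && kind != checkcast_oop_copy) {
    decorators |= ARRAYCOPY_ALIGNED;
  }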
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -32,6 +32,8 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" @@ -90,7 +92,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (index == noreg ) { assert(Assembler::is_simm13(offset), "fix this code");
--- a/src/hotspot/cpu/x86/assembler_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/assembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,13 +25,12 @@ #include "precompiled.hpp" #include "asm/assembler.hpp" #include "asm/assembler.inline.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -33,10 +33,12 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciInstance.hpp" #include "gc/shared/barrierSet.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.hpp" #include "nativeInst_x86.hpp" #include "oops/objArrayKlass.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "vmreg_x86.inline.hpp" @@ -3057,9 +3059,8 @@ store_parameter(src, 4); NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");) - address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); - address copyfunc_addr = StubRoutines::generic_arraycopy(); + assert(copyfunc_addr != NULL, "generic arraycopy stub required"); // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint #ifdef _LP64 @@ -3077,29 +3078,21 @@ // Allocate abi space for args but be sure to keep stack aligned __ subptr(rsp, 6*wordSize); store_parameter(j_rarg4, 4); - if (copyfunc_addr == NULL) { // Use C version if stub was not generated - __ call(RuntimeAddress(C_entry)); - } else { #ifndef PRODUCT - if (PrintC1Statistics) { - __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); - } + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } #endif - __ call(RuntimeAddress(copyfunc_addr)); - } + __ call(RuntimeAddress(copyfunc_addr)); __ addptr(rsp, 6*wordSize); #else __ mov(c_rarg4, j_rarg4); - if (copyfunc_addr == NULL) { // Use C version if stub was not generated - __ call(RuntimeAddress(C_entry)); - } else { #ifndef PRODUCT - if (PrintC1Statistics) { - __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); - } + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } #endif - __ call(RuntimeAddress(copyfunc_addr)); - } + __ call(RuntimeAddress(copyfunc_addr)); #endif // _WIN64 #else __ push(length); @@ -3108,26 +3101,20 @@ __ push(src_pos); __ push(src); - if (copyfunc_addr == NULL) { // Use C version if stub was not generated - __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack - } else { #ifndef PRODUCT - if (PrintC1Statistics) { - __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); - } + if (PrintC1Statistics) { + __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt)); + } #endif - __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack - } + __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack #endif // _LP64 __ cmpl(rax, 0); __ jcc(Assembler::equal, *stub->continuation()); - if (copyfunc_addr != NULL) { - __ mov(tmp, rax); - __ xorl(tmp, -1); - } + __ mov(tmp, rax); + __ xorl(tmp, -1); // Reload values from the stack so they are where the stub // expects them. @@ -3137,11 +3124,9 @@ __ movptr (src_pos, Address(rsp, 3*BytesPerWord)); __ movptr (src, Address(rsp, 4*BytesPerWord)); - if (copyfunc_addr != NULL) { - __ subl(length, tmp); - __ addl(src_pos, tmp); - __ addl(dst_pos, tmp); - } + __ subl(length, tmp); + __ addl(src_pos, tmp); + __ addl(dst_pos, tmp); __ jmp(*stub->entry()); __ bind(*stub->continuation());
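The C1 change above drops the fallback to the C version (Runtime1::arraycopy) and instead asserts that StubRoutines::generic_arraycopy() was generated, so the post-call fixup is now unconditional. What that fixup computes, written out as a sketch with variable names mirroring the registers in the hunk:

  // generic_arraycopy returns 0 on success, or ~copied on a partial copy,
  // where 'copied' is the number of elements moved before the failure.
  int copied = ~retval;     // retval is rax here, O0 in the SPARC hunk above
  length  -= copied;
  src_pos += copied;
  dst_pos += copied;
  // the slow-path stub then handles the remaining sub-range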
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -29,7 +29,7 @@ #include "c1/c1_Runtime1.hpp" #include "ci/ciUtilities.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/x86/frame_x86.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/frame_x86.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -154,4 +154,6 @@ // deoptimization support void interpreter_frame_set_last_sp(intptr_t* sp); + static jint interpreter_frame_expression_stack_direction() { return -1; } + #endif // CPU_X86_VM_FRAME_X86_HPP
--- a/src/hotspot/cpu/x86/frame_x86.inline.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -223,10 +223,6 @@ return monitor_end-1; } - -inline jint frame::interpreter_frame_expression_stack_direction() { return -1; } - - // Entry frames inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/g1/g1BarrierSet.hpp" +#include "gc/g1/g1BarrierSetAssembler.hpp" +#include "gc/g1/g1CardTable.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/shared/collectedHeap.hpp" +#include "interpreter/interp_masm.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/thread.hpp" +#include "utilities/macros.hpp" + +#define __ masm-> + +void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count) { + bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0; + + if (!dest_uninitialized) { + Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); +#ifndef _LP64 + __ push(thread); + __ get_thread(thread); +#endif + + Label filtered; + Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + + SATBMarkQueue::byte_offset_of_active())); + // Is marking active? + if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { + __ cmpl(in_progress, 0); + } else { + assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); + __ cmpb(in_progress, 0); + } + + NOT_LP64(__ pop(thread);) + + __ jcc(Assembler::equal, filtered); + + __ pusha(); // push registers +#ifdef _LP64 + if (count == c_rarg0) { + if (addr == c_rarg1) { + // exactly backwards!! 
+ __ xchgptr(c_rarg1, c_rarg0); + } else { + __ movptr(c_rarg1, count); + __ movptr(c_rarg0, addr); + } + } else { + __ movptr(c_rarg0, addr); + __ movptr(c_rarg1, count); + } + if (UseCompressedOops) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2); + } +#else + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), + addr, count); +#endif + __ popa(); + + __ bind(filtered); + } +} + +void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp) { + __ pusha(); // push registers (overkill) +#ifdef _LP64 + if (c_rarg0 == count) { // On win64 c_rarg0 == rcx + assert_different_registers(c_rarg1, addr); + __ mov(c_rarg1, count); + __ mov(c_rarg0, addr); + } else { + assert_different_registers(c_rarg0, count); + __ mov(c_rarg0, addr); + __ mov(c_rarg1, count); + } + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2); +#else + __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), + addr, count); +#endif + __ popa(); +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP +#define CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class G1BarrierSetAssembler: public ModRefBarrierSetAssembler { + protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count); + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp); +}; + +#endif // CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_X86_GC_G1_BARRIERSETASSEMBLER_X86_HPP +#define CPU_X86_GC_G1_BARRIERSETASSEMBLER_X86_HPP + +#include "asm/macroAssembler.hpp" +#include "memory/allocation.hpp" +#include "oops/access.hpp" + +class InterpreterMacroAssembler; + +class BarrierSetAssembler: public CHeapObj<mtGC> { +protected: +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) {} + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) {} +}; + +#endif // CPU_X86_GC_G1_BARRIERSETASSEMBLER_X86_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/cardTable.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" +#include "gc/shared/cardTableBarrierSetAssembler.hpp" +#include "gc/shared/collectedHeap.hpp" + +#define __ masm-> + +#ifdef PRODUCT +#define BLOCK_COMMENT(str) /* nothing */ +#else +#define BLOCK_COMMENT(str) __ block_comment(str) +#endif + +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":") + +#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8) + +void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, + Register addr, Register count, Register tmp) { + BarrierSet *bs = Universe::heap()->barrier_set(); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); + CardTable* ct = ctbs->card_table(); + assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); + intptr_t disp = (intptr_t) ct->byte_map_base(); + + Label L_loop, L_done; + const Register end = count; + assert_different_registers(addr, end); + + __ testl(count, count); + __ jcc(Assembler::zero, L_done); // zero count - nothing to do + + +#ifdef _LP64 + __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size + __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive + __ shrptr(addr, CardTable::card_shift); + __ shrptr(end, CardTable::card_shift); + __ subptr(end, addr); // end --> cards count + + __ mov64(tmp, disp); + __ addptr(addr, tmp); +__ BIND(L_loop); + __ movb(Address(addr, count, Address::times_1), 0); + __ decrement(count); + __ jcc(Assembler::greaterEqual, L_loop); +#else + __ lea(end, Address(addr, count, Address::times_ptr, -wordSize)); + __ shrptr(addr, CardTable::card_shift); + __ shrptr(end, CardTable::card_shift); + __ subptr(end, addr); // end --> count +__ BIND(L_loop); + Address cardtable(addr, count, Address::times_1, disp); + __ movb(cardtable, 0); + __ decrement(count); + __ jcc(Assembler::greaterEqual, L_loop); +#endif + +__ BIND(L_done); +}
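The SPARC and x86 versions of this epilogue implement the same card-dirtying loop. What the generated code computes, as a sketch rather than an actual HotSpot function:

  if (count != 0) {   // the stubs branch straight to L_done for a zero count
    jbyte*    base  = ct->byte_map_base();
    uintptr_t first = (uintptr_t)addr >> CardTable::card_shift;
    uintptr_t last  = ((uintptr_t)addr + count * BytesPerHeapOop - 1) >> CardTable::card_shift;
    for (uintptr_t card = first; card <= last; card++) {
      base[card] = 0;   // 0 is the dirty value the stubs store (G0 on SPARC, movb ..., 0 on x86)
    }
  }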
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP +#define CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler { +protected: + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, + Register count, Register tmp); +}; + +#endif // CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/x86/gc/shared/modRefBarrierSetAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "asm/macroAssembler.inline.hpp" +#include "gc/shared/modRefBarrierSetAssembler.hpp" + +#define __ masm-> + +void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) { + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0; + bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops); + + if (type == T_OBJECT || type == T_ARRAY) { +#ifdef _LP64 + if (!checkcast && !obj_int) { + // Save count for barrier + __ movptr(r11, count); + } else if (disjoint && obj_int) { + // Save dst in r11 in the disjoint case + __ movq(r11, dst); + } +#else + if (disjoint) { + __ mov(rdx, dst); // save 'to' + } +#endif + gen_write_ref_array_pre_barrier(masm, decorators, dst, count); + } +} + +void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count) { + bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; + bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0; + bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops); + Register tmp = rax; + + if (type == T_OBJECT || type == T_ARRAY) { +#ifdef _LP64 + if (!checkcast && !obj_int) { + // Save count for barrier + count = r11; + } else if (disjoint && obj_int) { + // Use the saved dst in the disjoint case + dst = r11; + } else if (checkcast) { + tmp = rscratch1; + } +#else + if (disjoint) { + __ mov(dst, rdx); // restore 'to' + } +#endif + gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/x86/gc/shared/modRefBarrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP +#define CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP + +#include "asm/macroAssembler.hpp" +#include "gc/shared/barrierSetAssembler.hpp" + +class ModRefBarrierSetAssembler: public BarrierSetAssembler { +protected: + virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {} + virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp) {} + +public: + virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count); + virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, + Register src, Register dst, Register count); +}; + +#endif // CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -35,6 +35,7 @@ #include "prims/jvmtiThreadState.hpp" #include "runtime/basicLock.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/frame.inline.hpp" #include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/x86/interpreterRT_x86.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/interpreterRT_x86.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,8 @@ #ifndef CPU_X86_VM_INTERPRETERRT_X86_HPP #define CPU_X86_VM_INTERPRETERRT_X86_HPP -#include "memory/allocation.hpp" +// This is included in the middle of class Interpreter. +// Do not include files here. // native method calls @@ -55,19 +56,7 @@ public: // Creation - SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) { - _masm = new MacroAssembler(buffer); -#ifdef AMD64 -#ifdef _WIN64 - _num_args = (method->is_static() ? 1 : 0); - _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address -#else - _num_int_args = (method->is_static() ? 1 : 0); - _num_fp_args = 0; - _stack_offset = wordSize; // don't overwrite return address -#endif // _WIN64 -#endif // AMD64 - } + SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer); // Code generation void generate(uint64_t fingerprint);
--- a/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "interpreter/interp_masm.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -31,7 +32,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" @@ -39,6 +40,21 @@ // Implementation of SignatureHandlerGenerator +InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : + NativeSignatureIterator(method) { + _masm = new MacroAssembler(buffer); +#ifdef AMD64 +#ifdef _WIN64 + _num_args = (method->is_static() ? 1 : 0); + _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address +#else + _num_int_args = (method->is_static() ? 1 : 0); + _num_fp_args = 0; + _stack_offset = wordSize; // don't overwrite return address +#endif // _WIN64 +#endif // AMD64 +} + void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { move(offset(), jni_offset() + 1); }
--- a/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "interpreter/interp_masm.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -31,13 +32,28 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" #define __ _masm-> // Implementation of SignatureHandlerGenerator +InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : + NativeSignatureIterator(method) { + _masm = new MacroAssembler(buffer); +#ifdef AMD64 +#ifdef _WIN64 + _num_args = (method->is_static() ? 1 : 0); + _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address +#else + _num_int_args = (method->is_static() ? 1 : 0); + _num_fp_args = 0; + _stack_offset = wordSize; // don't overwrite return address +#endif // _WIN64 +#endif // AMD64 +} + Register InterpreterRuntime::SignatureHandlerGenerator::from() { return r14; } Register InterpreterRuntime::SignatureHandlerGenerator::to() { return rsp; } Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; }
--- a/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "compiler/disassembler.hpp" #include "oops/oop.inline.hpp" +#include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/sharedRuntime.hpp" #include "jvmci/jvmciEnv.hpp"
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -28,7 +28,7 @@ #include "asm/assembler.inline.hpp" #include "compiler/disassembler.hpp" #include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" @@ -36,7 +36,7 @@ #include "oops/klass.inline.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" @@ -5409,8 +5409,8 @@ Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_buf())); - CardTableModRefBS* ctbs = - barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set()); + CardTableBarrierSet* ctbs = + barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set()); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); @@ -5497,10 +5497,10 @@ // Does a store check for the oop in register obj. The content of // register obj is destroyed afterwards. BarrierSet* bs = Universe::heap()->barrier_set(); - assert(bs->kind() == BarrierSet::CardTableModRef, + assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind"); - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); + CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs); CardTable* ct = ctbs->card_table(); assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,6 +31,8 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "utilities/preserveException.hpp" #define __ _masm->
--- a/src/hotspot/cpu/x86/runtime_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/runtime_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,7 +31,6 @@ #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "opto/runtime.hpp" -#include "runtime/interfaceSupport.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/x86/runtime_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/runtime_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -30,7 +30,6 @@ #include "code/vmreg.hpp" #include "interpreter/interpreter.hpp" #include "opto/runtime.hpp" -#include "runtime/interfaceSupport.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -28,10 +28,12 @@ #include "code/debugInfoRec.hpp" #include "code/icBuffer.hpp" #include "code/vtableStubs.hpp" +#include "gc/shared/gcLocker.hpp" #include "interpreter/interpreter.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp"
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -32,10 +32,12 @@ #include "code/icBuffer.hpp" #include "code/nativeInst.hpp" #include "code/vtableStubs.hpp" +#include "gc/shared/gcLocker.hpp" #include "interpreter/interpreter.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/compiledICHolder.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "utilities/align.hpp"
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,8 +25,8 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/instanceOop.hpp" @@ -668,107 +668,6 @@ return start; } - // - // Generate pre-barrier for array stores - // - // Input: - // start - starting address - // count - element count - void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) { - assert_different_registers(start, count); - BarrierSet* bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { -#if INCLUDE_ALL_GCS - case BarrierSet::G1BarrierSet: - // With G1, don't generate the call if we statically know that the target in uninitialized - if (!uninitialized_target) { - Register thread = rax; - Label filtered; - __ push(thread); - __ get_thread(thread); - Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + - SATBMarkQueue::byte_offset_of_active())); - // Is marking active? - if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { - __ cmpl(in_progress, 0); - } else { - assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); - __ cmpb(in_progress, 0); - } - __ pop(thread); - __ jcc(Assembler::equal, filtered); - - __ pusha(); // push registers - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), - start, count); - __ popa(); - - __ bind(filtered); - } - break; -#endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: - break; - default : - ShouldNotReachHere(); - - } - } - - - // - // Generate a post-barrier for an array store - // - // start - starting address - // count - element count - // - // The two input registers are overwritten. 
- // - void gen_write_ref_array_post_barrier(Register start, Register count) { - BarrierSet* bs = Universe::heap()->barrier_set(); - assert_different_registers(start, count); - switch (bs->kind()) { -#if INCLUDE_ALL_GCS - case BarrierSet::G1BarrierSet: - { - __ pusha(); // push registers - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), - start, count); - __ popa(); - } - break; -#endif // INCLUDE_ALL_GCS - - case BarrierSet::CardTableModRef: - { - CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs); - CardTable* ct = ctbs->card_table(); - assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code"); - - Label L_loop; - const Register end = count; // elements count; end == start+count-1 - assert_different_registers(start, end); - - __ lea(end, Address(start, count, Address::times_ptr, -wordSize)); - __ shrptr(start, CardTable::card_shift); - __ shrptr(end, CardTable::card_shift); - __ subptr(end, start); // end --> count - __ BIND(L_loop); - intptr_t disp = (intptr_t) ct->byte_map_base(); - Address cardtable(start, count, Address::times_1, disp); - __ movb(cardtable, 0); - __ decrement(count); - __ jcc(Assembler::greaterEqual, L_loop); - } - break; - case BarrierSet::ModRef: - break; - default : - ShouldNotReachHere(); - - } - } - // Copy 64 bytes chunks // @@ -936,10 +835,19 @@ if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); - gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); - __ mov(saved_to, to); // save 'to' } + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, t, from, to, count); + __ subptr(to, from); // to --> to_from __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp @@ -1024,10 +932,10 @@ __ BIND(L_copy_2_bytes); } + __ movl(count, Address(rsp, 12+12)); // reread 'count' + bs->arraycopy_epilogue(_masm, decorators, t, from, to, count); + if (t == T_OBJECT) { - __ movl(count, Address(rsp, 12+12)); // reread 'count' - __ mov(to, saved_to); // restore 'to' - gen_write_ref_array_post_barrier(to, count); __ BIND(L_0_count); } inc_copy_counter_np(t); @@ -1116,9 +1024,19 @@ if (t == T_OBJECT) { __ testl(count, count); __ jcc(Assembler::zero, L_0_count); - gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized); } + DecoratorSet decorators = 0; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, t, from, to, count); + // copy from high to low __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp @@ -1216,9 +1134,11 @@ } else { __ BIND(L_copy_2_bytes); } + + __ movl2ptr(count, Address(rsp, 12+12)); // reread count + bs->arraycopy_epilogue(_masm, decorators, t, from, to, count); + if (t == T_OBJECT) { - __ movl2ptr(count, Address(rsp, 12+12)); // reread count - gen_write_ref_array_post_barrier(to, count); __ BIND(L_0_count); } inc_copy_counter_np(t); @@ -1463,8 +1383,16 @@ Address to_element_addr(end_to, count, Address::times_ptr, 0); Address 
elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); + DecoratorSet decorators = ARRAYCOPY_CHECKCAST; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + + BasicType type = T_OBJECT; + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, type, from, to, count); + // Copy from low to high addresses, indexed from the end of each array. - gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); __ lea(end_from, end_from_addr); __ lea(end_to, end_to_addr); assert(length == count, ""); // else fix next line: @@ -1521,7 +1449,7 @@ __ BIND(L_post_barrier); __ movptr(to, to_arg); // reload - gen_write_ref_array_post_barrier(to, count); + bs->arraycopy_epilogue(_masm, decorators, type, from, to, count); // Common exit point (success or failure). __ BIND(L_done);
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -26,8 +26,8 @@ #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" #include "ci/ciUtilities.hpp" -#include "gc/shared/cardTable.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/barrierSet.hpp" +#include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interpreter.hpp" #include "nativeInst_x86.hpp" #include "oops/instanceOop.hpp" @@ -1190,119 +1190,6 @@ #endif } - // Generate code for an array write pre barrier - // - // addr - starting address - // count - element count - // tmp - scratch register - // - // Destroy no registers! - // - void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { - BarrierSet* bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - // With G1, don't generate the call if we statically know that the target in uninitialized - if (!dest_uninitialized) { - Label filtered; - Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() + - SATBMarkQueue::byte_offset_of_active())); - // Is marking active? - if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { - __ cmpl(in_progress, 0); - } else { - assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); - __ cmpb(in_progress, 0); - } - __ jcc(Assembler::equal, filtered); - - __ pusha(); // push registers - if (count == c_rarg0) { - if (addr == c_rarg1) { - // exactly backwards!! - __ xchgptr(c_rarg1, c_rarg0); - } else { - __ movptr(c_rarg1, count); - __ movptr(c_rarg0, addr); - } - } else { - __ movptr(c_rarg0, addr); - __ movptr(c_rarg1, count); - } - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); - __ popa(); - - __ bind(filtered); - } - break; - case BarrierSet::CardTableModRef: - break; - default: - ShouldNotReachHere(); - - } - } - - // - // Generate code for an array write post barrier - // - // Input: - // start - register containing starting address of destination array - // count - elements count - // scratch - scratch register - // - // The input registers are overwritten. 
- // - void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) { - assert_different_registers(start, count, scratch); - BarrierSet* bs = Universe::heap()->barrier_set(); - switch (bs->kind()) { - case BarrierSet::G1BarrierSet: - { - __ pusha(); // push registers (overkill) - if (c_rarg0 == count) { // On win64 c_rarg0 == rcx - assert_different_registers(c_rarg1, start); - __ mov(c_rarg1, count); - __ mov(c_rarg0, start); - } else { - assert_different_registers(c_rarg0, count); - __ mov(c_rarg0, start); - __ mov(c_rarg1, count); - } - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2); - __ popa(); - } - break; - case BarrierSet::CardTableModRef: - { - Label L_loop, L_done; - const Register end = count; - - __ testl(count, count); - __ jcc(Assembler::zero, L_done); // zero count - nothing to do - - __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size - __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive - __ shrptr(start, CardTable::card_shift); - __ shrptr(end, CardTable::card_shift); - __ subptr(end, start); // end --> cards count - - int64_t disp = ci_card_table_address_as<int64_t>(); - __ mov64(scratch, disp); - __ addptr(start, scratch); - __ BIND(L_loop); - __ movb(Address(start, count, Address::times_1), 0); - __ decrement(count); - __ jcc(Assembler::greaterEqual, L_loop); - __ BIND(L_done); - } - break; - default: - ShouldNotReachHere(); - - } - } - // Copy big chunks forward // @@ -1918,7 +1805,6 @@ const Register qword_count = count; const Register end_from = from; // source array end address const Register end_to = to; // destination array end address - const Register saved_to = r11; // saved destination array address // End pointers are inclusive, and if count is not zero they point // to the last unit copied: end_to[0] := end_from[0] @@ -1933,10 +1819,18 @@ setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers - if (is_oop) { - __ movq(saved_to, to); - gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BasicType type = is_oop ? T_OBJECT : T_INT; + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, type, from, to, count); // 'from', 'to' and 'count' are now valid __ movptr(dword_count, count); @@ -1963,9 +1857,7 @@ __ movl(Address(end_to, 8), rax); __ BIND(L_exit); - if (is_oop) { - gen_write_ref_array_post_barrier(saved_to, dword_count, rax); - } + bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); restore_arg_regs(); inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free __ vzeroupper(); @@ -2022,10 +1914,18 @@ setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers - if (is_oop) { - // no registers are destroyed by this call - gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + DecoratorSet decorators = 0; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BasicType type = is_oop ? 
T_OBJECT : T_INT; + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + // no registers are destroyed by this call + bs->arraycopy_prologue(_masm, decorators, type, from, to, count); assert_clean_int(count, rax); // Make sure 'count' is clean int. // 'from', 'to' and 'count' are now valid @@ -2062,9 +1962,7 @@ copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); __ BIND(L_exit); - if (is_oop) { - gen_write_ref_array_post_barrier(to, dword_count, rax); - } + bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count); restore_arg_regs(); inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free __ xorptr(rax, rax); // return 0 @@ -2102,7 +2000,6 @@ const Register qword_count = rdx; // elements count const Register end_from = from; // source array end address const Register end_to = rcx; // destination array end address - const Register saved_to = to; const Register saved_count = r11; // End pointers are inclusive, and if count is not zero they point // to the last unit copied: end_to[0] := end_from[0] @@ -2120,12 +2017,18 @@ setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers // 'from', 'to' and 'qword_count' are now valid - if (is_oop) { - // Save to and count for store barrier - __ movptr(saved_count, qword_count); - // no registers are destroyed by this call - gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); + + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BasicType type = is_oop ? T_OBJECT : T_LONG; + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); // Copy from low to high addresses. Use 'to' as scratch. __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); @@ -2154,10 +2057,8 @@ // Copy in multi-bytes chunks copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); - if (is_oop) { __ BIND(L_exit); - gen_write_ref_array_post_barrier(saved_to, saved_count, rax); - } + bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); restore_arg_regs(); if (is_oop) { inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free @@ -2209,12 +2110,18 @@ setup_arg_regs(); // from => rdi, to => rsi, count => rdx // r9 and r10 may be used to save non-volatile registers // 'from', 'to' and 'qword_count' are now valid - if (is_oop) { - // Save to and count for store barrier - __ movptr(saved_count, qword_count); - // No registers are destroyed by this call - gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); + + DecoratorSet decorators = ARRAYCOPY_DISJOINT; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; } + if (aligned) { + decorators |= ARRAYCOPY_ALIGNED; + } + + BasicType type = is_oop ? 
T_OBJECT : T_LONG; + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count); __ jmp(L_copy_bytes); @@ -2239,10 +2146,8 @@ // Copy in multi-bytes chunks copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes); - if (is_oop) { __ BIND(L_exit); - gen_write_ref_array_post_barrier(to, saved_count, rax); - } + bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count); restore_arg_regs(); if (is_oop) { inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free @@ -2389,7 +2294,14 @@ Address from_element_addr(end_from, count, TIMES_OOP, 0); Address to_element_addr(end_to, count, TIMES_OOP, 0); - gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); + DecoratorSet decorators = ARRAYCOPY_CHECKCAST; + if (dest_uninitialized) { + decorators |= AS_DEST_NOT_INITIALIZED; + } + + BasicType type = T_OBJECT; + BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler(); + bs->arraycopy_prologue(_masm, decorators, type, from, to, count); // Copy from low to high addresses, indexed from the end of each array. __ lea(end_from, end_from_addr); @@ -2442,7 +2354,7 @@ __ xorptr(rax, rax); // return 0 on success __ BIND(L_post_barrier); - gen_write_ref_array_post_barrier(to, r14_length, rscratch1); + bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length); // Common exit point (success or failure). __ BIND(L_done);
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -33,6 +33,8 @@ #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/safepointMechanism.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/synchronizer.hpp" @@ -198,7 +200,7 @@ } break; #endif // INCLUDE_ALL_GCS - case BarrierSet::CardTableModRef: + case BarrierSet::CardTableBarrierSet: { if (val == noreg) { __ store_heap_oop_null(obj);
--- a/src/hotspot/cpu/x86/x86_32.ad Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/x86_32.ad Sat Mar 24 01:08:35 2018 +0100 @@ -391,7 +391,7 @@ int format) { #ifdef ASSERT if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) { - assert(oopDesc::is_oop(cast_to_oop(d32)) && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code"); + assert(oopDesc::is_oop(cast_to_oop(d32)) && (ScavengeRootsInCode || !Universe::heap()->is_scavengable(cast_to_oop(d32))), "cannot embed scavengable oops in code"); } #endif cbuf.relocate(cbuf.insts_mark(), rspec, format); @@ -786,7 +786,7 @@ } if (cbuf) { MacroAssembler _masm(cbuf); - // EVEX spills remain EVEX: Compressed displacemement is better than AVX on spill mem operations, + // EVEX spills remain EVEX: Compressed displacemement is better than AVX on spill mem operations, // it maps more cases to single byte displacement _masm.set_managed(); if (reg_lo+1 == reg_hi) { // double move? @@ -976,7 +976,7 @@ dst_offset_size = (tmp_dst_offset == 0) ? 0 : ((tmp_dst_offset < 0x80) ? 1 : 4); calc_size += 3+src_offset_size + 3+dst_offset_size; break; - } + } case Op_VecX: case Op_VecY: case Op_VecZ:
--- a/src/hotspot/cpu/x86/x86_64.ad Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/x86/x86_64.ad Sat Mar 24 01:08:35 2018 +0100 @@ -669,7 +669,7 @@ if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) { assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop"); - assert(oopDesc::is_oop(cast_to_oop((intptr_t)d32)) && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code"); + assert(oopDesc::is_oop(cast_to_oop((intptr_t)d32)) && (ScavengeRootsInCode || !Universe::heap()->is_scavengable(cast_to_oop((intptr_t)d32))), "cannot embed scavengable oops in code"); } #endif cbuf.relocate(cbuf.insts_mark(), rspec, format); @@ -696,7 +696,7 @@ if (rspec.reloc()->type() == relocInfo::oop_type && d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) { assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop"); - assert(oopDesc::is_oop(cast_to_oop(d64)) && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()), + assert(oopDesc::is_oop(cast_to_oop(d64)) && (ScavengeRootsInCode || !Universe::heap()->is_scavengable(cast_to_oop(d64))), "cannot embed scavengable oops in code"); } #endif
--- a/src/hotspot/cpu/zero/assembler_zero.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/assembler_zero.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,13 +25,13 @@ #include "precompiled.hpp" #include "assembler_zero.inline.hpp" -#include "gc/shared/cardTableModRefBS.hpp" +#include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "interpreter/interpreter.hpp" #include "memory/resourceArea.hpp" #include "prims/methodHandles.hpp" #include "runtime/biasedLocking.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/os.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -41,7 +41,7 @@ #include "runtime/atomic.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/zero/frame_zero.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/frame_zero.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -75,4 +75,6 @@ char* buf, int buflen) const; + static jint interpreter_frame_expression_stack_direction() { return -1; } + #endif // CPU_ZERO_VM_FRAME_ZERO_HPP
--- a/src/hotspot/cpu/zero/frame_zero.inline.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/frame_zero.inline.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -130,10 +130,6 @@ return monitor_end - 1; } -inline jint frame::interpreter_frame_expression_stack_direction() { - return -1; -} - // Return a unique id for this frame. The id must have a value where // we can distinguish identity and younger/older relationship. NULL // represents an invalid (incomparable) frame.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/zero/gc/g1/g1BarrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP +#define CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP + +class G1BarrierSetAssembler; + +#endif // CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/zero/gc/shared/barrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ZERO_GC_SHARED_BARRIERSETASSEMBLER_ZERO_HPP +#define CPU_ZERO_GC_SHARED_BARRIERSETASSEMBLER_ZERO_HPP + +class BarrierSetAssembler; + +#endif // CPU_ZERO_GC_SHARED_BARRIERSETASSEMBLER_ZERO_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/zero/gc/shared/cardTableBarrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP +#define CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP + +class CardTableBarrierSetAssembler; + +#endif // CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/cpu/zero/gc/shared/modRefBarrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP +#define CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP + +class ModRefBarrierSetAssembler; + +#endif // CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP
--- a/src/hotspot/cpu/zero/interpreterRT_zero.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/interpreterRT_zero.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -32,7 +32,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/icache.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/signature.hpp" #include "stack_zero.inline.hpp" #include "utilities/align.hpp"
--- a/src/hotspot/cpu/zero/interpreterRT_zero.hpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/interpreterRT_zero.hpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,7 +26,9 @@ #ifndef CPU_ZERO_VM_INTERPRETERRT_ZERO_HPP #define CPU_ZERO_VM_INTERPRETERRT_ZERO_HPP -#include "memory/allocation.hpp" +// This is included in the middle of class Interpreter. +// Do not include files here. + class SignatureHandler { public:
--- a/src/hotspot/cpu/zero/methodHandles_zero.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/methodHandles_zero.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2009, 2010, 2011 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,6 +30,7 @@ #include "memory/resourceArea.hpp" #include "oops/method.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/frame.inline.hpp" #include "prims/methodHandles.hpp" void MethodHandles::invoke_target(Method* method, TRAPS) {
--- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -31,6 +31,7 @@ #include "code/vtableStubs.hpp" #include "interpreter/interpreter.hpp" #include "oops/compiledICHolder.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/vframeArray.hpp" #include "vmreg_zero.inline.hpp"
--- a/src/hotspot/cpu/zero/stack_zero.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/cpu/zero/stack_zero.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2010 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -24,10 +24,12 @@ */ #include "precompiled.hpp" +#include "interpreter/bytecodeInterpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "runtime/thread.hpp" #include "stack_zero.hpp" #include "stack_zero.inline.hpp" +#include "runtime/frame.inline.hpp" #include "utilities/align.hpp" // Inlined causes circular inclusion with thread.hpp
--- a/src/hotspot/os/aix/attachListener_aix.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/aix/attachListener_aix.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -24,7 +24,7 @@ */ #include "precompiled.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/os.inline.hpp" #include "services/attachListener.hpp" #include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/aix/jvm_aix.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/aix/jvm_aix.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "jvm.h" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/osThread.hpp" #include <signal.h>
--- a/src/hotspot/os/aix/os_aix.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/aix/os_aix.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -54,7 +54,7 @@ #include "runtime/atomic.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/bsd/attachListener_bsd.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -23,7 +23,7 @@ */ #include "precompiled.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/os.inline.hpp" #include "services/attachListener.hpp" #include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/bsd/jvm_bsd.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/bsd/jvm_bsd.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "jvm.h" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/osThread.hpp" #include <signal.h>
--- a/src/hotspot/os/bsd/os_bsd.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/bsd/os_bsd.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -44,7 +44,7 @@ #include "runtime/atomic.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os/linux/attachListener_linux.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/linux/attachListener_linux.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -23,7 +23,8 @@ */ #include "precompiled.hpp" -#include "runtime/interfaceSupport.hpp" +#include "memory/allocation.inline.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/os.inline.hpp" #include "services/attachListener.hpp" #include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/linux/jvm_linux.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/linux/jvm_linux.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "jvm.h" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/osThread.hpp" #include <signal.h>
--- a/src/hotspot/os/linux/os_linux.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/linux/os_linux.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -45,7 +45,7 @@ #include "runtime/atomic.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp"
--- a/src/hotspot/os/posix/os_posix.cpp Thu Mar 29 20:12:02 2018 +0100 +++ b/src/hotspot/os/posix/os_posix.cpp Sat Mar 24 01:08:35 2018 +0100 @@ -26,7 +26,7 @@ #include "memory/allocation.inline.hpp" #include "utilities/globalDefinitions.hpp" #include "runtime/frame.inline.hpp" -#include "runtime/interfaceSupport.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include "runtime/os.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp"