changeset 33070:54f3f085b165
8136525: Generate interpreter entries only once and avoid unnecessary jump to jump
Reviewed-by: coleenp, twisti, aph
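
This changeset removes the per-platform generate_jump_to_normal_entry() trampolines, stubs whose only instruction was a branch to the already generated zerolocals entry, and instead teaches the shared generate_method_entry() to hand out the normal and native entry addresses directly, so each entry is generated exactly once and callers never pay for a jump to a jump. The sketch below is a simplified, hypothetical model of that reuse pattern in plain C++; the real code emits assembly through InterpreterMacroAssembler, and names such as entry_table and method_entry here are illustrative stand-ins, not HotSpot's API.

#include <cstdio>

// Simplified, hypothetical model of the entry reuse introduced here: the
// dispatcher first looks up an already generated entry and only generates
// one if the lookup fails, instead of emitting a fresh jump-to-jump stub.
// All names below are illustrative, not HotSpot's real API.
using address = const unsigned char*;

enum MethodKind { zerolocals, zerolocals_synchronized, accessor, empty, kind_count };

static address entry_table[kind_count];  // filled in generation order

static address generate_normal_entry(bool synchronized) {
  // Stands in for emitting the real interpreter entry code once.
  static unsigned char code[2][16];
  return code[synchronized ? 1 : 0];
}

static address method_entry(MethodKind kind) {
  // accessor/empty used to get a one-branch trampoline; now they resolve
  // to the shared zerolocals entry with no intermediate jump.
  bool synchronized = (kind == zerolocals_synchronized);
  MethodKind shared = synchronized ? zerolocals_synchronized : zerolocals;
  if (entry_table[shared] == nullptr) {
    entry_table[shared] = generate_normal_entry(synchronized);
  }
  return entry_table[shared];
}

int main() {
  // accessor and empty now share the very same address as zerolocals.
  std::printf("same entry: %d\n", method_entry(accessor) == method_entry(zerolocals));
}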
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -42,6 +42,11 @@
 
 // Implementation of InterpreterMacroAssembler
 
+void InterpreterMacroAssembler::jump_to_entry(address entry) {
+  assert(entry, "Entry must have been generated by now");
+  b(entry);
+}
+
 #ifndef CC_INTERP
 
 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -66,6 +66,8 @@
 
   void load_earlyret_value(TosState state);
 
+  void jump_to_entry(address entry);
+
 #ifdef CC_INTERP
   void save_bcp()    { /* not needed in c++ interpreter and harmless */ }
   void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
--- a/hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/interpreterGenerator_aarch64.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -41,9 +41,8 @@
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_jump_to_normal_entry(void);
-  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
-  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_accessor_entry(void) { return NULL; }
+  address generate_empty_entry(void) { return NULL; }
   void generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs);
   address generate_Reference_get_entry();
   address generate_CRC32_update_entry();
--- a/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/interpreter_aarch64.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -236,17 +236,6 @@
   __ blrt(rscratch1, gpargs, fpargs, rtype);
 }
 
-// Jump into normal path for accessor and empty entry to jump to normal entry
-// The "fast" optimization don't update compilation count therefore can disable inlining
-// for these functions that should be inlined.
-address InterpreterGenerator::generate_jump_to_normal_entry(void) {
-  address entry_point = __ pc();
-
-  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
-  __ b(Interpreter::entry_for_kind(Interpreter::zerolocals));
-  return entry_point;
-}
-
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 address InterpreterGenerator::generate_abstract_entry(void) {
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -721,8 +721,7 @@
 
     // generate a vanilla interpreter entry as the slow path
     __ bind(slow_path);
-    (void) generate_normal_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
     return entry;
   }
 #endif // INCLUDE_ALL_GCS
@@ -779,12 +778,10 @@
 
     // generate a vanilla native entry as the slow path
     __ bind(slow_path);
-
-    (void) generate_native_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
     return entry;
   }
-  return generate_native_entry(false);
+  return NULL;
 }
 
 /**
@@ -841,12 +838,10 @@
 
     // generate a vanilla native entry as the slow path
     __ bind(slow_path);
-
-    (void) generate_native_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
     return entry;
   }
-  return generate_native_entry(false);
+  return NULL;
 }
 
 void InterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
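
Note the shape of the aarch64 slow paths above: where the old code re-emitted an entire vanilla entry via (void) generate_native_entry(false) at every intrinsic's slow path, the new code emits a single branch through jump_to_entry() to the entry generated earlier. A rough, hypothetical model of why that shrinks the generated code, with emission modeled as appending pseudo-instructions to a buffer (all names invented for the sketch):

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical sketch: the old scheme duplicated a full vanilla entry at
// every intrinsic's slow path, the new scheme emits one branch to the
// shared entry. "Emitting" is modeled as appending pseudo-instructions.
using address = std::size_t;               // offset into the code buffer

static std::vector<int> code_buffer;

static address emit_native_entry() {       // old scheme: full copy each time
  address start = code_buffer.size();
  code_buffer.insert(code_buffer.end(), 100, 0);  // ~100 instructions
  return start;
}

static void jump_to_entry(address entry) { // new scheme: one branch
  assert(entry < code_buffer.size() && "Entry must have been generated by now");
  code_buffer.push_back(1);                // a single branch instruction
}

int main() {
  address shared = emit_native_entry();    // generated exactly once
  jump_to_entry(shared);                   // each slow path adds one slot
  std::printf("code size: %zu\n", code_buffer.size());
}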
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -46,7 +46,7 @@
   MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
 }
 
-void InterpreterMacroAssembler::branch_to_entry(address entry, Register Rscratch) {
+void InterpreterMacroAssembler::jump_to_entry(address entry, Register Rscratch) {
   assert(entry, "Entry must have been generated by now");
   if (is_within_range_of_b(entry, pc())) {
     b(entry);
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -39,7 +39,7 @@
 
   void null_check_throw(Register a, int offset, Register temp_reg);
 
-  void branch_to_entry(address entry, Register Rscratch);
+  void jump_to_entry(address entry, Register Rscratch);
 
   // Handy address generation macros.
 #define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
--- a/hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -31,9 +31,8 @@
  private:
 
   address generate_abstract_entry(void);
-  address generate_jump_to_normal_entry(void);
-  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
-  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_accessor_entry(void) { return NULL; }
+  address generate_empty_entry(void) { return NULL; }
   address generate_Reference_get_entry(void);
 
   address generate_CRC32_update_entry();
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -427,18 +427,6 @@
   return entry;
 }
 
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry.
-address InterpreterGenerator::generate_jump_to_normal_entry(void) {
-  address entry = __ pc();
-  address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
-  assert(normal_entry != NULL, "should already be generated.");
-  __ branch_to_entry(normal_entry, R11_scratch1);
-  __ flush();
-
-  return entry;
-}
-
 // Abstract method entry.
 //
 address InterpreterGenerator::generate_abstract_entry(void) {
@@ -529,13 +517,13 @@
   // regular method entry code to generate the NPE.
   //
 
-  address entry = __ pc();
+  if (UseG1GC) {
+    address entry = __ pc();
 
-  const int referent_offset = java_lang_ref_Reference::referent_offset;
-  guarantee(referent_offset > 0, "referent offset not initialized");
+    const int referent_offset = java_lang_ref_Reference::referent_offset;
+    guarantee(referent_offset > 0, "referent offset not initialized");
 
-  if (UseG1GC) {
-    Label slow_path;
+    Label slow_path;
 
     // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
 
@@ -577,13 +565,11 @@
 
     // Generate regular method entry.
     __ bind(slow_path);
-    __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
-    __ flush();
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
+    return entry;
+  }
 
-    return entry;
-  } else {
-    return generate_jump_to_normal_entry();
-  }
+  return NULL;
 }
 
 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -620,7 +620,7 @@
 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
   if (!math_entry_available(kind)) {
     NOT_PRODUCT(__ should_not_reach_here();)
-    return Interpreter::entry_for_kind(Interpreter::zerolocals);
+    return NULL;
   }
 
   address entry = __ pc();
@@ -1126,14 +1126,6 @@
 
   generate_fixed_frame(false, Rsize_of_parameters, Rsize_of_locals);
 
-#ifdef FAST_DISPATCH
-  __ unimplemented("Fast dispatch in generate_normal_entry");
-#if 0
-  __ set((intptr_t)Interpreter::dispatch_table(), IdispatchTables);
-  // Set bytecode dispatch table base.
-#endif
-#endif
-
   // --------------------------------------------------------------------------
   // Zero out non-parameter locals.
   // Note: *Always* zero out non-parameter locals as Sparc does. It's not
@@ -1266,9 +1258,8 @@
  * int java.util.zip.CRC32.update(int crc, int b)
  */
 address InterpreterGenerator::generate_CRC32_update_entry() {
-  address start = __ pc();  // Remember stub start address (is rtn value).
-
   if (UseCRC32Intrinsics) {
+    address start = __ pc();  // Remember stub start address (is rtn value).
     Label slow_path;
 
     // Safepoint check
@@ -1313,11 +1304,11 @@
     // Generate a vanilla native entry as the slow path.
     BLOCK_COMMENT("} CRC32_update");
     BIND(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+    return start;
   }
 
-  (void) generate_native_entry(false);
-
-  return start;
+  return NULL;
 }
 
 // CRC32 Intrinsics.
@@ -1327,9 +1318,8 @@
  * int java.util.zip.CRC32.updateByteBuffer(int crc, long* buf, int off, int len)
 */
 address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
-  address start = __ pc();  // Remember stub start address (is rtn value).
-
   if (UseCRC32Intrinsics) {
+    address start = __ pc();  // Remember stub start address (is rtn value).
     Label slow_path;
 
     // Safepoint check
@@ -1406,11 +1396,11 @@
     // Generate a vanilla native entry as the slow path.
     BLOCK_COMMENT("} CRC32_updateBytes(Buffer)");
     BIND(slow_path);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), R11_scratch1);
+    return start;
   }
 
-  (void) generate_native_entry(false);
-
-  return start;
+  return NULL;
 }
 
 // These should never be compiled since the interpreter will prefer
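
The CRC32 entries above are also restructured: address start = __ pc() moves inside the UseCRC32Intrinsics guard and the generator returns NULL when the intrinsic is disabled, which tells generate_method_entry() to fall back to the shared native entry instead of emitting another copy. A compilable approximation of the new control flow, with helper names made up for the sketch:

#include <cstddef>

// Hypothetical approximation of the restructured CRC32 entry generator:
// emit code and return its start only when the intrinsic is enabled,
// otherwise return NULL so the caller reuses the shared native entry.
// emit_*, current_pc and native_entry are stand-ins, not HotSpot API.
typedef const char* address;

static char stub[64];
static address native_entry = stub;       // assume generated earlier

static address current_pc()   { return stub; }
static void emit_fast_path()  {}
static void emit_jump_to(address) {}

static bool UseCRC32Intrinsics = true;    // normally a VM flag

address generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address start = current_pc();         // stub start is the return value
    emit_fast_path();                     // intrinsic body
    emit_jump_to(native_entry);           // slow path: a branch, not a copy
    return start;
  }
  return NULL;                            // caller falls back to native entry
}

int main() { return generate_CRC32_update_entry() == NULL; }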
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -468,7 +468,7 @@
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_jump_to_normal_entry();
+  return NULL;
 }
 
 //
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -59,6 +59,13 @@
 
 #endif // CC_INTERP
 
+void InterpreterMacroAssembler::jump_to_entry(address entry) {
+  assert(entry, "Entry must have been generated by now");
+  AddressLiteral al(entry);
+  jump_to(al, G3_scratch);
+  delayed()->nop();
+}
+
 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
   // Note: this algorithm is also used by C1's OSR entry sequence.
   // Any changes should also be applied to CodeEmitter::emit_osr_entry().
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -80,6 +80,8 @@
 
   InterpreterMacroAssembler(CodeBuffer* c) : MacroAssembler(c) {}
 
+  void jump_to_entry(address entry);
+
 #ifndef CC_INTERP
   virtual void load_earlyret_value(TosState state);
 
--- a/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -34,9 +34,8 @@
   address generate_abstract_entry(void);
   // there are no math intrinsics on sparc
   address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
-  address generate_jump_to_normal_entry(void);
-  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
-  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_accessor_entry(void) { return NULL; }
+  address generate_empty_entry(void) { return NULL; }
   address generate_Reference_get_entry(void);
   void lock_method(void);
   void save_native_result(void);
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -241,15 +241,6 @@
 
 // Various method entries
 
-address InterpreterGenerator::generate_jump_to_normal_entry(void) {
-  address entry = __ pc();
-  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
-  AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
-  __ jump_to(al, G3_scratch);
-  __ delayed()->nop();
-  return entry;
-}
-
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 //
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -779,14 +779,14 @@
 
     // Generate regular method entry
     __ bind(slow_path);
-    (void) generate_normal_entry(false);
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
     return entry;
   }
 #endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_jump_to_normal_entry();
+  return NULL;
 }
 
 //
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -807,7 +807,7 @@
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_jump_to_normal_entry();
+  return NULL;
 }
 
 //
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -40,6 +40,11 @@
 
 // Implementation of InterpreterMacroAssembler
 
+void InterpreterMacroAssembler::jump_to_entry(address entry) {
+  assert(entry, "Entry must have been generated by now");
+  jump(RuntimeAddress(entry));
+}
+
 #ifndef CC_INTERP
 void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
   Label update, next, none;
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -60,6 +60,8 @@
     _locals_register(LP64_ONLY(r14) NOT_LP64(rdi)),
     _bcp_register(LP64_ONLY(r13) NOT_LP64(rsi)) {}
 
+  void jump_to_entry(address entry);
+
   void load_earlyret_value(TosState state);
 
 #ifdef CC_INTERP
--- a/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -31,17 +31,6 @@
 
 #define __ _masm->
 
-// Jump into normal path for accessor and empty entry to jump to normal entry
-// The "fast" optimization don't update compilation count therefore can disable inlining
-// for these functions that should be inlined.
-address InterpreterGenerator::generate_jump_to_normal_entry(void) {
-  address entry_point = __ pc();
-
-  assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
-  __ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
-  return entry_point;
-}
-
 // Abstract method entry
 // Attempt to execute abstract method. Throw exception
 address InterpreterGenerator::generate_abstract_entry(void) {
--- a/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp	Thu Sep 17 09:03:57 2015 +0200
@@ -36,9 +36,8 @@
   address generate_native_entry(bool synchronized);
   address generate_abstract_entry(void);
   address generate_math_entry(AbstractInterpreter::MethodKind kind);
-  address generate_jump_to_normal_entry(void);
-  address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
-  address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
+  address generate_accessor_entry(void) { return NULL; }
+  address generate_empty_entry(void) { return NULL; }
   address generate_Reference_get_entry();
   address generate_CRC32_update_entry();
   address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -697,15 +697,14 @@
     __ jmp(rdi);
 
     __ bind(slow_path);
-    (void) generate_normal_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
     return entry;
   }
 #endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_jump_to_normal_entry();
+  return NULL;
 }
 
 /**
@@ -753,12 +752,10 @@
 
     // generate a vanilla native entry as the slow path
     __ bind(slow_path);
-
-    (void) generate_native_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
     return entry;
   }
-  return generate_native_entry(false);
+  return NULL;
 }
 
 /**
@@ -821,12 +818,10 @@
 
     // generate a vanilla native entry as the slow path
    __ bind(slow_path);
-
-    (void) generate_native_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
     return entry;
   }
-  return generate_native_entry(false);
+  return NULL;
 }
 
 /**
@@ -873,7 +868,7 @@
     return entry;
   }
 
-  return generate_native_entry(false);
+  return NULL;
 }
 
 /**
@@ -881,10 +876,8 @@
  * java.lang.Float.intBitsToFloat(int bits)
 */
 address InterpreterGenerator::generate_Float_intBitsToFloat_entry() {
-  address entry;
-
   if (UseSSE >= 1) {
-    entry = __ pc();
+    address entry = __ pc();
 
     // rsi: the sender's SP
 
@@ -898,11 +891,10 @@
     __ pop(rdi); // get return address
     __ mov(rsp, rsi); // set rsp to the sender's SP
     __ jmp(rdi);
-  } else {
-    entry = generate_native_entry(false);
+    return entry;
   }
 
-  return entry;
+  return NULL;
 }
 
 /**
@@ -910,10 +902,8 @@
 * java.lang.Float.floatToRawIntBits(float value)
 */
 address InterpreterGenerator::generate_Float_floatToRawIntBits_entry() {
-  address entry;
-
   if (UseSSE >= 1) {
-    entry = __ pc();
+    address entry = __ pc();
 
     // rsi: the sender's SP
 
@@ -927,11 +917,10 @@
     __ pop(rdi); // get return address
     __ mov(rsp, rsi); // set rsp to the sender's SP
     __ jmp(rdi);
-  } else {
-    entry = generate_native_entry(false);
+    return entry;
  }
 
-  return entry;
+  return NULL;
 }
 
 
@@ -940,10 +929,8 @@
 * java.lang.Double.longBitsToDouble(long bits)
 */
 address InterpreterGenerator::generate_Double_longBitsToDouble_entry() {
-  address entry;
-
   if (UseSSE >= 2) {
-    entry = __ pc();
+    address entry = __ pc();
 
     // rsi: the sender's SP
 
@@ -957,11 +944,10 @@
     __ pop(rdi); // get return address
     __ mov(rsp, rsi); // set rsp to the sender's SP
     __ jmp(rdi);
-  } else {
-    entry = generate_native_entry(false);
+    return entry;
   }
 
-  return entry;
+  return NULL;
 }
 
 /**
@@ -969,10 +955,8 @@
 * java.lang.Double.doubleToRawLongBits(double value)
 */
 address InterpreterGenerator::generate_Double_doubleToRawLongBits_entry() {
-  address entry;
-
   if (UseSSE >= 2) {
-    entry = __ pc();
+    address entry = __ pc();
 
     // rsi: the sender's SP
 
@@ -987,11 +971,10 @@
     __ pop(rdi); // get return address
     __ mov(rsp, rsi); // set rsp to the sender's SP
     __ jmp(rdi);
-  } else {
-    entry = generate_native_entry(false);
+    return entry;
   }
 
-  return entry;
+  return NULL;
}

//
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -677,15 +677,14 @@
 
     // generate a vanilla interpreter entry as the slow path
     __ bind(slow_path);
-    (void) generate_normal_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
     return entry;
   }
 #endif // INCLUDE_ALL_GCS
 
   // If G1 is not enabled then attempt to go through the accessor entry point
   // Reference.get is an accessor
-  return generate_jump_to_normal_entry();
+  return NULL;
 }
 
 /**
@@ -733,12 +732,10 @@
 
     // generate a vanilla native entry as the slow path
     __ bind(slow_path);
-
-    (void) generate_native_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
     return entry;
   }
-  return generate_native_entry(false);
+  return NULL;
 }
 
 /**
@@ -796,12 +793,10 @@
 
     // generate a vanilla native entry as the slow path
     __ bind(slow_path);
-
-    (void) generate_native_entry(false);
-
+    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
     return entry;
   }
-  return generate_native_entry(false);
+  return NULL;
 }
 
 /**
@@ -852,7 +847,7 @@
     return entry;
   }
 
-  return generate_native_entry(false);
+  return NULL;
 }
 
 // Interpreter stub for calling a native method. (asm interpreter)
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -816,7 +816,7 @@
 
   // If G1 is not enabled then attempt to go through the normal entry point
   // Reference.get could be instrumented by jvmti
-  return generate_normal_entry(false);
+  return NULL;
 }
 
 address InterpreterGenerator::generate_native_entry(bool synchronized) {
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -104,7 +104,10 @@
   (*_masm)->flush();
 
   // Commit Codelet.
-  AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size(), (*_masm)->code()->strings());
+  int committed_code_size = (*_masm)->code()->pure_insts_size();
+  if (committed_code_size) {
+    AbstractInterpreter::code()->commit(committed_code_size, (*_masm)->code()->strings());
+  }
   // Make sure nobody can use _masm outside a CodeletMark lifespan.
   *_masm = NULL;
 }
@@ -546,17 +549,18 @@
 address InterpreterGenerator::generate_method_entry(
                                         AbstractInterpreter::MethodKind kind) {
   // determine code generation flags
+  bool native = false;
   bool synchronized = false;
   address entry_point = NULL;
 
   switch (kind) {
-  case Interpreter::zerolocals             :                                                                             break;
-  case Interpreter::zerolocals_synchronized: synchronized = true;                                                        break;
-  case Interpreter::native                 : entry_point = generate_native_entry(false);                                 break;
-  case Interpreter::native_synchronized    : entry_point = generate_native_entry(true);                                  break;
-  case Interpreter::empty                  : entry_point = generate_empty_entry();                                       break;
+  case Interpreter::zerolocals             :                                                                 break;
+  case Interpreter::zerolocals_synchronized: synchronized = true;                                            break;
+  case Interpreter::native                 : native = true;                                                  break;
+  case Interpreter::native_synchronized    : native = true; synchronized = true;                             break;
+  case Interpreter::empty                  : entry_point = generate_empty_entry();                           break;
   case Interpreter::accessor               : entry_point = generate_accessor_entry();                                    break;
-  case Interpreter::abstract               : entry_point = generate_abstract_entry();                                    break;
+  case Interpreter::abstract               : entry_point = generate_abstract_entry();                        break;
 
   case Interpreter::java_lang_math_sin     : // fall thru
   case Interpreter::java_lang_math_cos     : // fall thru
@@ -571,11 +575,11 @@
     : entry_point = generate_Reference_get_entry(); break;
 #ifndef CC_INTERP
   case Interpreter::java_util_zip_CRC32_update
-    : entry_point = generate_CRC32_update_entry(); break;
+    : native = true; entry_point = generate_CRC32_update_entry(); break;
   case Interpreter::java_util_zip_CRC32_updateBytes
     : // fall thru
   case Interpreter::java_util_zip_CRC32_updateByteBuffer
-    : entry_point = generate_CRC32_updateBytes_entry(kind); break;
+    : native = true; entry_point = generate_CRC32_updateBytes_entry(kind); break;
   case Interpreter::java_util_zip_CRC32C_updateBytes
     : // fall thru
   case Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer
@@ -584,20 +588,20 @@
   // On x86_32 platforms, a special entry is generated for the following four methods.
   // On other platforms the normal entry is used to enter these methods.
   case Interpreter::java_lang_Float_intBitsToFloat
-    : entry_point = generate_Float_intBitsToFloat_entry(); break;
+    : native = true; entry_point = generate_Float_intBitsToFloat_entry(); break;
   case Interpreter::java_lang_Float_floatToRawIntBits
-    : entry_point = generate_Float_floatToRawIntBits_entry(); break;
+    : native = true; entry_point = generate_Float_floatToRawIntBits_entry(); break;
   case Interpreter::java_lang_Double_longBitsToDouble
-    : entry_point = generate_Double_longBitsToDouble_entry(); break;
+    : native = true; entry_point = generate_Double_longBitsToDouble_entry(); break;
   case Interpreter::java_lang_Double_doubleToRawLongBits
-    : entry_point = generate_Double_doubleToRawLongBits_entry(); break;
+    : native = true; entry_point = generate_Double_doubleToRawLongBits_entry(); break;
 #else
   case Interpreter::java_lang_Float_intBitsToFloat:
   case Interpreter::java_lang_Float_floatToRawIntBits:
   case Interpreter::java_lang_Double_longBitsToDouble:
-  case Interpreter::java_lang_Double_doubleToRawLongBits:
-    entry_point = generate_native_entry(false);
-    break;
+  case Interpreter::java_lang_Double_doubleToRawLongBits:
+    native = true;
+    break;
 #endif // defined(TARGET_ARCH_x86) && !defined(_LP64)
 #endif // CC_INTERP
   default:
@@ -609,5 +613,18 @@
     return entry_point;
   }
 
-  return generate_normal_entry(synchronized);
+  // We expect the normal and native entry points to be generated first so we can reuse them.
+  if (native) {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::native_synchronized : Interpreter::native);
+    if (entry_point == NULL) {
+      entry_point = generate_native_entry(synchronized);
+    }
+  } else {
+    entry_point = Interpreter::entry_for_kind(synchronized ? Interpreter::zerolocals_synchronized : Interpreter::zerolocals);
+    if (entry_point == NULL) {
+      entry_point = generate_normal_entry(synchronized);
+    }
+  }
+
+  return entry_point;
 }
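
The tail of generate_method_entry() is the heart of the change: kinds are classified as native or normal, and when no special entry was generated (entry_point is still NULL) the dispatcher reuses the entry already registered for that kind, generating it only on the first request. A simplified model of that lookup-then-generate logic (table layout and generator functions are stand-ins):

#include <cstdio>

// Simplified model of the new tail of generate_method_entry(): look up the
// shared entry for the (native, synchronized) combination and generate it
// only on a miss. Table layout and generator functions are illustrative.
typedef const char* address;

enum Kind { zerolocals, zerolocals_synchronized, native, native_synchronized, kind_count };

static address entries[kind_count];

static address generate_normal_entry(bool sync) { return sync ? "normal_sync" : "normal"; }
static address generate_native_entry(bool sync) { return sync ? "native_sync" : "native"; }

static address entry_for_kind(Kind k) { return entries[k]; }

static address resolve_entry(bool is_native, bool synchronized) {
  Kind k = is_native ? (synchronized ? native_synchronized : native)
                     : (synchronized ? zerolocals_synchronized : zerolocals);
  address entry_point = entry_for_kind(k);
  if (entry_point == nullptr) {            // first request: generate once
    entry_point = is_native ? generate_native_entry(synchronized)
                            : generate_normal_entry(synchronized);
    entries[k] = entry_point;
  }
  return entry_point;                      // later requests: reuse
}

int main() {
  std::printf("reused: %d\n", resolve_entry(true, false) == resolve_entry(true, false));
}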
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Thu Sep 17 13:42:50 2015 -0700
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp	Thu Sep 17 09:03:57 2015 +0200
@@ -412,6 +412,14 @@
   method_entry(java_lang_math_pow )
   method_entry(java_lang_ref_reference_get)
 
+  initialize_method_handle_entries();
+
+  // all native method kinds (must be one contiguous block)
+  Interpreter::_native_entry_begin = Interpreter::code()->code_end();
+  method_entry(native)
+  method_entry(native_synchronized)
+  Interpreter::_native_entry_end = Interpreter::code()->code_end();
+
   if (UseCRC32Intrinsics) {
     method_entry(java_util_zip_CRC32_update)
     method_entry(java_util_zip_CRC32_updateBytes)
@@ -428,14 +436,6 @@
   method_entry(java_lang_Double_longBitsToDouble);
   method_entry(java_lang_Double_doubleToRawLongBits);
 
-  initialize_method_handle_entries();
-
-  // all native method kinds (must be one contiguous block)
-  Interpreter::_native_entry_begin = Interpreter::code()->code_end();
-  method_entry(native)
-  method_entry(native_synchronized)
-  Interpreter::_native_entry_end = Interpreter::code()->code_end();
-
 #undef method_entry
 
   // Bytecodes
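
The reordering in templateInterpreter.cpp is load-bearing: the native and method-handle entries are now generated before the CRC32 and bit-conversion intrinsic entries, because those intrinsics' slow paths branch to Interpreter::entry_for_kind(Interpreter::native), and jump_to_entry() asserts that the target already exists. A toy model of that ordering constraint (map and names invented for the sketch):

#include <cassert>
#include <map>
#include <string>

// Toy model of the generation-order constraint: intrinsic entries branch to
// the shared native entry, so generating them first would trip the
// "Entry must have been generated by now" assert. Names are illustrative.
static std::map<std::string, int> entry_table;  // kind -> fake code address

static int entry_for_kind(const std::string& kind) {
  auto it = entry_table.find(kind);
  return it == entry_table.end() ? 0 : it->second;
}

static void generate_CRC32_update_entry() {
  int native = entry_for_kind("native");
  assert(native != 0 && "Entry must have been generated by now");
  entry_table["CRC32_update"] = 2;  // fast path ends in a branch to `native`
}

int main() {
  entry_table["native"] = 1;        // must happen first (hence the reordering)
  entry_table["native_synchronized"] = 1;
  generate_CRC32_update_entry();    // now safe
}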