annotate src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp @ 12320:3f551de87e59

8169711: CDS does not patch entry trampoline if intrinsic method is disabled
Summary: Always create interpreter method entries for intrinsified methods but replace them with vanilla entries if the intrinsic is disabled at runtime.
Reviewed-by: kvn, iklam
author thartmann
date Mon, 21 Nov 2016 08:27:10 +0100
parents 80445c3c0f9f
children 52d18f20804b
rev   line source
aph@7879 1 /*
coleenp@9933 2 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
aph@7879 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
aph@7879 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aph@7879 5 *
aph@7879 6 * This code is free software; you can redistribute it and/or modify it
aph@7879 7 * under the terms of the GNU General Public License version 2 only, as
aph@7879 8 * published by the Free Software Foundation.
aph@7879 9 *
aph@7879 10 * This code is distributed in the hope that it will be useful, but WITHOUT
aph@7879 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aph@7879 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aph@7879 13 * version 2 for more details (a copy is included in the LICENSE file that
aph@7879 14 * accompanied this code).
aph@7879 15 *
aph@7879 16 * You should have received a copy of the GNU General Public License version
aph@7879 17 * 2 along with this work; if not, write to the Free Software Foundation,
aph@7879 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aph@7879 19 *
aph@7879 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aph@7879 21 * or visit www.oracle.com if you need additional information or have any
aph@7879 22 * questions.
aph@7879 23 *
aph@7879 24 */
aph@7879 25
aph@7879 26 #include "precompiled.hpp"
aph@7879 27 #include "asm/macroAssembler.hpp"
aph@7879 28 #include "interpreter/bytecodeHistogram.hpp"
aph@7879 29 #include "interpreter/interpreter.hpp"
aph@7879 30 #include "interpreter/interpreterRuntime.hpp"
aph@7879 31 #include "interpreter/interp_masm.hpp"
coleenp@9879 32 #include "interpreter/templateInterpreterGenerator.hpp"
aph@7879 33 #include "interpreter/templateTable.hpp"
aph@7879 34 #include "interpreter/bytecodeTracer.hpp"
jprovino@10761 35 #include "memory/resourceArea.hpp"
aph@7879 36 #include "oops/arrayOop.hpp"
aph@7879 37 #include "oops/methodData.hpp"
aph@7879 38 #include "oops/method.hpp"
aph@7879 39 #include "oops/oop.inline.hpp"
aph@7879 40 #include "prims/jvmtiExport.hpp"
aph@7879 41 #include "prims/jvmtiThreadState.hpp"
aph@7879 42 #include "runtime/arguments.hpp"
aph@7879 43 #include "runtime/deoptimization.hpp"
aph@7879 44 #include "runtime/frame.inline.hpp"
aph@7879 45 #include "runtime/sharedRuntime.hpp"
aph@7879 46 #include "runtime/stubRoutines.hpp"
aph@7879 47 #include "runtime/synchronizer.hpp"
aph@7879 48 #include "runtime/timer.hpp"
aph@7879 49 #include "runtime/vframeArray.hpp"
aph@7879 50 #include "utilities/debug.hpp"
aph@7879 51 #include <sys/types.h>
aph@7879 52
aph@7879 53 #ifndef PRODUCT
aph@7879 54 #include "oops/method.hpp"
aph@7879 55 #endif // !PRODUCT
aph@7879 56
aph@7879 57 #ifdef BUILTIN_SIM
aph@7879 58 #include "../../../../../../simulator/simulator.hpp"
aph@7879 59 #endif
aph@7879 60
coleenp@9933 61 // Size of interpreter code. Increase if too small. Interpreter will
coleenp@9933 62 // fail with a guarantee ("not enough space for interpreter generation")
coleenp@9933 63 // if too small.
coleenp@9933 64 // Run with +PrintInterpreter to get the VM to print out the size.
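// (Illustrative only: PrintInterpreter is a diagnostic flag, so a typical
//  invocation would be something like
//    java -XX:+UnlockDiagnosticVMOptions -XX:+PrintInterpreter -version
//  which prints the generated interpreter together with its size.)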
coleenp@9933 65 // Max size with JVMTI
coleenp@9933 66 int TemplateInterpreter::InterpreterCodeSize = 200 * 1024;
coleenp@9933 67
aph@7879 68 #define __ _masm->
aph@7879 69
aph@7879 70 //-----------------------------------------------------------------------------
aph@7879 71
aph@7879 72 extern "C" void entry(CodeBuffer*);
aph@7879 73
aph@7879 74 //-----------------------------------------------------------------------------
aph@7879 75
coleenp@9933 76 address TemplateInterpreterGenerator::generate_slow_signature_handler() {
coleenp@9933 77 address entry = __ pc();
coleenp@9933 78
coleenp@9933 79 __ andr(esp, esp, -16);
coleenp@9933 80 __ mov(c_rarg3, esp);
coleenp@9933 81 // rmethod
coleenp@9933 82 // rlocals
coleenp@9933 83 // c_rarg3: first stack arg - wordSize
coleenp@9933 84
coleenp@9933 85 // adjust sp
coleenp@9933 86 __ sub(sp, c_rarg3, 18 * wordSize);
coleenp@9933 87 __ str(lr, Address(__ pre(sp, -2 * wordSize)));
coleenp@9933 88 __ call_VM(noreg,
coleenp@9933 89 CAST_FROM_FN_PTR(address,
coleenp@9933 90 InterpreterRuntime::slow_signature_handler),
coleenp@9933 91 rmethod, rlocals, c_rarg3);
coleenp@9933 92
coleenp@9933 93 // r0: result handler
coleenp@9933 94
coleenp@9933 95 // Stack layout:
coleenp@9933 96 // rsp: return address <- sp
coleenp@9933 97 // 1 garbage
coleenp@9933 98 // 8 integer args (if static first is unused)
coleenp@9933 99 // 1 float/double identifiers
coleenp@9933 100 // 8 double args
coleenp@9933 101 // stack args <- esp
coleenp@9933 102 // garbage
coleenp@9933 103 // expression stack bottom
coleenp@9933 104 // bcp (NULL)
coleenp@9933 105 // ...
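// (As a sanity check on the arithmetic above: 1 garbage + 8 integer args
//  + 1 identifier word + 8 double args = 18 words, matching the
//  "sub(sp, c_rarg3, 18 * wordSize)" that reserved this area.)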
coleenp@9933 106
coleenp@9933 107 // Restore LR
coleenp@9933 108 __ ldr(lr, Address(__ post(sp, 2 * wordSize)));
coleenp@9933 109
coleenp@9933 110 // Do FP first so we can use c_rarg3 as temp
coleenp@9933 111 __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers
coleenp@9933 112
coleenp@9933 113 for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
coleenp@9933 114 const FloatRegister r = as_FloatRegister(i);
coleenp@9933 115
coleenp@9933 116 Label d, done;
coleenp@9933 117
coleenp@9933 118 __ tbnz(c_rarg3, i, d);
coleenp@9933 119 __ ldrs(r, Address(sp, (10 + i) * wordSize));
coleenp@9933 120 __ b(done);
coleenp@9933 121 __ bind(d);
coleenp@9933 122 __ ldrd(r, Address(sp, (10 + i) * wordSize));
coleenp@9933 123 __ bind(done);
coleenp@9933 124 }
coleenp@9933 125
coleenp@9933 126 // c_rarg0 contains the result from the call of
coleenp@9933 127 // InterpreterRuntime::slow_signature_handler so we don't touch it
coleenp@9933 128 // here. It will be loaded with the JNIEnv* later.
coleenp@9933 129 __ ldr(c_rarg1, Address(sp, 1 * wordSize));
coleenp@9933 130 for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
coleenp@9933 131 Register rm = as_Register(i), rn = as_Register(i+1);
coleenp@9933 132 __ ldp(rm, rn, Address(sp, i * wordSize));
coleenp@9933 133 }
coleenp@9933 134
coleenp@9933 135 __ add(sp, sp, 18 * wordSize);
coleenp@9933 136 __ ret(lr);
coleenp@9933 137
coleenp@9933 138 return entry;
coleenp@9933 139 }
coleenp@9933 140
coleenp@9933 141
coleenp@9933 142 //
coleenp@9933 143 // Various method entries
coleenp@9933 144 //
coleenp@9933 145
coleenp@9933 146 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
coleenp@9933 147 // rmethod: Method*
coleenp@9933 148 // r13: sender sp
coleenp@9933 149 // esp: args
coleenp@9933 150
coleenp@9933 151 if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
coleenp@9933 152
coleenp@9933 153 // These don't need a safepoint check because they aren't virtually
coleenp@9933 154 // callable. We won't enter these intrinsics from compiled code.
coleenp@9933 155 // If in the future we added an intrinsic which was virtually callable
coleenp@9933 156 // we'd have to worry about how to safepoint so that this code is used.
coleenp@9933 157
coleenp@9933 158 // mathematical functions inlined by compiler
coleenp@9933 159 // (interpreter must provide identical implementation
coleenp@9933 160 // in order to avoid monotonicity bugs when switching
coleenp@9933 161 // from interpreter to compiler in the middle of some
coleenp@9933 162 // computation)
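// (Illustrative example of the monotonicity concern: if the interpreted
//  and intrinsified versions of, say, Math.sin could return different
//  bit patterns for the same argument, a computation that is compiled
//  partway through could observe two different values of sin(x) for one
//  x. Identical implementations rule that out.)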
coleenp@9933 163 //
coleenp@9933 164 // stack:
coleenp@9933 165 // [ arg ] <-- esp
coleenp@9933 166 // [ arg ]
coleenp@9933 167 // retaddr in lr
coleenp@9933 168
coleenp@9933 169 address entry_point = NULL;
coleenp@9933 170 Register continuation = lr;
coleenp@9933 171 switch (kind) {
coleenp@9933 172 case Interpreter::java_lang_math_abs:
coleenp@9933 173 entry_point = __ pc();
coleenp@9933 174 __ ldrd(v0, Address(esp));
coleenp@9933 175 __ fabsd(v0, v0);
coleenp@9933 176 __ mov(sp, r13); // Restore caller's SP
coleenp@9933 177 break;
coleenp@9933 178 case Interpreter::java_lang_math_sqrt:
coleenp@9933 179 entry_point = __ pc();
coleenp@9933 180 __ ldrd(v0, Address(esp));
coleenp@9933 181 __ fsqrtd(v0, v0);
coleenp@9933 182 __ mov(sp, r13);
coleenp@9933 183 break;
coleenp@9933 184 case Interpreter::java_lang_math_sin :
coleenp@9933 185 case Interpreter::java_lang_math_cos :
coleenp@9933 186 case Interpreter::java_lang_math_tan :
coleenp@9933 187 case Interpreter::java_lang_math_log :
coleenp@9933 188 case Interpreter::java_lang_math_log10 :
coleenp@9933 189 case Interpreter::java_lang_math_exp :
coleenp@9933 190 entry_point = __ pc();
coleenp@9933 191 __ ldrd(v0, Address(esp));
coleenp@9933 192 __ mov(sp, r13);
coleenp@9933 193 __ mov(r19, lr);
coleenp@9933 194 continuation = r19; // The first callee-saved register
coleenp@9933 195 generate_transcendental_entry(kind, 1);
coleenp@9933 196 break;
coleenp@9933 197 case Interpreter::java_lang_math_pow :
coleenp@9933 198 entry_point = __ pc();
coleenp@9933 199 __ mov(r19, lr);
coleenp@9933 200 continuation = r19;
coleenp@9933 201 __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
coleenp@9933 202 __ ldrd(v1, Address(esp));
coleenp@9933 203 __ mov(sp, r13);
coleenp@9933 204 generate_transcendental_entry(kind, 2);
coleenp@9933 205 break;
thartmann@12320 206 case Interpreter::java_lang_math_fmaD :
thartmann@12320 207 case Interpreter::java_lang_math_fmaF :
thartmann@12320 208 return NULL;
coleenp@9933 209 default:
coleenp@9933 210 ;
coleenp@9933 211 }
coleenp@9933 212 if (entry_point) {
coleenp@9933 213 __ br(continuation);
coleenp@9933 214 }
coleenp@9933 215
coleenp@9933 216 return entry_point;
coleenp@9933 217 }
coleenp@9933 218
coleenp@9933 219 // double trigonometrics and transcendentals
coleenp@9933 220 // static jdouble dsin(jdouble x);
coleenp@9933 221 // static jdouble dcos(jdouble x);
coleenp@9933 222 // static jdouble dtan(jdouble x);
coleenp@9933 223 // static jdouble dlog(jdouble x);
coleenp@9933 224 // static jdouble dlog10(jdouble x);
coleenp@9933 225 // static jdouble dexp(jdouble x);
coleenp@9933 226 // static jdouble dpow(jdouble x, jdouble y);
coleenp@9933 227
coleenp@9933 228 void TemplateInterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
coleenp@9933 229 address fn;
coleenp@9933 230 switch (kind) {
coleenp@9933 231 case Interpreter::java_lang_math_sin :
coleenp@9933 232 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
coleenp@9933 233 break;
coleenp@9933 234 case Interpreter::java_lang_math_cos :
coleenp@9933 235 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
coleenp@9933 236 break;
coleenp@9933 237 case Interpreter::java_lang_math_tan :
coleenp@9933 238 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
coleenp@9933 239 break;
coleenp@9933 240 case Interpreter::java_lang_math_log :
coleenp@9933 241 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
coleenp@9933 242 break;
coleenp@9933 243 case Interpreter::java_lang_math_log10 :
coleenp@9933 244 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
coleenp@9933 245 break;
coleenp@9933 246 case Interpreter::java_lang_math_exp :
coleenp@9933 247 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
coleenp@9933 248 break;
coleenp@9933 249 case Interpreter::java_lang_math_pow :
coleenp@9933 250 fpargs = 2;
coleenp@9933 251 fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
coleenp@9933 252 break;
coleenp@9933 253 default:
coleenp@9933 254 ShouldNotReachHere();
jwilhelm@9949 255 fn = NULL; // unreachable
coleenp@9933 256 }
coleenp@9933 257 const int gpargs = 0, rtype = 3;
coleenp@9933 258 __ mov(rscratch1, fn);
coleenp@9933 259 __ blrt(rscratch1, gpargs, fpargs, rtype);
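// (Note: the gpargs/fpargs/rtype operands describe the C call signature.
//  On real hardware blrt behaves like a plain blr; as far as this file is
//  concerned, the extra information is only needed by the BUILTIN_SIM
//  simulator support included at the top of this file.)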
coleenp@9933 260 }
coleenp@9933 261
coleenp@9933 262 // Abstract method entry
coleenp@9933 263 // Attempt to execute abstract method. Throw exception
coleenp@9933 264 address TemplateInterpreterGenerator::generate_abstract_entry(void) {
coleenp@9933 265 // rmethod: Method*
coleenp@9933 266 // r13: sender SP
coleenp@9933 267
coleenp@9933 268 address entry_point = __ pc();
coleenp@9933 269
coleenp@9933 270 // abstract method entry
coleenp@9933 271
coleenp@9933 272 // pop return address, reset last_sp to NULL
coleenp@9933 273 __ empty_expression_stack();
coleenp@9933 274 __ restore_bcp(); // bcp must be correct for exception handler (was destroyed)
coleenp@9933 275 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
coleenp@9933 276
coleenp@9933 277 // throw exception
coleenp@9933 278 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
coleenp@9933 279 InterpreterRuntime::throw_AbstractMethodError));
coleenp@9933 280 // the call_VM checks for exception, so we should never return here.
coleenp@9933 281 __ should_not_reach_here();
coleenp@9933 282
coleenp@9933 283 return entry_point;
coleenp@9933 284 }
coleenp@9933 285
aph@7879 286 address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
aph@7879 287 address entry = __ pc();
aph@7879 288
aph@7879 289 #ifdef ASSERT
aph@7879 290 {
aph@7879 291 Label L;
aph@7879 292 __ ldr(rscratch1, Address(rfp,
aph@7879 293 frame::interpreter_frame_monitor_block_top_offset *
aph@7879 294 wordSize));
aph@7879 295 __ mov(rscratch2, sp);
aph@7879 296 __ cmp(rscratch1, rscratch2); // maximal rsp for current rfp (stack
aph@7879 297 // grows downward)
aph@7879 298 __ br(Assembler::HS, L); // check if frame is complete
aph@7879 299 __ stop ("interpreter frame not set up");
aph@7879 300 __ bind(L);
aph@7879 301 }
aph@7879 302 #endif // ASSERT
aph@7879 303 // Restore bcp under the assumption that the current frame is still
aph@7879 304 // interpreted
aph@7879 305 __ restore_bcp();
aph@7879 306
aph@7879 307 // expression stack must be empty before entering the VM if an
aph@7879 308 // exception happened
aph@7879 309 __ empty_expression_stack();
aph@7879 310 // throw exception
aph@7879 311 __ call_VM(noreg,
aph@7879 312 CAST_FROM_FN_PTR(address,
aph@7879 313 InterpreterRuntime::throw_StackOverflowError));
aph@7879 314 return entry;
aph@7879 315 }
aph@7879 316
aph@7879 317 address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
aph@7879 318 const char* name) {
aph@7879 319 address entry = __ pc();
aph@7879 320 // expression stack must be empty before entering the VM if an
aph@7879 321 // exception happened
aph@7879 322 __ empty_expression_stack();
aph@7879 323 // setup parameters
aph@7879 324 // ??? convention: expect aberrant index in register r1
aph@7879 325 __ movw(c_rarg2, r1);
aph@7879 326 __ mov(c_rarg1, (address)name);
aph@7879 327 __ call_VM(noreg,
aph@7879 328 CAST_FROM_FN_PTR(address,
aph@7879 329 InterpreterRuntime::
aph@7879 330 throw_ArrayIndexOutOfBoundsException),
aph@7879 331 c_rarg1, c_rarg2);
aph@7879 332 return entry;
aph@7879 333 }
aph@7879 334
aph@7879 335 address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
aph@7879 336 address entry = __ pc();
aph@7879 337
aph@7879 338 // object is at TOS
aph@7879 339 __ pop(c_rarg1);
aph@7879 340
aph@7879 341 // expression stack must be empty before entering the VM if an
aph@7879 342 // exception happened
aph@7879 343 __ empty_expression_stack();
aph@7879 344
aph@7879 345 __ call_VM(noreg,
aph@7879 346 CAST_FROM_FN_PTR(address,
aph@7879 347 InterpreterRuntime::
aph@7879 348 throw_ClassCastException),
aph@7879 349 c_rarg1);
aph@7879 350 return entry;
aph@7879 351 }
aph@7879 352
aph@7879 353 address TemplateInterpreterGenerator::generate_exception_handler_common(
aph@7879 354 const char* name, const char* message, bool pass_oop) {
aph@7879 355 assert(!pass_oop || message == NULL, "either oop or message but not both");
aph@7879 356 address entry = __ pc();
aph@7879 357 if (pass_oop) {
aph@7879 358 // object is at TOS
aph@7879 359 __ pop(c_rarg2);
aph@7879 360 }
aph@7879 361 // expression stack must be empty before entering the VM if an
aph@7879 362 // exception happened
aph@7879 363 __ empty_expression_stack();
aph@7879 364 // setup parameters
aph@7879 365 __ lea(c_rarg1, Address((address)name));
aph@7879 366 if (pass_oop) {
aph@7879 367 __ call_VM(r0, CAST_FROM_FN_PTR(address,
aph@7879 368 InterpreterRuntime::
aph@7879 369 create_klass_exception),
aph@7879 370 c_rarg1, c_rarg2);
aph@7879 371 } else {
aph@7879 372 // It's kind of lame that ExternalAddress can't take NULL, because
aph@7879 373 // external_word_Relocation will assert.
aph@7879 374 if (message != NULL) {
aph@7879 375 __ lea(c_rarg2, Address((address)message));
aph@7879 376 } else {
aph@7879 377 __ mov(c_rarg2, NULL_WORD);
aph@7879 378 }
aph@7879 379 __ call_VM(r0,
aph@7879 380 CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
aph@7879 381 c_rarg1, c_rarg2);
aph@7879 382 }
aph@7879 383 // throw exception
aph@7879 384 __ b(address(Interpreter::throw_exception_entry()));
aph@7879 385 return entry;
aph@7879 386 }
aph@7879 387
aph@7879 388 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
aph@7879 389 address entry = __ pc();
aph@7879 390 // NULL last_sp until next java call
aph@7879 391 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@7879 392 __ dispatch_next(state);
aph@7879 393 return entry;
aph@7879 394 }
aph@7879 395
aph@7879 396 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
aph@7879 397 address entry = __ pc();
aph@7879 398
aph@7879 399 // Restore stack bottom in case i2c adjusted stack
aph@7879 400 __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@7879 401 // and NULL it as marker that esp is now tos until next java call
aph@7879 402 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@7879 403 __ restore_bcp();
aph@7879 404 __ restore_locals();
aph@7879 405 __ restore_constant_pool_cache();
aph@7879 406 __ get_method(rmethod);
aph@7879 407
aph@7879 408 // Pop N words from the stack
aph@7879 409 __ get_cache_and_index_at_bcp(r1, r2, 1, index_size);
aph@7879 410 __ ldr(r1, Address(r1, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
aph@7879 411 __ andr(r1, r1, ConstantPoolCacheEntry::parameter_size_mask);
aph@7879 412
aph@7879 413 __ add(esp, esp, r1, Assembler::LSL, 3);
aph@7879 414
aph@7879 415 // Restore machine SP
aph@7879 416 __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
aph@7879 417 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
aph@7879 418 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
aph@7879 419 __ ldr(rscratch2,
aph@7879 420 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
aph@7879 421 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
aph@7879 422 __ andr(sp, rscratch1, -16);
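// (A sketch of the computation above:
//    sp = align_down(initial_sp - (max_stack + monitor_size + 2) * wordSize, 16)
//  i.e. enough room below the frame for the worst-case expression stack,
//  the monitor area, and a couple of slots of slack.)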
aph@7879 423
aph@7879 424 #ifndef PRODUCT
aph@7879 425 // tell the simulator that the method has been reentered
aph@7879 426 if (NotifySimulator) {
aph@7879 427 __ notify(Assembler::method_reentry);
aph@7879 428 }
aph@7879 429 #endif
aph@7879 430 __ get_dispatch();
aph@7879 431 __ dispatch_next(state, step);
aph@7879 432
aph@7879 433 return entry;
aph@7879 434 }
aph@7879 435
aph@7879 436 address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
aph@7879 437 int step) {
aph@7879 438 address entry = __ pc();
aph@7879 439 __ restore_bcp();
aph@7879 440 __ restore_locals();
aph@7879 441 __ restore_constant_pool_cache();
aph@7879 442 __ get_method(rmethod);
aph@11712 443 __ get_dispatch();
aph@11712 444
aph@11712 445 // Calculate stack limit
aph@11712 446 __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
aph@11712 447 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
aph@11712 448 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
aph@11712 449 __ ldr(rscratch2,
aph@11712 450 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
aph@11712 451 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
aph@11712 452 __ andr(sp, rscratch1, -16);
aph@11712 453
aph@11712 454 // Restore expression stack pointer
aph@11712 455 __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@11712 456 // NULL last_sp until next java call
aph@11712 457 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@7879 458
twisti@9813 459 #if INCLUDE_JVMCI
never@12177 460 // Check if we need to take lock at entry of synchronized method. This can
never@12177 461 // only occur on method entry so emit it only for vtos with step 0.
never@12177 462 if (UseJVMCICompiler && state == vtos && step == 0) {
twisti@9813 463 Label L;
twisti@9813 464 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
twisti@9813 465 __ cbz(rscratch1, L);
twisti@9813 466 // Clear flag.
twisti@9813 467 __ strb(zr, Address(rthread, JavaThread::pending_monitorenter_offset()));
twisti@9813 468 // Take lock.
twisti@9813 469 lock_method();
twisti@9813 470 __ bind(L);
never@12177 471 } else {
never@12177 472 #ifdef ASSERT
never@12177 473 if (UseJVMCICompiler) {
never@12177 474 Label L;
never@12177 475 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
never@12177 476 __ cbz(rscratch1, L);
never@12177 477 __ stop("unexpected pending monitor in deopt entry");
never@12177 478 __ bind(L);
never@12177 479 }
never@12177 480 #endif
twisti@9813 481 }
bobv@12234 482 #endif
aph@7879 483 // handle exceptions
aph@7879 484 {
aph@7879 485 Label L;
aph@7879 486 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
aph@7879 487 __ cbz(rscratch1, L);
aph@7879 488 __ call_VM(noreg,
aph@7879 489 CAST_FROM_FN_PTR(address,
aph@7879 490 InterpreterRuntime::throw_pending_exception));
aph@7879 491 __ should_not_reach_here();
aph@7879 492 __ bind(L);
aph@7879 493 }
aph@7879 494
aph@7879 495 __ dispatch_next(state, step);
aph@7879 496 return entry;
aph@7879 497 }
aph@7879 498
aph@7879 499 address TemplateInterpreterGenerator::generate_result_handler_for(
aph@7879 500 BasicType type) {
aph@7879 501 address entry = __ pc();
aph@7879 502 switch (type) {
aph@7879 503 case T_BOOLEAN: __ uxtb(r0, r0); break;
aph@7879 504 case T_CHAR : __ uxth(r0, r0); break;
aph@7879 505 case T_BYTE : __ sxtb(r0, r0); break;
aph@7879 506 case T_SHORT : __ sxth(r0, r0); break;
aph@7879 507 case T_INT : __ uxtw(r0, r0); break; // FIXME: We almost certainly don't need this
aph@7879 508 case T_LONG : /* nothing to do */ break;
aph@7879 509 case T_VOID : /* nothing to do */ break;
aph@7879 510 case T_FLOAT : /* nothing to do */ break;
aph@7879 511 case T_DOUBLE : /* nothing to do */ break;
aph@7879 512 case T_OBJECT :
aph@7879 513 // retrieve result from frame
aph@7879 514 __ ldr(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
aph@7879 515 // and verify it
aph@7879 516 __ verify_oop(r0);
aph@7879 517 break;
aph@7879 518 default : ShouldNotReachHere();
aph@7879 519 }
aph@7879 520 __ ret(lr); // return from result handler
aph@7879 521 return entry;
aph@7879 522 }
aph@7879 523
aph@7879 524 address TemplateInterpreterGenerator::generate_safept_entry_for(
aph@7879 525 TosState state,
aph@7879 526 address runtime_entry) {
aph@7879 527 address entry = __ pc();
aph@7879 528 __ push(state);
aph@7879 529 __ call_VM(noreg, runtime_entry);
aph@7879 530 __ membar(Assembler::AnyAny);
aph@7879 531 __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
aph@7879 532 return entry;
aph@7879 533 }
aph@7879 534
aph@7879 535 // Helpers for commoning out cases in the various types of method entries.
aph@7879 536 //
aph@7879 537
aph@7879 538
aph@7879 539 // increment invocation count & check for overflow
aph@7879 540 //
aph@7879 541 // Note: checking for a negative value instead of overflow
aph@7879 542 // so that we have a 'sticky' overflow test
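// (Sketch of why the test is 'sticky': the accumulated counter value is
//  compared against a limit rather than testing the overflow flag of a
//  single increment, so once the limit has been crossed the check keeps
//  firing on every subsequent invocation, not just at the instant the
//  counter overflowed.)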
aph@7879 543 //
aph@7879 544 // rmethod: method
aph@7879 545 //
coleenp@9879 546 void TemplateInterpreterGenerator::generate_counter_incr(
aph@7879 547 Label* overflow,
aph@7879 548 Label* profile_method,
aph@7879 549 Label* profile_method_continue) {
aph@7879 550 Label done;
aph@7879 551 // Note: In tiered we increment either counters in Method* or in MDO depending on whether we're profiling or not.
aph@7879 552 if (TieredCompilation) {
aph@7879 553 int increment = InvocationCounter::count_increment;
aph@7879 554 Label no_mdo;
aph@7879 555 if (ProfileInterpreter) {
aph@7879 556 // Are we profiling?
aph@7879 557 __ ldr(r0, Address(rmethod, Method::method_data_offset()));
aph@7879 558 __ cbz(r0, no_mdo);
aph@7879 559 // Increment counter in the MDO
aph@7879 560 const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
aph@7879 561 in_bytes(InvocationCounter::counter_offset()));
adinn@7891 562 const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
adinn@7891 563 __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
aph@7879 564 __ b(done);
aph@7879 565 }
aph@7879 566 __ bind(no_mdo);
aph@7879 567 // Increment counter in MethodCounters
aph@7879 568 const Address invocation_counter(rscratch2,
aph@7879 569 MethodCounters::invocation_counter_offset() +
aph@7879 570 InvocationCounter::counter_offset());
aph@7879 571 __ get_method_counters(rmethod, rscratch2, done);
adinn@7891 572 const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
adinn@7891 573 __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
aph@7879 574 __ bind(done);
adinn@7891 575 } else { // not TieredCompilation
aph@7879 576 const Address backedge_counter(rscratch2,
aph@7879 577 MethodCounters::backedge_counter_offset() +
aph@7879 578 InvocationCounter::counter_offset());
aph@7879 579 const Address invocation_counter(rscratch2,
aph@7879 580 MethodCounters::invocation_counter_offset() +
aph@7879 581 InvocationCounter::counter_offset());
aph@7879 582
aph@7879 583 __ get_method_counters(rmethod, rscratch2, done);
aph@7879 584
aph@7879 585 if (ProfileInterpreter) { // %%% Merge this into MethodData*
aph@7879 586 __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
aph@7879 587 __ addw(r1, r1, 1);
aph@7879 588 __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
aph@7879 589 }
aph@7879 590 // Update standard invocation counters
aph@7879 591 __ ldrw(r1, invocation_counter);
aph@7879 592 __ ldrw(r0, backedge_counter);
aph@7879 593
aph@7879 594 __ addw(r1, r1, InvocationCounter::count_increment);
aph@7879 595 __ andw(r0, r0, InvocationCounter::count_mask_value);
aph@7879 596
aph@7879 597 __ strw(r1, invocation_counter);
aph@7879 598 __ addw(r0, r0, r1); // add both counters
aph@7879 599
aph@7879 600 // profile_method is non-null only for interpreted methods, so
aph@7879 601 // profile_method != NULL == !native_call
aph@7879 602
aph@7879 603 if (ProfileInterpreter && profile_method != NULL) {
aph@7879 604 // Test to see if we should create a method data oop
adinn@7891 605 __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
adinn@7891 606 __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
adinn@7891 607 __ cmpw(r0, rscratch2);
aph@7879 608 __ br(Assembler::LT, *profile_method_continue);
aph@7879 609
aph@7879 610 // if no method data exists, go to profile_method
aph@9791 611 __ test_method_data_pointer(rscratch2, *profile_method);
aph@7879 612 }
aph@7879 613
aph@7879 614 {
adinn@7891 615 __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
adinn@7891 616 __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
aph@7879 617 __ cmpw(r0, rscratch2);
aph@7879 618 __ br(Assembler::HS, *overflow);
aph@7879 619 }
aph@7879 620 __ bind(done);
aph@7879 621 }
aph@7879 622 }
aph@7879 623
coleenp@9879 624 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
aph@7879 625
aph@7879 626 // Asm interpreter on entry
aph@7879 627 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
aph@7879 628 // Everything as it was on entry
aph@7879 629
aph@7879 630 // InterpreterRuntime::frequency_counter_overflow takes two
aph@7879 631 // arguments, the first (thread) is passed by call_VM, the second
aph@7879 632 // indicates if the counter overflow occurs at a backwards branch
aph@7879 633 // (NULL bcp). We pass zero for it. The call returns the address
aph@7879 634 // of the verified entry point for the method or NULL if the
aph@7879 635 // compilation did not complete (either went background or bailed
aph@7879 636 // out).
aph@7879 637 __ mov(c_rarg1, 0);
aph@7879 638 __ call_VM(noreg,
aph@7879 639 CAST_FROM_FN_PTR(address,
aph@7879 640 InterpreterRuntime::frequency_counter_overflow),
aph@7879 641 c_rarg1);
aph@7879 642
coleenp@9879 643 __ b(do_continue);
aph@7879 644 }
aph@7879 645
goetz@11485 646 // See if we've got enough room on the stack for locals plus overhead
goetz@11485 647 // below JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
goetz@11485 648 // without going through the signal handler, i.e., reserved and yellow zones
goetz@11485 649 // will not be made usable. The shadow zone must suffice to handle the
goetz@11485 650 // overflow.
aph@7879 651 // The expression stack grows down incrementally, so the normal guard
aph@7879 652 // page mechanism will work for that.
aph@7879 653 //
aph@7879 654 // NOTE: Since the additional locals are also always pushed (this wasn't
aph@7879 655 // obvious in generate_method_entry), the guard should work for them
aph@7879 656 // too.
aph@7879 657 //
aph@7879 658 // Args:
aph@7879 659 // r3: number of additional locals this frame needs (what we must check)
aph@7879 660 // rmethod: Method*
aph@7879 661 //
aph@7879 662 // Kills:
aph@7879 663 // r0
coleenp@9879 664 void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
aph@7879 665
aph@7879 666 // monitor entry size: see picture of stack set
aph@7879 667 // (generate_method_entry) and frame_aarch64.hpp
aph@7879 668 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
aph@7879 669
aph@7879 670 // total overhead size: entry_size + (saved rbp through expr stack
aph@7879 671 // bottom). be sure to change this if you add/subtract anything
aph@7879 672 // to/from the overhead area
aph@7879 673 const int overhead_size =
aph@7879 674 -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
aph@7879 675
aph@7879 676 const int page_size = os::vm_page_size();
aph@7879 677
aph@7879 678 Label after_frame_check;
aph@7879 679
aph@7879 680 // see if the frame is greater than one page in size. If so,
aph@7879 681 // then we need to verify there is enough stack space remaining
aph@7879 682 // for the additional locals.
aph@7879 683 //
aph@7879 684 // Note that we use SUBS rather than CMP here because the immediate
aph@7879 685 // field of this instruction may overflow. SUBS can cope with this
aph@7879 686 // because it is a macro that will expand to some number of MOV
aph@7879 687 // instructions and a register operation.
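// (Illustrative expansion, assuming an immediate too large for the 12-bit
//  (optionally shifted) arithmetic-immediate field — the macro may emit
//  something like:
//    mov  rscratch2, #imm            // materialize the constant
//    subs rscratch1, r3, rscratch2   // then a register-register SUBS
//  instead of a single subs-with-immediate.)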
aph@7879 688 __ subs(rscratch1, r3, (page_size - overhead_size) / Interpreter::stackElementSize);
aph@7879 689 __ br(Assembler::LS, after_frame_check);
aph@7879 690
aph@7879 691 // compute rsp as if this were going to be the last frame on
aph@7879 692 // the stack before the red zone
aph@7879 693
aph@7879 694 // locals + overhead, in bytes
aph@7879 695 __ mov(r0, overhead_size);
aph@7879 696 __ add(r0, r0, r3, Assembler::LSL, Interpreter::logStackElementSize); // 2 slots per parameter.
aph@7879 697
aph@11505 698 const Address stack_limit(rthread, JavaThread::stack_overflow_limit_offset());
goetz@11485 699 __ ldr(rscratch1, stack_limit);
aph@7879 700
aph@7879 701 #ifdef ASSERT
goetz@11485 702 Label limit_okay;
goetz@11485 703 // Verify that thread stack limit is non-zero.
goetz@11485 704 __ cbnz(rscratch1, limit_okay);
goetz@11485 705 __ stop("stack overflow limit is zero");
aph@11505 706 __ bind(limit_okay);
aph@7879 707 #endif
aph@7879 708
goetz@11485 709 // Add stack limit to locals.
aph@7879 710 __ add(r0, r0, rscratch1);
aph@7879 711
goetz@11485 712 // Check against the current stack bottom.
aph@7879 713 __ cmp(sp, r0);
aph@7879 714 __ br(Assembler::HI, after_frame_check);
aph@7879 715
aph@7879 716 // Remove the incoming args, peeling the machine SP back to where it
aph@7879 717 // was in the caller. This is not strictly necessary, but unless we
aph@7879 718 // do so the stack frame may have a garbage FP; this ensures a
aph@7879 719 // correct call stack that we can always unwind. The ANDR should be
aph@7879 720 // unnecessary because the sender SP in r13 is always aligned, but
aph@7879 721 // it doesn't hurt.
aph@7879 722 __ andr(sp, r13, -16);
aph@7879 723
aph@7879 724 // Note: the restored frame is not necessarily interpreted.
aph@7879 725 // Use the shared runtime version of the StackOverflowError.
aph@7879 726 assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
aph@7879 727 __ far_jump(RuntimeAddress(StubRoutines::throw_StackOverflowError_entry()));
aph@7879 728
aph@7879 729 // all done with frame size check
aph@7879 730 __ bind(after_frame_check);
aph@7879 731 }
aph@7879 732
aph@7879 733 // Allocate monitor and lock method (asm interpreter)
aph@7879 734 //
aph@7879 735 // Args:
aph@7879 736 // rmethod: Method*
aph@7879 737 // rlocals: locals
aph@7879 738 //
aph@7879 739 // Kills:
aph@7879 740 // r0
aph@7879 741 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
aph@7879 742 // rscratch1, rscratch2 (scratch regs)
twisti@9109 743 void TemplateInterpreterGenerator::lock_method() {
aph@7879 744 // synchronize method
aph@7879 745 const Address access_flags(rmethod, Method::access_flags_offset());
aph@7879 746 const Address monitor_block_top(
aph@7879 747 rfp,
aph@7879 748 frame::interpreter_frame_monitor_block_top_offset * wordSize);
aph@7879 749 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
aph@7879 750
aph@7879 751 #ifdef ASSERT
aph@7879 752 {
aph@7879 753 Label L;
aph@7879 754 __ ldrw(r0, access_flags);
aph@7879 755 __ tst(r0, JVM_ACC_SYNCHRONIZED);
aph@7879 756 __ br(Assembler::NE, L);
aph@7879 757 __ stop("method doesn't need synchronization");
aph@7879 758 __ bind(L);
aph@7879 759 }
aph@7879 760 #endif // ASSERT
aph@7879 761
aph@7879 762 // get synchronization object
aph@7879 763 {
aph@7879 764 Label done;
aph@7879 765 __ ldrw(r0, access_flags);
aph@7879 766 __ tst(r0, JVM_ACC_STATIC);
aph@7879 767 // get receiver (assume this is frequent case)
aph@7879 768 __ ldr(r0, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
aph@7879 769 __ br(Assembler::EQ, done);
coleenp@11019 770 __ load_mirror(r0, rmethod);
aph@7879 771
aph@7879 772 #ifdef ASSERT
aph@7879 773 {
aph@7879 774 Label L;
aph@7879 775 __ cbnz(r0, L);
aph@7879 776 __ stop("synchronization object is NULL");
aph@7879 777 __ bind(L);
aph@7879 778 }
aph@7879 779 #endif // ASSERT
aph@7879 780
aph@7879 781 __ bind(done);
aph@7879 782 }
aph@7879 783
aph@7879 784 // add space for monitor & lock
aph@7879 785 __ sub(sp, sp, entry_size); // add space for a monitor entry
aph@7879 786 __ sub(esp, esp, entry_size);
aph@7879 787 __ mov(rscratch1, esp);
aph@7879 788 __ str(rscratch1, monitor_block_top); // set new monitor block top
aph@7879 789 // store object
aph@7879 790 __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes()));
aph@7879 791 __ mov(c_rarg1, esp); // object address
aph@7879 792 __ lock_object(c_rarg1);
aph@7879 793 }
aph@7879 794
aph@7879 795 // Generate a fixed interpreter frame. This is identical setup for
aph@7879 796 // interpreted methods and for native methods hence the shared code.
aph@7879 797 //
aph@7879 798 // Args:
aph@7879 799 // lr: return address
aph@7879 800 // rmethod: Method*
aph@7879 801 // rlocals: pointer to locals
aph@7879 802 // rcpool: cp cache
aph@7879 803 // stack_pointer: previous sp
aph@7879 804 void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
aph@7879 805 // initialize fixed part of activation frame
aph@7879 806 if (native_call) {
coleenp@11019 807 __ sub(esp, sp, 14 * wordSize);
coleenp@11019 808 __ mov(rbcp, zr);
coleenp@11019 809 __ stp(esp, zr, Address(__ pre(sp, -14 * wordSize)));
coleenp@11019 810 // add 2 zero-initialized slots for native calls
coleenp@11019 811 __ stp(zr, zr, Address(sp, 12 * wordSize));
coleenp@11019 812 } else {
aph@7879 813 __ sub(esp, sp, 12 * wordSize);
aph@7879 814 __ ldr(rscratch1, Address(rmethod, Method::const_offset())); // get ConstMethod
aph@7879 815 __ add(rbcp, rscratch1, in_bytes(ConstMethod::codes_offset())); // get codebase
coleenp@11019 816 __ stp(esp, rbcp, Address(__ pre(sp, -12 * wordSize)));
aph@7879 817 }
aph@7879 818
aph@7879 819 if (ProfileInterpreter) {
aph@7879 820 Label method_data_continue;
aph@7879 821 __ ldr(rscratch1, Address(rmethod, Method::method_data_offset()));
aph@7879 822 __ cbz(rscratch1, method_data_continue);
aph@7879 823 __ lea(rscratch1, Address(rscratch1, in_bytes(MethodData::data_offset())));
aph@7879 824 __ bind(method_data_continue);
coleenp@11019 825 __ stp(rscratch1, rmethod, Address(sp, 6 * wordSize)); // save Method* and mdp (method data pointer)
aph@7879 826 } else {
coleenp@11019 827 __ stp(zr, rmethod, Address(sp, 6 * wordSize)); // save Method* (no mdp)
aph@7879 828 }
aph@7879 829
coleenp@11019 830 // Get mirror and store it in the frame as GC root for this Method*
coleenp@11019 831 __ load_mirror(rscratch1, rmethod);
coleenp@11019 832 __ stp(rscratch1, zr, Address(sp, 4 * wordSize));
coleenp@11019 833
aph@7879 834 __ ldr(rcpool, Address(rmethod, Method::const_offset()));
aph@7879 835 __ ldr(rcpool, Address(rcpool, ConstMethod::constants_offset()));
aph@7879 836 __ ldr(rcpool, Address(rcpool, ConstantPool::cache_offset_in_bytes()));
aph@7879 837 __ stp(rlocals, rcpool, Address(sp, 2 * wordSize));
aph@7879 838
coleenp@11019 839 __ stp(rfp, lr, Address(sp, 10 * wordSize));
coleenp@11019 840 __ lea(rfp, Address(sp, 10 * wordSize));
aph@7879 841
aph@7879 842 // set sender sp
aph@7879 843 // leave last_sp as null
coleenp@11019 844 __ stp(zr, r13, Address(sp, 8 * wordSize));
aph@7879 845
aph@7879 846 // Move SP out of the way
aph@7879 847 if (! native_call) {
aph@7879 848 __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
aph@7879 849 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
aph@7879 850 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 2);
aph@7879 851 __ sub(rscratch1, sp, rscratch1, ext::uxtw, 3);
aph@7879 852 __ andr(sp, rscratch1, -16);
aph@7879 853 }
aph@7879 854 }
aph@7879 855
aph@7879 856 // End of helpers
aph@7879 857
aph@7879 858 // Various method entries
aph@7879 859 //------------------------------------------------------------------------------------------------------------------------
aph@7879 860 //
aph@7879 861 //
aph@7879 862
aph@7879 863 // Method entry for java.lang.ref.Reference.get.
coleenp@9879 864 address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
aph@7879 865 #if INCLUDE_ALL_GCS
aph@7879 866 // Code: _aload_0, _getfield, _areturn
aph@7879 867 // parameter size = 1
aph@7879 868 //
aph@7879 869 // The code that gets generated by this routine is split into 2 parts:
aph@7879 870 // 1. The "intrinsified" code for G1 (or any SATB based GC),
aph@7879 871 // 2. The slow path - which is an expansion of the regular method entry.
aph@7879 872 //
aph@7879 873 // Notes:-
aph@7879 874 // * In the G1 code we do not check whether we need to block for
aph@7879 875 // a safepoint. If G1 is enabled then we must execute the specialized
aph@7879 876 // code for Reference.get (except when the Reference object is null)
aph@7879 877 // so that we can log the value in the referent field with an SATB
aph@7879 878 // update buffer.
aph@7879 879 // If the code for the getfield template is modified so that the
aph@7879 880 // G1 pre-barrier code is executed when the current method is
aph@7879 881 // Reference.get() then going through the normal method entry
aph@7879 882 // will be fine.
aph@7879 883 // * The G1 code can, however, check the receiver object (the instance
aph@7879 884 // of java.lang.Reference) and jump to the slow path if null. If the
aph@7879 885 // Reference object is null then we obviously cannot fetch the referent
aph@7879 886 // and so we don't need to call the G1 pre-barrier. Thus we can use the
aph@7879 887 // regular method entry code to generate the NPE.
aph@7879 888 //
aph@7879 889 // This code is based on generate_accessor_entry.
aph@7879 890 //
aph@7879 891 // rmethod: Method*
aph@7879 892 // r13: senderSP must be preserved for slow path, set SP to it on fast path
aph@7879 893
aph@7879 894 address entry = __ pc();
aph@7879 895
aph@7879 896 const int referent_offset = java_lang_ref_Reference::referent_offset;
aph@7879 897 guarantee(referent_offset > 0, "referent offset not initialized");
aph@7879 898
aph@7879 899 if (UseG1GC) {
aph@7879 900 Label slow_path;
aph@7879 901 const Register local_0 = c_rarg0;
aph@7879 902 // Check if local 0 != NULL
aph@7879 903 // If the receiver is null then it is OK to jump to the slow path.
aph@7879 904 __ ldr(local_0, Address(esp, 0));
aph@7879 905 __ cbz(local_0, slow_path);
aph@7879 906
aph@7879 907
aph@7879 908 // Load the value of the referent field.
aph@7879 909 const Address field_address(local_0, referent_offset);
aph@7879 910 __ load_heap_oop(local_0, field_address);
aph@7879 911
aph@7879 912 // Generate the G1 pre-barrier code to log the value of
aph@7879 913 // the referent field in an SATB buffer.
aph@7879 914 __ enter(); // g1_write may call runtime
aph@7879 915 __ g1_write_barrier_pre(noreg /* obj */,
aph@7879 916 local_0 /* pre_val */,
aph@7879 917 rthread /* thread */,
aph@7879 918 rscratch2 /* tmp */,
aph@7879 919 true /* tosca_live */,
aph@7879 920 true /* expand_call */);
aph@7879 921 __ leave();
aph@7879 922 // areturn
aph@7879 923 __ andr(sp, r13, -16); // done with stack
aph@7879 924 __ ret(lr);
aph@7879 925
aph@7879 926 // generate a vanilla interpreter entry as the slow path
aph@7879 927 __ bind(slow_path);
mdoerr@9019 928 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
aph@7879 929 return entry;
aph@7879 930 }
aph@7879 931 #endif // INCLUDE_ALL_GCS
aph@7879 932
aph@7879 933 // If G1 is not enabled then attempt to go through the accessor entry point;
aph@7879 934 // Reference.get is an accessor.
enevill@10007 935 return NULL;
aph@7879 936 }
aph@7879 937
aph@7879 938 /**
aph@7879 939 * Method entry for static native methods:
aph@7879 940 * int java.util.zip.CRC32.update(int crc, int b)
aph@7879 941 */
coleenp@9879 942 address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
aph@7879 943 if (UseCRC32Intrinsics) {
aph@7879 944 address entry = __ pc();
aph@7879 945
aph@7879 946 // rmethod: Method*
aph@7879 947 // r13: senderSP must be preserved for slow path
aph@7879 948 // esp: args
aph@7879 949
aph@7879 950 Label slow_path;
aph@7879 951 // If we need a safepoint check, generate full interpreter entry.
aph@7879 952 ExternalAddress state(SafepointSynchronize::address_of_state());
aph@7879 953 unsigned long offset;
aph@7879 954 __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
aph@7879 955 __ ldrw(rscratch1, Address(rscratch1, offset));
aph@7879 956 assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
aph@7879 957 __ cbnz(rscratch1, slow_path);
aph@7879 958
aph@7879 959 // We don't generate local frame and don't align stack because
aph@7879 960 // we call stub code and there is no safepoint on this path.
aph@7879 961
aph@7879 962 // Load parameters
aph@7879 963 const Register crc = c_rarg0; // crc
aph@7879 964 const Register val = c_rarg1; // source java byte value
aph@7879 965 const Register tbl = c_rarg2; // scratch
aph@7879 966
aph@7879 967 // Arguments are reversed on java expression stack
aph@7879 968 __ ldrw(val, Address(esp, 0)); // byte value
aph@7879 969 __ ldrw(crc, Address(esp, wordSize)); // Initial CRC
aph@7879 970
aph@7879 971 __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
aph@7879 972 __ add(tbl, tbl, offset);
aph@7879 973
aph@7879 974 __ ornw(crc, zr, crc); // ~crc
aph@7879 975 __ update_byte_crc32(crc, val, tbl);
aph@7879 976 __ ornw(crc, zr, crc); // ~crc
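// (ORN with the zero register is a bitwise NOT: zr | ~crc == ~crc. The
//  two ornw instructions implement the standard CRC-32 pre- and
//  post-inversion around the table-driven update.)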
aph@7879 977
aph@7879 978 // result in c_rarg0
aph@7879 979
aph@7879 980 __ andr(sp, r13, -16);
aph@7879 981 __ ret(lr);
aph@7879 982
aph@7879 983 // generate a vanilla native entry as the slow path
aph@7879 984 __ bind(slow_path);
mdoerr@9019 985 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
aph@7879 986 return entry;
aph@7879 987 }
mdoerr@9019 988 return NULL;
aph@7879 989 }
aph@7879 990
aph@7879 991 /**
aph@7879 992 * Method entry for static native methods:
aph@7879 993 * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
aph@7879 994 * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
aph@7879 995 */
coleenp@9879 996 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
aph@7879 997 if (UseCRC32Intrinsics) {
aph@7879 998 address entry = __ pc();
aph@7879 999
aph@7879 1000 // rmethod: Method*
aph@7879 1001 // r13: senderSP must be preserved for slow path
aph@7879 1002
aph@7879 1003 Label slow_path;
aph@7879 1004 // If we need a safepoint check, generate full interpreter entry.
aph@7879 1005 ExternalAddress state(SafepointSynchronize::address_of_state());
aph@7879 1006 unsigned long offset;
aph@7879 1007 __ adrp(rscratch1, ExternalAddress(SafepointSynchronize::address_of_state()), offset);
aph@7879 1008 __ ldrw(rscratch1, Address(rscratch1, offset));
aph@7879 1009 assert(SafepointSynchronize::_not_synchronized == 0, "rewrite this code");
aph@7879 1010 __ cbnz(rscratch1, slow_path);
aph@7879 1011
aph@7879 1012 // We don't generate local frame and don't align stack because
aph@7879 1013 // we call stub code and there is no safepoint on this path.
aph@7879 1014
aph@7879 1015 // Load parameters
aph@7879 1016 const Register crc = c_rarg0; // crc
aph@7879 1017 const Register buf = c_rarg1; // source java byte array address
aph@7879 1018 const Register len = c_rarg2; // length
aph@7879 1019 const Register off = len; // offset (never overlaps with 'len')
aph@7879 1020
aph@7879 1021 // Arguments are reversed on java expression stack
aph@7879 1022 // Calculate address of start element
aph@7879 1023 if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
aph@7879 1024 __ ldr(buf, Address(esp, 2*wordSize)); // long buf
aph@7879 1025 __ ldrw(off, Address(esp, wordSize)); // offset
aph@7879 1026 __ add(buf, buf, off); // + offset
aph@7879 1027 __ ldrw(crc, Address(esp, 4*wordSize)); // Initial CRC
aph@7879 1028 } else {
aph@7879 1029 __ ldr(buf, Address(esp, 2*wordSize)); // byte[] array
aph@7879 1030 __ add(buf, buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
aph@7879 1031 __ ldrw(off, Address(esp, wordSize)); // offset
aph@7879 1032 __ add(buf, buf, off); // + offset
aph@7879 1033 __ ldrw(crc, Address(esp, 3*wordSize)); // Initial CRC
aph@7879 1034 }
aph@7879 1035 // Can now load 'len' since we're finished with 'off'
aph@7879 1036 __ ldrw(len, Address(esp, 0x0)); // Length
aph@7879 1037
aph@7879 1038 __ andr(sp, r13, -16); // Restore the caller's SP
aph@7879 1039
aph@7879 1040 // We are frameless so we can just jump to the stub.
aph@7879 1041 __ b(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()));
aph@7879 1042
aph@7879 1043 // generate a vanilla native entry as the slow path
aph@7879 1044 __ bind(slow_path);
mdoerr@9019 1045 __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
aph@7879 1046 return entry;
aph@7879 1047 }
mdoerr@9019 1048 return NULL;
aph@7879 1049 }
aph@7879 1050
coleenp@9879 1051 // Not supported
coleenp@9879 1052 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
coleenp@9879 1053 return NULL;
coleenp@9879 1054 }
coleenp@9879 1055
coleenp@9879 1056 void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
aph@7879 1057 // Bang each page in the shadow zone. We can't assume it's been done for
aph@7879 1058 // an interpreter frame with greater than a page of locals, so each page
aph@7879 1059 // needs to be checked. Only true for non-native.
aph@7879 1060 if (UseStackBanging) {
enevill@10007 1061 const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size();
goetz@9866 1062 const int start_page = native_call ? n_shadow_pages : 1;
aph@7879 1063 const int page_size = os::vm_page_size();
goetz@9866 1064 for (int pages = start_page; pages <= n_shadow_pages ; pages++) {
aph@7879 1065 __ sub(rscratch2, sp, pages*page_size);
aph@7987 1066 __ str(zr, Address(rscratch2));
aph@7879 1067 }
aph@7879 1068 }
aph@7879 1069 }
aph@7879 1070
aph@7879 1071
aph@7879 1072 // Interpreter stub for calling a native method. (asm interpreter)
aph@7879 1073 // This sets up a somewhat different looking stack for calling the
aph@7879 1074 // native method than the typical interpreter frame setup.
coleenp@9879 1075 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
aph@7879 1076 // determine code generation flags
minqi@8705 1077 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
aph@7879 1078
aph@7879 1079 // r1: Method*
aph@7879 1080 // rscratch1: sender sp
aph@7879 1081
aph@7879 1082 address entry_point = __ pc();
aph@7879 1083
aph@7879 1084 const Address constMethod (rmethod, Method::const_offset());
aph@7879 1085 const Address access_flags (rmethod, Method::access_flags_offset());
aph@7879 1086 const Address size_of_parameters(r2, ConstMethod::
aph@7879 1087 size_of_parameters_offset());
aph@7879 1088
aph@7879 1089 // get parameter size (always needed)
aph@7879 1090 __ ldr(r2, constMethod);
aph@7879 1091 __ load_unsigned_short(r2, size_of_parameters);
aph@7879 1092
goetz@11485 1093 // Native calls don't need the stack size check since they have no
aph@7879 1094 // expression stack, the arguments are already on the stack, and
goetz@11485 1095 // we only add a handful of words to the stack.
aph@7879 1096
aph@7879 1097 // rmethod: Method*
aph@7879 1098 // r2: size of parameters
aph@7879 1099 // rscratch1: sender sp
aph@7879 1100
aph@7879 1101 // for natives the size of locals is zero
aph@7879 1102
aph@7879 1103 // compute beginning of parameters (rlocals)
aph@7879 1104 __ add(rlocals, esp, r2, ext::uxtx, 3);
aph@7879 1105 __ add(rlocals, rlocals, -wordSize);
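// (rlocals now points at local 0, i.e. the first parameter: parameters
//  live on the caller's expression stack and locals are indexed downward
//  from the highest-addressed slot, hence esp + size*wordSize - wordSize.)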
aph@7879 1106
aph@7879 1107 // Pull SP back to minimum size: this avoids holes in the stack
aph@7879 1108 __ andr(sp, esp, -16);
aph@7879 1109
aph@7879 1110 // initialize fixed part of activation frame
aph@7879 1111 generate_fixed_frame(true);
aph@7879 1112 #ifndef PRODUCT
aph@7879 1113 // tell the simulator that a method has been entered
aph@7879 1114 if (NotifySimulator) {
aph@7879 1115 __ notify(Assembler::method_entry);
aph@7879 1116 }
aph@7879 1117 #endif
aph@7879 1118
aph@7879 1119 // make sure method is native & not abstract
aph@7879 1120 #ifdef ASSERT
aph@7879 1121 __ ldrw(r0, access_flags);
aph@7879 1122 {
aph@7879 1123 Label L;
aph@7879 1124 __ tst(r0, JVM_ACC_NATIVE);
aph@7879 1125 __ br(Assembler::NE, L);
aph@7879 1126 __ stop("tried to execute non-native method as native");
aph@7879 1127 __ bind(L);
aph@7879 1128 }
aph@7879 1129 {
aph@7879 1130 Label L;
aph@7879 1131 __ tst(r0, JVM_ACC_ABSTRACT);
aph@7879 1132 __ br(Assembler::EQ, L);
aph@7879 1133 __ stop("tried to execute abstract method in interpreter");
aph@7879 1134 __ bind(L);
aph@7879 1135 }
aph@7879 1136 #endif
aph@7879 1137
aph@7879 1138 // Since at this point in the method invocation the exception
aph@7879 1139 // handler would try to exit the monitor of a synchronized method
aph@7879 1140 // which hasn't been entered yet, we set the thread local variable
aph@7879 1141 // _do_not_unlock_if_synchronized to true. The remove_activation
aph@7879 1142 // will check this flag.
aph@7879 1143
aph@7879 1144 const Address do_not_unlock_if_synchronized(rthread,
aph@7879 1145 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
aph@7879 1146 __ mov(rscratch2, true);
aph@7879 1147 __ strb(rscratch2, do_not_unlock_if_synchronized);
aph@7879 1148
aph@7879 1149 // increment invocation count & check for overflow
aph@7879 1150 Label invocation_counter_overflow;
aph@7879 1151 if (inc_counter) {
aph@7879 1152 generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
aph@7879 1153 }
aph@7879 1154
aph@7879 1155 Label continue_after_compile;
aph@7879 1156 __ bind(continue_after_compile);
aph@7879 1157
aph@7879 1158 bang_stack_shadow_pages(true);
aph@7879 1159
aph@7879 1160 // reset the _do_not_unlock_if_synchronized flag
aph@7879 1161 __ strb(zr, do_not_unlock_if_synchronized);
aph@7879 1162
aph@7879 1163 // check for synchronized methods
aph@7879 1164 // Must happen AFTER invocation_counter check and stack overflow check,
aph@7879 1165 // so method is not locked if overflows.
aph@7879 1166 if (synchronized) {
aph@7879 1167 lock_method();
aph@7879 1168 } else {
aph@7879 1169 // no synchronization necessary
aph@7879 1170 #ifdef ASSERT
aph@7879 1171 {
aph@7879 1172 Label L;
aph@7879 1173 __ ldrw(r0, access_flags);
aph@7879 1174 __ tst(r0, JVM_ACC_SYNCHRONIZED);
aph@7879 1175 __ br(Assembler::EQ, L);
aph@7879 1176 __ stop("method needs synchronization");
aph@7879 1177 __ bind(L);
aph@7879 1178 }
aph@7879 1179 #endif
aph@7879 1180 }
aph@7879 1181
aph@7879 1182 // start execution
aph@7879 1183 #ifdef ASSERT
aph@7879 1184 {
aph@7879 1185 Label L;
aph@7879 1186 const Address monitor_block_top(rfp,
aph@7879 1187 frame::interpreter_frame_monitor_block_top_offset * wordSize);
aph@7879 1188 __ ldr(rscratch1, monitor_block_top);
aph@7879 1189 __ cmp(esp, rscratch1);
aph@7879 1190 __ br(Assembler::EQ, L);
aph@7879 1191 __ stop("broken stack frame setup in interpreter");
aph@7879 1192 __ bind(L);
aph@7879 1193 }
aph@7879 1194 #endif
aph@7879 1195
aph@7879 1196 // jvmti support
aph@7879 1197 __ notify_method_entry();
aph@7879 1198
aph@7879 1199 // work registers
aph@7879 1200 const Register t = r17;
aph@7879 1201 const Register result_handler = r19;
aph@7879 1202
aph@7879 1203 // allocate space for parameters
aph@7879 1204 __ ldr(t, Address(rmethod, Method::const_offset()));
aph@7879 1205 __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
aph@7879 1206
aph@7879 1207 __ sub(rscratch1, esp, t, ext::uxtx, Interpreter::logStackElementSize);
aph@7879 1208 __ andr(sp, rscratch1, -16);
aph@7879 1209 __ mov(esp, rscratch1);
aph@7879 1210
aph@7879 1211 // get signature handler
aph@7879 1212 {
aph@7879 1213 Label L;
aph@7879 1214 __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
aph@7879 1215 __ cbnz(t, L);
aph@7879 1216 __ call_VM(noreg,
aph@7879 1217 CAST_FROM_FN_PTR(address,
aph@7879 1218 InterpreterRuntime::prepare_native_call),
aph@7879 1219 rmethod);
aph@7879 1220 __ ldr(t, Address(rmethod, Method::signature_handler_offset()));
aph@7879 1221 __ bind(L);
aph@7879 1222 }
aph@7879 1223
aph@7879 1224 // call signature handler
aph@7879 1225 assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
aph@7879 1226 "adjust this code");
aph@7879 1227 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == sp,
aph@7879 1228 "adjust this code");
aph@7879 1229 assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == rscratch1,
aph@7879 1230 "adjust this code");
aph@7879 1231
aph@7879 1232 // The generated handlers do not touch rmethod (the method).
aph@7879 1233 // However, large signatures cannot be cached and are generated
aph@7879 1234 // each time here. The slow-path generator can do a GC on return,
aph@7879 1235 // so we must reload it after the call.
aph@7879 1236 __ blr(t);
aph@7879 1237 __ get_method(rmethod); // slow path can do a GC, reload rmethod
aph@7879 1238
aph@7879 1239
aph@7879 1240 // result handler is in r0
aph@7879 1241 // set result handler
aph@7879 1242 __ mov(result_handler, r0);
aph@7879 1243 // pass mirror handle if static call
aph@7879 1244 {
aph@7879 1245 Label L;
aph@7879 1246 __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
fyang@11440 1247 __ tbz(t, exact_log2(JVM_ACC_STATIC), L);
aph@7879 1248 // get mirror
coleenp@11019 1249 __ load_mirror(t, rmethod);
aph@7879 1250 // copy mirror into activation frame
aph@7879 1251 __ str(t, Address(rfp, frame::interpreter_frame_oop_temp_offset * wordSize));
aph@7879 1252 // pass handle to mirror
aph@7879 1253 __ add(c_rarg1, rfp, frame::interpreter_frame_oop_temp_offset * wordSize);
aph@7879 1254 __ bind(L);
aph@7879 1255 }
aph@7879 1256
aph@7879 1257 // get native function entry point in r10
aph@7879 1258 {
aph@7879 1259 Label L;
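    // If the slot still holds the stub that throws UnsatisfiedLinkError,
    // the method has not been linked yet; call into the runtime to look
    // the native function up.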
aph@7879 1260 __ ldr(r10, Address(rmethod, Method::native_function_offset()));
aph@7879 1261 address unsatisfied = (SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
aph@7879 1262 __ mov(rscratch2, unsatisfied);
aph@7879 1263 __ ldr(rscratch2, rscratch2);
aph@7879 1264 __ cmp(r10, rscratch2);
aph@7879 1265 __ br(Assembler::NE, L);
aph@7879 1266 __ call_VM(noreg,
aph@7879 1267 CAST_FROM_FN_PTR(address,
aph@7879 1268 InterpreterRuntime::prepare_native_call),
aph@7879 1269 rmethod);
aph@7879 1270 __ get_method(rmethod);
aph@7879 1271 __ ldr(r10, Address(rmethod, Method::native_function_offset()));
aph@7879 1272 __ bind(L);
aph@7879 1273 }
aph@7879 1274
aph@7879 1275 // pass JNIEnv
aph@7879 1276 __ add(c_rarg0, rthread, in_bytes(JavaThread::jni_environment_offset()));
aph@7879 1277
aph@7879 1278 // It is enough that the pc() points into the right code
aph@7879 1279 // segment. It does not have to be the correct return pc.
aph@7879 1280 __ set_last_Java_frame(esp, rfp, (address)NULL, rscratch1);
aph@7879 1281
aph@7879 1282 // change thread state
aph@7879 1283 #ifdef ASSERT
aph@7879 1284 {
aph@7879 1285 Label L;
aph@7879 1286 __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
aph@7879 1287 __ cmp(t, _thread_in_Java);
aph@7879 1288 __ br(Assembler::EQ, L);
aph@7879 1289 __ stop("Wrong thread state in native stub");
aph@7879 1290 __ bind(L);
aph@7879 1291 }
aph@7879 1292 #endif
aph@7879 1293
aph@7879 1294 // Change state to native
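  // stlrw is a store-release: all earlier Java-state writes become
  // visible before any other thread can observe _thread_in_native.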
aph@7879 1295 __ mov(rscratch1, _thread_in_native);
aph@7882 1296 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
aph@7882 1297 __ stlrw(rscratch1, rscratch2);
aph@7879 1298
aph@7879 1299 // Call the native method.
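  // blrt routes the call through the simulator trampoline when
  // BUILTIN_SIM is defined; on real hardware it is effectively a plain blr.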
aph@7879 1300 __ blrt(r10, rscratch1);
aph@7879 1301 __ maybe_isb();
aph@7879 1302 __ get_method(rmethod);
aph@7879 1303 // result potentially in r0 or v0
aph@7879 1304
aph@7879 1305 // make room for the pushes we're about to do
aph@7879 1306 __ sub(rscratch1, esp, 4 * wordSize);
aph@7879 1307 __ andr(sp, rscratch1, -16);
aph@7879 1308
aph@7879 1309 // NOTE: The order of these pushes is known to frame::interpreter_frame_result
aph@7879 1310 // in order to extract the result of a method call. If the order of these
aph@7879 1311 // pushes changes or anything else is added to the stack, then the code in
aph@7879 1312 // interpreter_frame_result must also change.
aph@7879 1313 __ push(dtos);
aph@7879 1314 __ push(ltos);
aph@7879 1315
aph@7879 1316 // change thread state
aph@7879 1317 __ mov(rscratch1, _thread_in_native_trans);
aph@7882 1318 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
aph@7882 1319 __ stlrw(rscratch1, rscratch2);
aph@7879 1320
aph@7879 1321 if (os::is_MP()) {
aph@7879 1322 if (UseMembar) {
aph@7879 1323 // Force this write out before the read below
aph@7879 1324 __ dsb(Assembler::SY);
aph@7879 1325 } else {
aph@7879 1326 // Write the serialization page so the VM thread can do a pseudo remote membar.
aph@7879 1327 // We use the current thread pointer to calculate a thread specific
aph@7879 1328 // offset to write to within the page. This minimizes bus traffic
aph@7879 1329 // due to cache line collision.
aph@7879 1330 __ serialize_memory(rthread, rscratch2);
aph@7879 1331 }
aph@7879 1332 }
aph@7879 1333
aph@7879 1334 // check for safepoint operation in progress and/or pending suspend requests
aph@7879 1335 {
aph@7879 1336 Label Continue;
aph@7879 1337 {
aph@7879 1338 unsigned long offset;
aph@7879 1339 __ adrp(rscratch2, SafepointSynchronize::address_of_state(), offset);
aph@7879 1340 __ ldrw(rscratch2, Address(rscratch2, offset));
aph@7879 1341 }
aph@7879 1342 assert(SafepointSynchronize::_not_synchronized == 0,
aph@7879 1343 "SafepointSynchronize::_not_synchronized");
aph@7879 1344 Label L;
aph@7879 1345 __ cbnz(rscratch2, L);
aph@7879 1346 __ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
aph@7879 1347 __ cbz(rscratch2, Continue);
aph@7879 1348 __ bind(L);
aph@7879 1349
aph@7879 1350 // Don't use call_VM: it would see a possible pending exception,
aph@7879 1351 // forward it, and never return here, which would prevent us from
aph@7879 1352 // clearing _last_native_pc down below. So we do the runtime call
aph@7879 1353 // by hand.
aph@7879 1354 //
aph@7879 1355 __ mov(c_rarg0, rthread);
aph@7879 1356 __ mov(rscratch2, CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
aph@7879 1357 __ blrt(rscratch2, 1, 0, 0);
aph@7879 1358 __ maybe_isb();
aph@7879 1359 __ get_method(rmethod);
aph@7879 1360 __ reinit_heapbase();
aph@7879 1361 __ bind(Continue);
aph@7879 1362 }
aph@7879 1363
aph@7879 1364 // change thread state
aph@7879 1365 __ mov(rscratch1, _thread_in_Java);
aph@7882 1366 __ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
aph@7882 1367 __ stlrw(rscratch1, rscratch2);
aph@7879 1368
aph@7879 1369 // reset_last_Java_frame
aph@11844 1370 __ reset_last_Java_frame(true);
aph@7879 1371
dsimms@11990 1372 if (CheckJNICalls) {
dsimms@11990 1373 // clear_pending_jni_exception_check
dsimms@11990 1374 __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
dsimms@11990 1375 }
dsimms@11990 1376
aph@7879 1377 // reset handle block
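  // zeroing the block's top pointer frees, in effect, all JNI local
  // handles created during the native call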
aph@7879 1378 __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
aph@7879 1379 __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
aph@7879 1380
aph@7879 1381 // If the result is an oop, unbox it and store it in the frame where the
aph@7879 1382 // GC will see it; the result handler will pick it up from there
aph@7879 1383
aph@7879 1384 {
aph@7879 1385 Label no_oop, store_result;
aph@7879 1386 __ adr(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
aph@7879 1387 __ cmp(t, result_handler);
aph@7879 1388 __ br(Assembler::NE, no_oop);
aph@7879 1389 // retrieve result
aph@7879 1390 __ pop(ltos);
aph@7879 1391 __ cbz(r0, store_result);
aph@7879 1392 __ ldr(r0, Address(r0, 0));
aph@7879 1393 __ bind(store_result);
aph@7879 1394 __ str(r0, Address(rfp, frame::interpreter_frame_oop_temp_offset*wordSize));
aph@7879 1395 // keep the stack depth as expected by pushing an oop which will eventually be discarded
aph@7879 1396 __ push(ltos);
aph@7879 1397 __ bind(no_oop);
aph@7879 1398 }
aph@7879 1399
aph@7879 1400 {
aph@7879 1401 Label no_reguard;
aph@7879 1402 __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
enevill@10007 1403 __ ldrw(rscratch1, Address(rscratch1));
enevill@10007 1404 __ cmp(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
aph@7879 1405 __ br(Assembler::NE, no_reguard);
aph@7879 1406
aph@7879 1407 __ pusha(); // XXX only save smashed registers
aph@7879 1408 __ mov(c_rarg0, rthread);
aph@7879 1409 __ mov(rscratch2, CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
aph@7879 1410 __ blrt(rscratch2, 0, 0, 0);
aph@7879 1411 __ popa(); // XXX only restore smashed registers
aph@7879 1412 __ bind(no_reguard);
aph@7879 1413 }
aph@7879 1414
aph@7879 1415 // The method register is junk from after the thread_in_native transition
aph@7879 1416 // until here. Also, we can't call_VM until the bcp has been
aph@7879 1417 // restored. We need the bcp for throwing the exception below, so get it now.
aph@7879 1418 __ get_method(rmethod);
aph@7879 1419
aph@7879 1420 // restore bcp to have legal interpreter frame, i.e., bci == 0 <=>
aph@7879 1421 // rbcp == code_base()
aph@7879 1422 __ ldr(rbcp, Address(rmethod, Method::const_offset())); // get ConstMethod*
aph@7879 1423 __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset())); // get codebase
aph@7879 1424 // handle exceptions (exception handling will handle unlocking!)
aph@7879 1425 {
aph@7879 1426 Label L;
aph@7879 1427 __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
aph@7879 1428 __ cbz(rscratch1, L);
aph@7879 1429 // Note: At some point we may want to unify this with the code
aph@7879 1430 // used in call_VM_base(); i.e., we should use the
aph@7879 1431 // StubRoutines::forward_exception code. For now this doesn't work
aph@7879 1432 // here because the rsp is not correctly set at this point.
aph@7879 1433 __ MacroAssembler::call_VM(noreg,
aph@7879 1434 CAST_FROM_FN_PTR(address,
aph@7879 1435 InterpreterRuntime::throw_pending_exception));
aph@7879 1436 __ should_not_reach_here();
aph@7879 1437 __ bind(L);
aph@7879 1438 }
aph@7879 1439
aph@7879 1440 // do unlocking if necessary
aph@7879 1441 {
aph@7879 1442 Label L;
aph@7879 1443 __ ldrw(t, Address(rmethod, Method::access_flags_offset()));
fyang@11440 1444 __ tbz(t, exact_log2(JVM_ACC_SYNCHRONIZED), L);
aph@7879 1445 // the code below should be shared with interpreter macro
aph@7879 1446 // assembler implementation
aph@7879 1447 {
aph@7879 1448 Label unlock;
aph@7879 1449 // BasicObjectLock will be first in list, since this is a
aph@7879 1450 // synchronized method. However, need to check that the object
aph@7879 1451 // has not been unlocked by an explicit monitorexit bytecode.
aph@7879 1452
aph@7879 1453 // the monitor is expected in c_rarg1 for the slow unlock path
aph@7879 1454 __ lea (c_rarg1, Address(rfp, // address of first monitor
aph@7879 1455 (intptr_t)(frame::interpreter_frame_initial_sp_offset *
aph@7879 1456 wordSize - sizeof(BasicObjectLock))));
aph@7879 1457
aph@7879 1458 __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
aph@7879 1459 __ cbnz(t, unlock);
aph@7879 1460
aph@7879 1461 // Entry already unlocked, need to throw exception
aph@7879 1462 __ MacroAssembler::call_VM(noreg,
aph@7879 1463 CAST_FROM_FN_PTR(address,
aph@7879 1464 InterpreterRuntime::throw_illegal_monitor_state_exception));
aph@7879 1465 __ should_not_reach_here();
aph@7879 1466
aph@7879 1467 __ bind(unlock);
aph@7879 1468 __ unlock_object(c_rarg1);
aph@7879 1469 }
aph@7879 1470 __ bind(L);
aph@7879 1471 }
aph@7879 1472
aph@7879 1473 // jvmti support
aph@7879 1474 // Note: This must happen _after_ handling/throwing any exceptions since
aph@7879 1475 // the exception handler code notifies the runtime of method exits
aph@7879 1476 // too. If this happens before, method entry/exit notifications are
aph@7879 1477 // not properly paired (was bug - gri 11/22/99).
aph@7879 1478 __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
aph@7879 1479
aph@7879 1480 // restore the potential result in r0:d0 and call the result handler to
aph@7879 1481 // convert it into the expected Java result type
aph@7879 1482
aph@7879 1483 __ pop(ltos);
aph@7879 1484 __ pop(dtos);
aph@7879 1485
aph@7879 1486 __ blr(result_handler);
aph@7879 1487
aph@7879 1488 // remove activation
aph@7879 1489 __ ldr(esp, Address(rfp,
aph@7879 1490 frame::interpreter_frame_sender_sp_offset *
aph@7879 1491 wordSize)); // get sender sp
aph@7879 1492 // remove frame anchor
aph@7879 1493 __ leave();
aph@7879 1494
aph@7879 1495 // restore sender sp
aph@7879 1496 __ mov(sp, esp);
aph@7879 1497
aph@7879 1498 __ ret(lr);
aph@7879 1499
aph@7879 1500 if (inc_counter) {
aph@7879 1501 // Handle overflow of counter and compile method
aph@7879 1502 __ bind(invocation_counter_overflow);
coleenp@9879 1503 generate_counter_overflow(continue_after_compile);
aph@7879 1504 }
aph@7879 1505
aph@7879 1506 return entry_point;
aph@7879 1507 }
aph@7879 1508
aph@7879 1509 //
aph@7879 1510 // Generic interpreted method entry to (asm) interpreter
aph@7879 1511 //
coleenp@9879 1512 address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
aph@7879 1513 // determine code generation flags
minqi@8705 1514 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
aph@7879 1515
aph@7879 1516 // rscratch1: sender sp
aph@7879 1517 address entry_point = __ pc();
aph@7879 1518
aph@7879 1519 const Address constMethod(rmethod, Method::const_offset());
aph@7879 1520 const Address access_flags(rmethod, Method::access_flags_offset());
aph@7879 1521 const Address size_of_parameters(r3,
aph@7879 1522 ConstMethod::size_of_parameters_offset());
aph@7879 1523 const Address size_of_locals(r3, ConstMethod::size_of_locals_offset());
aph@7879 1524
aph@7879 1525 // get parameter size (always needed)
aph@7879 1526 // need to load the const method first
aph@7879 1527 __ ldr(r3, constMethod);
aph@7879 1528 __ load_unsigned_short(r2, size_of_parameters);
aph@7879 1529
aph@7879 1530 // r2: size of parameters
aph@7879 1531
aph@7879 1532 __ load_unsigned_short(r3, size_of_locals); // get size of locals in words
aph@7879 1533 __ sub(r3, r3, r2); // r3 = no. of additional locals
aph@7879 1534
aph@7879 1535 // see if we've got enough room on the stack for locals plus overhead.
aph@7879 1536 generate_stack_overflow_check();
aph@7879 1537
aph@7879 1538 // compute beginning of parameters (rlocals)
aph@7879 1539 __ add(rlocals, esp, r2, ext::uxtx, 3);
aph@7879 1540 __ sub(rlocals, rlocals, wordSize);
aph@7879 1541
aph@7879 1542 // Make room for locals
aph@7879 1543 __ sub(rscratch1, esp, r3, ext::uxtx, 3);
aph@7879 1544 __ andr(sp, rscratch1, -16);
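  // Layout sketch now (addresses decrease downward):
  //   rlocals -> parameter 0             (highest address)
  //              ...
  //              last parameter
  //              additional locals        (zeroed just below)
  //   sp      -> 16-byte aligned limit below the locals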
aph@7879 1545
aph@7879 1546 // r3 - # of additional locals
aph@7879 1547 // allocate space for locals
aph@7879 1548 // explicitly initialize locals
aph@7879 1549 {
aph@7879 1550 Label exit, loop;
aph@7879 1551 __ ands(zr, r3, r3);
aph@7879 1552 __ br(Assembler::LE, exit); // do nothing if r3 <= 0
aph@7879 1553 __ bind(loop);
aph@7879 1554 __ str(zr, Address(__ post(rscratch1, wordSize)));
aph@7879 1555 __ sub(r3, r3, 1); // until everything initialized
aph@7879 1556 __ cbnz(r3, loop);
aph@7879 1557 __ bind(exit);
aph@7879 1558 }
aph@7879 1559
aph@7879 1560 // Load the base of the bytecode dispatch table
aph@7879 1561 __ get_dispatch();
aph@7879 1562
aph@7879 1563 // initialize fixed part of activation frame
aph@7879 1564 generate_fixed_frame(false);
aph@7879 1565 #ifndef PRODUCT
aph@7879 1566 // tell the simulator that a method has been entered
aph@7879 1567 if (NotifySimulator) {
aph@7879 1568 __ notify(Assembler::method_entry);
aph@7879 1569 }
aph@7879 1570 #endif
aph@7879 1571 // make sure method is not native & not abstract
aph@7879 1572 #ifdef ASSERT
aph@7879 1573 __ ldrw(r0, access_flags);
aph@7879 1574 {
aph@7879 1575 Label L;
aph@7879 1576 __ tst(r0, JVM_ACC_NATIVE);
aph@7879 1577 __ br(Assembler::EQ, L);
aph@7879 1578 __ stop("tried to execute native method as non-native");
aph@7879 1579 __ bind(L);
aph@7879 1580 }
aph@7879 1581 {
aph@7879 1582 Label L;
aph@7879 1583 __ tst(r0, JVM_ACC_ABSTRACT);
aph@7879 1584 __ br(Assembler::EQ, L);
aph@7879 1585 __ stop("tried to execute abstract method in interpreter");
aph@7879 1586 __ bind(L);
aph@7879 1587 }
aph@7879 1588 #endif
aph@7879 1589
aph@7879 1590 // Since at this point in the method invocation the exception
aph@7879 1591 // handler would try to exit the monitor of synchronized methods
aph@7879 1592 // which hasn't been entered yet, we set the thread local variable
aph@7879 1593 // _do_not_unlock_if_synchronized to true. The remove_activation
aph@7879 1594 // will check this flag.
aph@7879 1595
aph@7879 1596 const Address do_not_unlock_if_synchronized(rthread,
aph@7879 1597 in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
aph@7879 1598 __ mov(rscratch2, true);
aph@7879 1599 __ strb(rscratch2, do_not_unlock_if_synchronized);
aph@7879 1600
aph@7879 1601 // increment invocation count & check for overflow
aph@7879 1602 Label invocation_counter_overflow;
aph@7879 1603 Label profile_method;
aph@7879 1604 Label profile_method_continue;
aph@7879 1605 if (inc_counter) {
aph@7879 1606 generate_counter_incr(&invocation_counter_overflow,
aph@7879 1607 &profile_method,
aph@7879 1608 &profile_method_continue);
aph@7879 1609 if (ProfileInterpreter) {
aph@7879 1610 __ bind(profile_method_continue);
aph@7879 1611 }
aph@7879 1612 }
aph@7879 1613
aph@7879 1614 Label continue_after_compile;
aph@7879 1615 __ bind(continue_after_compile);
aph@7879 1616
aph@7879 1617 bang_stack_shadow_pages(false);
aph@7879 1618
aph@7879 1619 // reset the _do_not_unlock_if_synchronized flag
aph@7879 1620 __ strb(zr, do_not_unlock_if_synchronized);
aph@7879 1621
aph@7879 1622 // check for synchronized methods
aph@7879 1623 // Must happen AFTER invocation_counter check and stack overflow check,
aph@7879 1624 // so the method is not locked if the counter overflows.
aph@7879 1625 if (synchronized) {
aph@7879 1626 // Allocate monitor and lock method
aph@7879 1627 lock_method();
aph@7879 1628 } else {
aph@7879 1629 // no synchronization necessary
aph@7879 1630 #ifdef ASSERT
aph@7879 1631 {
aph@7879 1632 Label L;
aph@7879 1633 __ ldrw(r0, access_flags);
aph@7879 1634 __ tst(r0, JVM_ACC_SYNCHRONIZED);
aph@7879 1635 __ br(Assembler::EQ, L);
aph@7879 1636 __ stop("method needs synchronization");
aph@7879 1637 __ bind(L);
aph@7879 1638 }
aph@7879 1639 #endif
aph@7879 1640 }
aph@7879 1641
aph@7879 1642 // start execution
aph@7879 1643 #ifdef ASSERT
aph@7879 1644 {
aph@7879 1645 Label L;
aph@7879 1646 const Address monitor_block_top(rfp,
aph@7879 1647 frame::interpreter_frame_monitor_block_top_offset * wordSize);
aph@7879 1648 __ ldr(rscratch1, monitor_block_top);
aph@7879 1649 __ cmp(esp, rscratch1);
aph@7879 1650 __ br(Assembler::EQ, L);
aph@7879 1651 __ stop("broken stack frame setup in interpreter");
aph@7879 1652 __ bind(L);
aph@7879 1653 }
aph@7879 1654 #endif
aph@7879 1655
aph@7879 1656 // jvmti support
aph@7879 1657 __ notify_method_entry();
aph@7879 1658
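  // fetch the first bytecode and jump to its handler through the
  // dispatch table loaded by get_dispatch() above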
aph@7879 1659 __ dispatch_next(vtos);
aph@7879 1660
aph@7879 1661 // invocation counter overflow
aph@7879 1662 if (inc_counter) {
aph@7879 1663 if (ProfileInterpreter) {
aph@7879 1664 // We have decided to profile this method in the interpreter
aph@7879 1665 __ bind(profile_method);
aph@7879 1666 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
aph@7879 1667 __ set_method_data_pointer_for_bcp();
aph@7879 1668 // note: this reload of the method is probably unnecessary
aph@7879 1669 __ get_method(r1);
aph@7879 1670 __ b(profile_method_continue);
aph@7879 1671 }
aph@7879 1672 // Handle overflow of counter and compile method
aph@7879 1673 __ bind(invocation_counter_overflow);
coleenp@9879 1674 generate_counter_overflow(continue_after_compile);
aph@7879 1675 }
aph@7879 1676
aph@7879 1677 return entry_point;
aph@7879 1678 }
aph@7879 1679
aph@7879 1680 //-----------------------------------------------------------------------------
aph@7879 1681 // Exceptions
aph@7879 1682
aph@7879 1683 void TemplateInterpreterGenerator::generate_throw_exception() {
aph@7879 1684 // Entry point in previous activation (i.e., if the caller was
aph@7879 1685 // interpreted)
aph@7879 1686 Interpreter::_rethrow_exception_entry = __ pc();
aph@7879 1687 // Restore sp to interpreter_frame_last_sp even though we are going
aph@7879 1688 // to empty the expression stack for the exception processing.
aph@7879 1689 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@7879 1690 // r0: exception
aph@7879 1691 // r3: return address/pc that threw exception
aph@7879 1692 __ restore_bcp(); // rbcp points to call/send
aph@7879 1693 __ restore_locals();
aph@7879 1694 __ restore_constant_pool_cache();
aph@7879 1695 __ reinit_heapbase(); // restore rheapbase as heapbase.
aph@7879 1696 __ get_dispatch();
aph@7879 1697
aph@7879 1698 #ifndef PRODUCT
aph@7879 1699 // tell the simulator that the caller method has been reentered
aph@7879 1700 if (NotifySimulator) {
aph@7879 1701 __ get_method(rmethod);
aph@7879 1702 __ notify(Assembler::method_reentry);
aph@7879 1703 }
aph@7879 1704 #endif
aph@7879 1705 // Entry point for exceptions thrown within interpreter code
aph@7879 1706 Interpreter::_throw_exception_entry = __ pc();
aph@7879 1707 // If we came here via a NullPointerException on the receiver of a
aph@7879 1708 // method, rmethod may be corrupt.
aph@7879 1709 __ get_method(rmethod);
aph@7879 1710 // expression stack is undefined here
aph@7879 1711 // r0: exception
aph@7879 1712 // rbcp: exception bcp
aph@7879 1713 __ verify_oop(r0);
aph@7879 1714 __ mov(c_rarg1, r0);
aph@7879 1715
aph@7879 1716 // expression stack must be empty before entering the VM in case of
aph@7879 1717 // an exception
aph@7879 1718 __ empty_expression_stack();
aph@7879 1719 // find exception handler address and preserve exception oop
aph@7879 1720 __ call_VM(r3,
aph@7879 1721 CAST_FROM_FN_PTR(address,
aph@7879 1722 InterpreterRuntime::exception_handler_for_exception),
aph@7879 1723 c_rarg1);
aph@7879 1724
aph@7879 1725 // Calculate stack limit
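  // Rebuild sp from the method's max_stack so the handler runs with a
  // full, empty expression stack: initial_sp minus
  // (max_stack + monitor size + 4) words, rounded down to 16 bytes.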
aph@7879 1726 __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
aph@7879 1727 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
aph@7879 1728 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
aph@7879 1729 __ ldr(rscratch2,
aph@7879 1730 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
aph@7879 1731 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtx, 3);
aph@7879 1732 __ andr(sp, rscratch1, -16);
aph@7879 1733
aph@7879 1734 // r0: exception handler entry point
aph@7879 1735 // r3: preserved exception oop
aph@7879 1736 // rbcp: bcp for exception handler
aph@7879 1737 __ push_ptr(r3); // push exception which is now the only value on the stack
aph@7879 1738 __ br(r0); // jump to exception handler (may be _remove_activation_entry!)
aph@7879 1739
aph@7879 1740 // If the exception is not handled in the current frame the frame is
aph@7879 1741 // removed and the exception is rethrown (i.e. exception
aph@7879 1742 // continuation is _rethrow_exception).
aph@7879 1743 //
aph@7879 1744 // Note: At this point the bci is still that of the instruction
aph@7879 1745 // which caused the exception and the expression stack is
aph@7879 1746 // empty. Thus, for any VM calls at this point, GC will find a legal
aph@7879 1747 // oop map (with empty expression stack).
aph@7879 1748
aph@7879 1749 //
aph@7879 1750 // JVMTI PopFrame support
aph@7879 1751 //
aph@7879 1752
aph@7879 1753 Interpreter::_remove_activation_preserving_args_entry = __ pc();
aph@7879 1754 __ empty_expression_stack();
aph@7879 1755 // Set the popframe_processing bit in pending_popframe_condition
aph@7879 1756 // indicating that we are currently handling popframe, so that
aph@7879 1757 // call_VMs that may happen later do not trigger new popframe
aph@7879 1758 // handling cycles.
aph@7879 1759 __ ldrw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
aph@7879 1760 __ orr(r3, r3, JavaThread::popframe_processing_bit);
aph@7879 1761 __ strw(r3, Address(rthread, JavaThread::popframe_condition_offset()));
aph@7879 1762
aph@7879 1763 {
aph@7879 1764 // Check to see whether we are returning to a deoptimized frame.
aph@7879 1765 // (The PopFrame call ensures that the caller of the popped frame is
aph@7879 1766 // either interpreted or compiled and deoptimizes it if compiled.)
aph@7879 1767 // In this case, we can't call dispatch_next() after the frame is
aph@7879 1768 // popped, but instead must save the incoming arguments and restore
aph@7879 1769 // them after deoptimization has occurred.
aph@7879 1770 //
aph@7879 1771 // Note that we don't compare the return PC against the
aph@7879 1772 // deoptimization blob's unpack entry because of the presence of
aph@7879 1773 // adapter frames in C2.
aph@7879 1774 Label caller_not_deoptimized;
aph@7879 1775 __ ldr(c_rarg1, Address(rfp, frame::return_addr_offset * wordSize));
aph@7879 1776 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
aph@7879 1777 InterpreterRuntime::interpreter_contains), c_rarg1);
aph@7879 1778 __ cbnz(r0, caller_not_deoptimized);
aph@7879 1779
aph@7879 1780 // Compute size of arguments for saving when returning to
aph@7879 1781 // deoptimized caller
aph@7879 1782 __ get_method(r0);
aph@7879 1783 __ ldr(r0, Address(r0, Method::const_offset()));
aph@7879 1784 __ load_unsigned_short(r0, Address(r0, in_bytes(ConstMethod::
aph@7879 1785 size_of_parameters_offset())));
aph@7879 1786 __ lsl(r0, r0, Interpreter::logStackElementSize);
aph@7879 1787 __ restore_locals(); // XXX do we need this?
aph@7879 1788 __ sub(rlocals, rlocals, r0);
aph@7879 1789 __ add(rlocals, rlocals, wordSize);
aph@7879 1790 // Save these arguments
aph@7879 1791 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
aph@7879 1792 Deoptimization::
aph@7879 1793 popframe_preserve_args),
aph@7879 1794 rthread, r0, rlocals);
aph@7879 1795
aph@7879 1796 __ remove_activation(vtos,
aph@7879 1797 /* throw_monitor_exception */ false,
aph@7879 1798 /* install_monitor_exception */ false,
aph@7879 1799 /* notify_jvmdi */ false);
aph@7879 1800
aph@7879 1801 // Inform deoptimization that it is responsible for restoring
aph@7879 1802 // these arguments
aph@7879 1803 __ mov(rscratch1, JavaThread::popframe_force_deopt_reexecution_bit);
aph@7879 1804 __ strw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));
aph@7879 1805
aph@7879 1806 // Continue in deoptimization handler
aph@7879 1807 __ ret(lr);
aph@7879 1808
aph@7879 1809 __ bind(caller_not_deoptimized);
aph@7879 1810 }
aph@7879 1811
aph@7879 1812 __ remove_activation(vtos,
aph@7879 1813 /* throw_monitor_exception */ false,
aph@7879 1814 /* install_monitor_exception */ false,
aph@7879 1815 /* notify_jvmdi */ false);
aph@7879 1816
aph@7879 1817 // Restore the last_sp and null it out
aph@7879 1818 __ ldr(esp, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@7879 1819 __ str(zr, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
aph@7879 1820
aph@7879 1821 __ restore_bcp();
aph@7879 1822 __ restore_locals();
aph@7879 1823 __ restore_constant_pool_cache();
aph@7879 1824 __ get_method(rmethod);
aph@7879 1825
aph@7879 1826 // The method data pointer was incremented already during
aph@7879 1827 // call profiling. We have to restore the mdp for the current bcp.
aph@7879 1828 if (ProfileInterpreter) {
aph@7879 1829 __ set_method_data_pointer_for_bcp();
aph@7879 1830 }
aph@7879 1831
aph@7879 1832 // Clear the popframe condition flag
aph@7879 1833 __ strw(zr, Address(rthread, JavaThread::popframe_condition_offset()));
aph@7879 1834 assert(JavaThread::popframe_inactive == 0, "fix popframe_inactive");
aph@7879 1835
aph@7879 1836 #if INCLUDE_JVMTI
aph@7879 1837 {
aph@7879 1838 Label L_done;
aph@7879 1839
aph@7879 1840 __ ldrb(rscratch1, Address(rbcp, 0));
aph@7879 1841 __ cmpw(rscratch1, Bytecodes::_invokestatic);
aph@7879 1842 __ br(Assembler::NE, L_done);
aph@7879 1843
aph@7879 1844 // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
aph@7879 1845 // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
aph@7879 1846
aph@7879 1847 __ ldr(c_rarg0, Address(rlocals, 0));
aph@7879 1848 __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), c_rarg0, rmethod, rbcp);
aph@7879 1849
aph@7879 1850 __ cbz(r0, L_done);
aph@7879 1851
aph@7879 1852 __ str(r0, Address(esp, 0));
aph@7879 1853 __ bind(L_done);
aph@7879 1854 }
aph@7879 1855 #endif // INCLUDE_JVMTI
aph@7879 1856
aph@7879 1857 // Restore machine SP
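  // Same computation as "Calculate stack limit" above: the popped frame
  // may have left sp anywhere, so rebuild it from max_stack.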
aph@7879 1858 __ ldr(rscratch1, Address(rmethod, Method::const_offset()));
aph@7879 1859 __ ldrh(rscratch1, Address(rscratch1, ConstMethod::max_stack_offset()));
aph@7879 1860 __ add(rscratch1, rscratch1, frame::interpreter_frame_monitor_size() + 4);
aph@7879 1861 __ ldr(rscratch2,
aph@7879 1862 Address(rfp, frame::interpreter_frame_initial_sp_offset * wordSize));
aph@7879 1863 __ sub(rscratch1, rscratch2, rscratch1, ext::uxtw, 3);
aph@7879 1864 __ andr(sp, rscratch1, -16);
aph@7879 1865
aph@7879 1866 __ dispatch_next(vtos);
aph@7879 1867 // end of PopFrame support
aph@7879 1868
aph@7879 1869 Interpreter::_remove_activation_entry = __ pc();
aph@7879 1870
aph@7879 1871 // preserve exception over this code sequence
aph@7879 1872 __ pop_ptr(r0);
aph@7879 1873 __ str(r0, Address(rthread, JavaThread::vm_result_offset()));
aph@7879 1874 // remove the activation (without doing throws on illegalMonitorExceptions)
aph@7879 1875 __ remove_activation(vtos, false, true, false);
aph@7879 1876 // restore exception
aph@7879 1878 __ get_vm_result(r0, rthread);
aph@7879 1879
aph@7879 1880 // In between activations - previous activation type unknown yet
aph@7879 1881 // compute continuation point - the continuation point expects the
aph@7879 1882 // following registers set up:
aph@7879 1883 //
aph@7879 1884 // r0: exception
aph@7879 1885 // lr: return address/pc that threw exception
aph@7879 1886 // rsp: expression stack of caller
aph@7879 1887 // rfp: fp of caller
aph@7879 1888 // FIXME: There's no point saving LR here because VM calls don't trash it
aph@7879 1889 __ stp(r0, lr, Address(__ pre(sp, -2 * wordSize))); // save exception & return address
aph@7879 1890 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
aph@7879 1891 SharedRuntime::exception_handler_for_return_address),
aph@7879 1892 rthread, lr);
aph@7879 1893 __ mov(r1, r0); // save exception handler
aph@7879 1894 __ ldp(r0, lr, Address(__ post(sp, 2 * wordSize))); // restore exception & return address
aph@7879 1895 // We might be returning to a deopt handler that expects r3 to
aph@7879 1896 // contain the exception pc
aph@7879 1897 __ mov(r3, lr);
aph@7879 1898 // Note that an "issuing PC" is actually the next PC after the call
aph@7879 1899 __ br(r1); // jump to exception
aph@7879 1900 // handler of caller
aph@7879 1901 }
aph@7879 1902
aph@7879 1903
aph@7879 1904 //
aph@7879 1905 // JVMTI ForceEarlyReturn support
aph@7879 1906 //
aph@7879 1907 address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
aph@7879 1908 address entry = __ pc();
aph@7879 1909
aph@7879 1910 __ restore_bcp();
aph@7879 1911 __ restore_locals();
aph@7879 1912 __ empty_expression_stack();
aph@7879 1913 __ load_earlyret_value(state);
aph@7879 1914
aph@7879 1915 __ ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
aph@7879 1916 Address cond_addr(rscratch1, JvmtiThreadState::earlyret_state_offset());
aph@7879 1917
aph@7879 1918 // Clear the earlyret state
aph@7879 1919 assert(JvmtiThreadState::earlyret_inactive == 0, "should be");
aph@7879 1920 __ str(zr, cond_addr);
aph@7879 1921
aph@7879 1922 __ remove_activation(state,
aph@7879 1923 false, /* throw_monitor_exception */
aph@7879 1924 false, /* install_monitor_exception */
aph@7879 1925 true); /* notify_jvmdi */
aph@7879 1926 __ ret(lr);
aph@7879 1927
aph@7879 1928 return entry;
aph@7879 1929 } // end of ForceEarlyReturn support
aph@7879 1930
aph@7879 1931
aph@7879 1932
aph@7879 1933 //-----------------------------------------------------------------------------
aph@7879 1934 // Helper for vtos entry point generation
aph@7879 1935
aph@7879 1936 void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
aph@7879 1937 address& bep,
aph@7879 1938 address& cep,
aph@7879 1939 address& sep,
aph@7879 1940 address& aep,
aph@7879 1941 address& iep,
aph@7879 1942 address& lep,
aph@7879 1943 address& fep,
aph@7879 1944 address& dep,
aph@7879 1945 address& vep) {
aph@7879 1946 assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
aph@7879 1947 Label L;
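  // Each non-vtos entry pushes its tos-cached value onto the Java
  // expression stack, so every state funnels into the single vtos
  // dispatch at L.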
aph@7879 1948 aep = __ pc(); __ push_ptr(); __ b(L);
aph@7879 1949 fep = __ pc(); __ push_f(); __ b(L);
aph@7879 1950 dep = __ pc(); __ push_d(); __ b(L);
aph@7879 1951 lep = __ pc(); __ push_l(); __ b(L);
aph@7879 1952 bep = cep = sep =
aph@7879 1953 iep = __ pc(); __ push_i();
aph@7879 1954 vep = __ pc();
aph@7879 1955 __ bind(L);
aph@7879 1956 generate_and_dispatch(t);
aph@7879 1957 }
aph@7879 1958
aph@7879 1959 //-----------------------------------------------------------------------------
aph@7879 1960
aph@7879 1961 // Non-product code
aph@7879 1962 #ifndef PRODUCT
aph@7879 1963 address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
aph@7879 1964 address entry = __ pc();
aph@7879 1965
aph@7879 1966 __ push(lr);
aph@7879 1967 __ push(state);
aph@7879 1968 __ push(RegSet::range(r0, r15), sp);
aph@7879 1969 __ mov(c_rarg2, r0); // Pass itos
aph@7879 1970 __ call_VM(noreg,
coleenp@10665 1971 CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode),
aph@7879 1972 c_rarg1, c_rarg2, c_rarg3);
aph@7879 1973 __ pop(RegSet::range(r0, r15), sp);
aph@7879 1974 __ pop(state);
aph@7879 1975 __ pop(lr);
aph@7879 1976 __ ret(lr); // return from result handler
aph@7879 1977
aph@7879 1978 return entry;
aph@7879 1979 }
aph@7879 1980
aph@7879 1981 void TemplateInterpreterGenerator::count_bytecode() {
enevill@8882 1982 Register rscratch3 = r0;
aph@7879 1983 __ push(rscratch1);
aph@7879 1984 __ push(rscratch2);
enevill@8882 1985 __ push(rscratch3);
enevill@10782 1986 __ mov(rscratch3, (address) &BytecodeCounter::_counter_value);
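  // atomic_add bumps the shared counter in one step; noreg means the
  // previous value is not wanted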
enevill@10782 1987 __ atomic_add(noreg, 1, rscratch3);
enevill@8882 1988 __ pop(rscratch3);
aph@7879 1989 __ pop(rscratch2);
aph@7879 1990 __ pop(rscratch1);
aph@7879 1991 }
aph@7879 1992
aph@7879 1993 void TemplateInterpreterGenerator::histogram_bytecode(Template* t) { ; }
aph@7879 1994
aph@7879 1995 void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) { ; }
aph@7879 1996
aph@7879 1997
aph@7879 1998 void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
aph@7879 1999 // Call a little run-time stub to avoid blow-up for each bytecode.
aph@7879 2000 // The run-time stub saves the right registers, depending on
aph@7879 2001 // the tosca in-state for the given template.
aph@7879 2002
aph@7879 2003 assert(Interpreter::trace_code(t->tos_in()) != NULL,
aph@7879 2004 "entry must have been generated");
aph@7879 2005 __ bl(Interpreter::trace_code(t->tos_in()));
aph@7879 2006 __ reinit_heapbase();
aph@7879 2007 }
aph@7879 2008
aph@7879 2009
aph@7879 2010 void TemplateInterpreterGenerator::stop_interpreter_at() {
aph@7879 2011 Label L;
aph@7879 2012 __ push(rscratch1);
aph@7879 2013 __ mov(rscratch1, (address) &BytecodeCounter::_counter_value);
aph@7879 2014 __ ldr(rscratch1, Address(rscratch1));
aph@7879 2015 __ mov(rscratch2, StopInterpreterAt);
aph@7879 2016 __ cmpw(rscratch1, rscratch2);
aph@7879 2017 __ br(Assembler::NE, L);
aph@7879 2018 __ brk(0);
aph@7879 2019 __ bind(L);
aph@7879 2020 __ pop(rscratch1);
aph@7879 2021 }
aph@7879 2022
aph@7879 2023 #ifdef BUILTIN_SIM
aph@7879 2024
aph@7879 2025 #include <sys/mman.h>
aph@7879 2026 #include <unistd.h>
aph@7879 2027
aph@7879 2028 extern "C" {
aph@7879 2029 static int PAGESIZE = getpagesize();
aph@7879 2030 int is_mapped_address(u_int64_t address)
aph@7879 2031 {
aph@7879 2032 address = (address & ~((u_int64_t)PAGESIZE - 1));
aph@7879 2033 if (msync((void *)address, PAGESIZE, MS_ASYNC) == 0) {
aph@7879 2034 return true;
aph@7879 2035 }
aph@7879 2036 if (errno != ENOMEM) {
aph@7879 2037 return true;
aph@7879 2038 }
aph@7879 2039 return false;
aph@7879 2040 }
aph@7879 2041
aph@7879 2042 void bccheck1(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
aph@7879 2043 {
aph@7879 2044 if (method != 0) {
aph@7879 2045 method[0] = '\0';
aph@7879 2046 }
aph@7879 2047 if (bcidx != 0) {
aph@7879 2048 *bcidx = -2;
aph@7879 2049 }
aph@7879 2050 if (decode != 0) {
aph@7879 2051 decode[0] = 0;
aph@7879 2052 }
aph@7879 2053
aph@7879 2054 if (framesize != 0) {
aph@7879 2055 *framesize = -1;
aph@7879 2056 }
aph@7879 2057
aph@7879 2058 if (Interpreter::contains((address)pc)) {
aph@7879 2059 AArch64Simulator *sim = AArch64Simulator::get_current(UseSimulatorCache, DisableBCCheck);
aph@7879 2060 Method* meth;
aph@7879 2061 address bcp;
aph@7879 2062 if (fp) {
aph@7879 2063 #define FRAME_SLOT_METHOD 3
aph@7879 2064 #define FRAME_SLOT_BCP 7
aph@7879 2065 meth = (Method*)sim->getMemory()->loadU64(fp - (FRAME_SLOT_METHOD << 3));
aph@7879 2066 bcp = (address)sim->getMemory()->loadU64(fp - (FRAME_SLOT_BCP << 3));
aph@7879 2067 #undef FRAME_SLOT_METHOD
aph@7879 2068 #undef FRAME_SLOT_BCP
aph@7879 2069 } else {
aph@7879 2070 meth = (Method*)sim->getCPUState().xreg(RMETHOD, 0);
aph@7879 2071 bcp = (address)sim->getCPUState().xreg(RBCP, 0);
aph@7879 2072 }
aph@7879 2073 if (meth->is_native()) {
aph@7879 2074 return;
aph@7879 2075 }
aph@7879 2076 if (method && meth->is_method()) {
aph@7879 2077 ResourceMark rm;
aph@7879 2078 method[0] = 'I';
aph@7879 2079 method[1] = ' ';
aph@7879 2080 meth->name_and_sig_as_C_string(method + 2, 398);
aph@7879 2081 }
aph@7879 2082 if (bcidx) {
aph@7879 2083 if (meth->contains(bcp)) {
aph@7879 2084 *bcidx = meth->bci_from(bcp);
aph@7879 2085 } else {
aph@7879 2086 *bcidx = -2;
aph@7879 2087 }
aph@7879 2088 }
aph@7879 2089 if (decode) {
aph@7879 2090 if (!BytecodeTracer::closure()) {
aph@7879 2091 BytecodeTracer::set_closure(BytecodeTracer::std_closure());
aph@7879 2092 }
aph@7879 2093 stringStream str(decode, 400);
aph@7879 2094 BytecodeTracer::trace(meth, bcp, &str);
aph@7879 2095 }
aph@7879 2096 } else {
aph@7879 2097 if (method) {
aph@7879 2098 CodeBlob *cb = CodeCache::find_blob((address)pc);
aph@7879 2099 if (cb != NULL) {
aph@7879 2100 if (cb->is_nmethod()) {
aph@7879 2101 ResourceMark rm;
aph@7879 2102 nmethod* nm = (nmethod*)cb;
aph@7879 2103 method[0] = 'C';
aph@7879 2104 method[1] = ' ';
aph@7879 2105 nm->method()->name_and_sig_as_C_string(method + 2, 398);
aph@7879 2106 } else if (cb->is_adapter_blob()) {
aph@7879 2107 strcpy(method, "B adapter blob");
aph@7879 2108 } else if (cb->is_runtime_stub()) {
aph@7879 2109 strcpy(method, "B runtime stub");
aph@7879 2110 } else if (cb->is_exception_stub()) {
aph@7879 2111 strcpy(method, "B exception stub");
aph@7879 2112 } else if (cb->is_deoptimization_stub()) {
aph@7879 2113 strcpy(method, "B deoptimization stub");
aph@7879 2114 } else if (cb->is_safepoint_stub()) {
aph@7879 2115 strcpy(method, "B safepoint stub");
aph@7879 2116 } else if (cb->is_uncommon_trap_stub()) {
aph@7879 2117 strcpy(method, "B uncommon trap stub");
aph@7879 2118 } else if (cb->contains((address)StubRoutines::call_stub())) {
aph@7879 2119 strcpy(method, "B call stub");
aph@7879 2120 } else {
aph@7879 2121 strcpy(method, "B unknown blob : ");
aph@7879 2122 strcat(method, cb->name());
aph@7879 2123 }
aph@7879 2124 if (framesize != NULL) {
aph@7879 2125 *framesize = cb->frame_size();
aph@7879 2126 }
aph@7879 2127 }
aph@7879 2128 }
aph@7879 2129 }
aph@7879 2130 }
aph@7879 2131
aph@7879 2132
aph@7879 2133 JNIEXPORT void bccheck(u_int64_t pc, u_int64_t fp, char *method, int *bcidx, int *framesize, char *decode)
aph@7879 2134 {
aph@7879 2135 bccheck1(pc, fp, method, bcidx, framesize, decode);
aph@7879 2136 }
aph@7879 2137 }
aph@7879 2138
aph@7879 2139 #endif // BUILTIN_SIM
aph@7879 2140 #endif // !PRODUCT