annotate src/cpu/x86/vm/templateTable_x86_32.cpp @ 1838:3b2dea75431e

6984311: JSR 292 needs optional bootstrap method parameters
Summary: Allow CONSTANT_InvokeDynamic nodes to have any number of extra operands.
Reviewed-by: twisti
author jrose
date Sat, 30 Oct 2010 13:08:23 -0700
parents d55217dc206f
children f95d63e2154a
rev   line source
duke@0 1 /*
trims@1472 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25 #include "incls/_precompiled.incl"
duke@0 26 #include "incls/_templateTable_x86_32.cpp.incl"
duke@0 27
duke@0 28 #ifndef CC_INTERP
duke@0 29 #define __ _masm->
duke@0 30
duke@0 31 //----------------------------------------------------------------------------------------------------
duke@0 32 // Platform-dependent initialization
duke@0 33
duke@0 34 void TemplateTable::pd_initialize() {
duke@0 35 // No i486-specific initialization
duke@0 36 }
duke@0 37
duke@0 38 //----------------------------------------------------------------------------------------------------
duke@0 39 // Address computation
duke@0 40
duke@0 41 // local variables
duke@0 42 static inline Address iaddress(int n) {
duke@0 43 return Address(rdi, Interpreter::local_offset_in_bytes(n));
duke@0 44 }
duke@0 45
duke@0 46 static inline Address laddress(int n) { return iaddress(n + 1); }
duke@0 47 static inline Address haddress(int n) { return iaddress(n + 0); }
duke@0 48 static inline Address faddress(int n) { return iaddress(n); }
duke@0 49 static inline Address daddress(int n) { return laddress(n); }
duke@0 50 static inline Address aaddress(int n) { return iaddress(n); }
duke@0 51
duke@0 52 static inline Address iaddress(Register r) {
twisti@1426 53 return Address(rdi, r, Interpreter::stackElementScale());
duke@0 54 }
duke@0 55 static inline Address laddress(Register r) {
duke@0 56 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
duke@0 57 }
duke@0 58 static inline Address haddress(Register r) {
duke@0 59 return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
duke@0 60 }
duke@0 61
twisti@1426 62 static inline Address faddress(Register r) { return iaddress(r); }
twisti@1426 63 static inline Address daddress(Register r) { return laddress(r); }
twisti@1426 64 static inline Address aaddress(Register r) { return iaddress(r); }
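// Editorial note (a sketch of the convention): the Register forms above
// expect an index that has already been negated -- locals_index() below
// does "__ negptr(reg)" -- so the scaled index walks downward from rdi,
// where the locals live at decreasing addresses.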
duke@0 65
duke@0 66 // expression stack
duke@0 67 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
duke@0 68 // data beyond the rsp which is potentially unsafe in an MT environment;
duke@0 69 // an interrupt may overwrite that data.)
duke@0 70 static inline Address at_rsp () {
duke@0 71 return Address(rsp, 0);
duke@0 72 }
duke@0 73
duke@0 74 // At the top of the Java expression stack, which may be different from rsp();
duke@0 75 // it isn't different for category 1 values.
duke@0 76 static inline Address at_tos () {
duke@0 77 Address tos = Address(rsp, Interpreter::expr_offset_in_bytes(0));
duke@0 78 return tos;
duke@0 79 }
duke@0 80
duke@0 81 static inline Address at_tos_p1() {
duke@0 82 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
duke@0 83 }
duke@0 84
duke@0 85 static inline Address at_tos_p2() {
duke@0 86 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
duke@0 87 }
duke@0 88
duke@0 89 // Condition conversion
duke@0 90 static Assembler::Condition j_not(TemplateTable::Condition cc) {
duke@0 91 switch (cc) {
duke@0 92 case TemplateTable::equal : return Assembler::notEqual;
duke@0 93 case TemplateTable::not_equal : return Assembler::equal;
duke@0 94 case TemplateTable::less : return Assembler::greaterEqual;
duke@0 95 case TemplateTable::less_equal : return Assembler::greater;
duke@0 96 case TemplateTable::greater : return Assembler::lessEqual;
duke@0 97 case TemplateTable::greater_equal: return Assembler::less;
duke@0 98 }
duke@0 99 ShouldNotReachHere();
duke@0 100 return Assembler::zero;
duke@0 101 }
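// Illustrative use of j_not (a sketch of the pattern in the branch
// templates): comparing with the inverted condition lets the taken path
// fall through into the branch-target computation:
//   __ jcc(j_not(cc), not_taken);   // branch around if NOT taken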
duke@0 102
duke@0 103
duke@0 104 //----------------------------------------------------------------------------------------------------
duke@0 105 // Miscellaneous helper routines
duke@0 106
ysr@342 107 // Store an oop (or NULL) at the address described by obj.
ysr@342 108 // If val == noreg this means store a NULL
ysr@342 109
ysr@342 110 static void do_oop_store(InterpreterMacroAssembler* _masm,
ysr@342 111 Address obj,
ysr@342 112 Register val,
ysr@342 113 BarrierSet::Name barrier,
ysr@342 114 bool precise) {
ysr@342 115 assert(val == noreg || val == rax, "parameter is just for looks");
ysr@342 116 switch (barrier) {
ysr@342 117 #ifndef SERIALGC
ysr@342 118 case BarrierSet::G1SATBCT:
ysr@342 119 case BarrierSet::G1SATBCTLogging:
ysr@342 120 {
ysr@342 121 // flatten object address if needed
ysr@342 122 // We do it regardless of precise because we need the registers
ysr@342 123 if (obj.index() == noreg && obj.disp() == 0) {
ysr@342 124 if (obj.base() != rdx) {
ysr@342 125 __ movl(rdx, obj.base());
ysr@342 126 }
ysr@342 127 } else {
ysr@342 128 __ leal(rdx, obj);
ysr@342 129 }
ysr@342 130 __ get_thread(rcx);
ysr@342 131 __ save_bcp();
ysr@342 132 __ g1_write_barrier_pre(rdx, rcx, rsi, rbx, val != noreg);
ysr@342 133
ysr@342 134 // Do the actual store
ysr@342 135 // noreg means NULL
ysr@342 136 if (val == noreg) {
xlu@533 137 __ movptr(Address(rdx, 0), NULL_WORD);
ysr@342 138 // No post barrier for NULL
ysr@342 139 } else {
ysr@342 140 __ movl(Address(rdx, 0), val);
ysr@342 141 __ g1_write_barrier_post(rdx, rax, rcx, rbx, rsi);
ysr@342 142 }
ysr@342 143 __ restore_bcp();
ysr@342 144
ysr@342 145 }
ysr@342 146 break;
ysr@342 147 #endif // SERIALGC
ysr@342 148 case BarrierSet::CardTableModRef:
ysr@342 149 case BarrierSet::CardTableExtension:
ysr@342 150 {
ysr@342 151 if (val == noreg) {
xlu@533 152 __ movptr(obj, NULL_WORD);
ysr@342 153 } else {
ysr@342 154 __ movl(obj, val);
ysr@342 155 // flatten object address if needed
ysr@342 156 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
ysr@342 157 __ store_check(obj.base());
ysr@342 158 } else {
ysr@342 159 __ leal(rdx, obj);
ysr@342 160 __ store_check(rdx);
ysr@342 161 }
ysr@342 162 }
ysr@342 163 }
ysr@342 164 break;
ysr@342 165 case BarrierSet::ModRef:
ysr@342 166 case BarrierSet::Other:
ysr@342 167 if (val == noreg) {
xlu@533 168 __ movptr(obj, NULL_WORD);
ysr@342 169 } else {
ysr@342 170 __ movl(obj, val);
ysr@342 171 }
ysr@342 172 break;
ysr@342 173 default :
ysr@342 174 ShouldNotReachHere();
ysr@342 175
ysr@342 176 }
ysr@342 177 }
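// Illustrative calls (the aastore template below uses exactly these):
//   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);   // store the oop in rax
//   do_oop_store(_masm, element_address, noreg, _bs->kind(), true); // store NULL, no post barrier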
ysr@342 178
duke@0 179 Address TemplateTable::at_bcp(int offset) {
duke@0 180 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
duke@0 181 return Address(rsi, offset);
duke@0 182 }
duke@0 183
duke@0 184
duke@0 185 void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
duke@0 186 Register scratch,
duke@0 187 bool load_bc_into_scratch/*=true*/) {
duke@0 188
duke@0 189 if (!RewriteBytecodes) return;
duke@0 190 // the pair bytecodes have already done the load.
never@304 191 if (load_bc_into_scratch) {
never@304 192 __ movl(bc, bytecode);
never@304 193 }
duke@0 194 Label patch_done;
duke@0 195 if (JvmtiExport::can_post_breakpoint()) {
duke@0 196 Label fast_patch;
duke@0 197 // if a breakpoint is present we can't rewrite the stream directly
never@304 198 __ movzbl(scratch, at_bcp(0));
duke@0 199 __ cmpl(scratch, Bytecodes::_breakpoint);
duke@0 200 __ jcc(Assembler::notEqual, fast_patch);
duke@0 201 __ get_method(scratch);
duke@0 202 // Let breakpoint table handling rewrite to quicker bytecode
duke@0 203 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, rsi, bc);
duke@0 204 #ifndef ASSERT
duke@0 205 __ jmpb(patch_done);
jrose@726 206 #else
jrose@726 207 __ jmp(patch_done);
jrose@726 208 #endif
duke@0 209 __ bind(fast_patch);
duke@0 210 }
jrose@726 211 #ifdef ASSERT
duke@0 212 Label okay;
duke@0 213 __ load_unsigned_byte(scratch, at_bcp(0));
duke@0 214 __ cmpl(scratch, (int)Bytecodes::java_code(bytecode));
duke@0 215 __ jccb(Assembler::equal, okay);
duke@0 216 __ cmpl(scratch, bc);
duke@0 217 __ jcc(Assembler::equal, okay);
duke@0 218 __ stop("patching the wrong bytecode");
duke@0 219 __ bind(okay);
duke@0 220 #endif
duke@0 221 // patch bytecode
duke@0 222 __ movb(at_bcp(0), bc);
duke@0 223 __ bind(patch_done);
duke@0 224 }
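// Illustrative call (as done by iload() below): the pair bytecodes pass
// load_bc_into_scratch=false because rcx already holds the replacement:
//   patch_bytecode(Bytecodes::_iload, rcx, rbx, false);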
duke@0 225
duke@0 226 //----------------------------------------------------------------------------------------------------
duke@0 227 // Individual instructions
duke@0 228
duke@0 229 void TemplateTable::nop() {
duke@0 230 transition(vtos, vtos);
duke@0 231 // nothing to do
duke@0 232 }
duke@0 233
duke@0 234 void TemplateTable::shouldnotreachhere() {
duke@0 235 transition(vtos, vtos);
duke@0 236 __ stop("shouldnotreachhere bytecode");
duke@0 237 }
duke@0 238
duke@0 239
duke@0 240
duke@0 241 void TemplateTable::aconst_null() {
duke@0 242 transition(vtos, atos);
never@304 243 __ xorptr(rax, rax);
duke@0 244 }
duke@0 245
duke@0 246
duke@0 247 void TemplateTable::iconst(int value) {
duke@0 248 transition(vtos, itos);
duke@0 249 if (value == 0) {
never@304 250 __ xorptr(rax, rax);
duke@0 251 } else {
never@304 252 __ movptr(rax, value);
duke@0 253 }
duke@0 254 }
duke@0 255
duke@0 256
duke@0 257 void TemplateTable::lconst(int value) {
duke@0 258 transition(vtos, ltos);
duke@0 259 if (value == 0) {
never@304 260 __ xorptr(rax, rax);
duke@0 261 } else {
never@304 262 __ movptr(rax, value);
duke@0 263 }
duke@0 264 assert(value >= 0, "check this code");
never@304 265 __ xorptr(rdx, rdx);
duke@0 266 }
duke@0 267
duke@0 268
duke@0 269 void TemplateTable::fconst(int value) {
duke@0 270 transition(vtos, ftos);
duke@0 271 if (value == 0) { __ fldz();
duke@0 272 } else if (value == 1) { __ fld1();
duke@0 273 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should find a better way to do this
duke@0 274 } else { ShouldNotReachHere();
duke@0 275 }
duke@0 276 }
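// Editorial note: x87 has dedicated loads only for constants such as
// 0.0 (fldz) and 1.0 (fld1), so 2.0 is synthesized as 1.0 + 1.0 above --
// hence the remark in fconst().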
duke@0 277
duke@0 278
duke@0 279 void TemplateTable::dconst(int value) {
duke@0 280 transition(vtos, dtos);
duke@0 281 if (value == 0) { __ fldz();
duke@0 282 } else if (value == 1) { __ fld1();
duke@0 283 } else { ShouldNotReachHere();
duke@0 284 }
duke@0 285 }
duke@0 286
duke@0 287
duke@0 288 void TemplateTable::bipush() {
duke@0 289 transition(vtos, itos);
duke@0 290 __ load_signed_byte(rax, at_bcp(1));
duke@0 291 }
duke@0 292
duke@0 293
duke@0 294 void TemplateTable::sipush() {
duke@0 295 transition(vtos, itos);
jrose@622 296 __ load_unsigned_short(rax, at_bcp(1));
never@304 297 __ bswapl(rax);
duke@0 298 __ sarl(rax, 16);
duke@0 299 }
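// Worked example of the byte-swap trick (editorial sketch): for the
// stream ... sipush 0x12 0x34 ... the little-endian 16-bit load yields
// rax = 0x00003412; bswapl gives 0x12340000; the arithmetic shift right
// by 16 leaves the sign-extended big-endian operand 0x00001234.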
duke@0 300
duke@0 301 void TemplateTable::ldc(bool wide) {
duke@0 302 transition(vtos, vtos);
duke@0 303 Label call_ldc, notFloat, notClass, Done;
duke@0 304
duke@0 305 if (wide) {
duke@0 306 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
duke@0 307 } else {
duke@0 308 __ load_unsigned_byte(rbx, at_bcp(1));
duke@0 309 }
duke@0 310 __ get_cpool_and_tags(rcx, rax);
duke@0 311 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
duke@0 312 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
duke@0 313
duke@0 314 // get type
never@304 315 __ xorptr(rdx, rdx);
duke@0 316 __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
duke@0 317
duke@0 318 // unresolved string - get the resolved string
duke@0 319 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
duke@0 320 __ jccb(Assembler::equal, call_ldc);
duke@0 321
duke@0 322 // unresolved class - get the resolved class
duke@0 323 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
duke@0 324 __ jccb(Assembler::equal, call_ldc);
duke@0 325
duke@0 326 // unresolved class in error (resolution failed) - call into runtime
duke@0 327 // so that the same error from first resolution attempt is thrown.
duke@0 328 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
duke@0 329 __ jccb(Assembler::equal, call_ldc);
duke@0 330
duke@0 331 // resolved class - need to call vm to get java mirror of the class
duke@0 332 __ cmpl(rdx, JVM_CONSTANT_Class);
duke@0 333 __ jcc(Assembler::notEqual, notClass);
duke@0 334
duke@0 335 __ bind(call_ldc);
duke@0 336 __ movl(rcx, wide);
duke@0 337 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
duke@0 338 __ push(atos);
duke@0 339 __ jmp(Done);
duke@0 340
duke@0 341 __ bind(notClass);
duke@0 342 __ cmpl(rdx, JVM_CONSTANT_Float);
duke@0 343 __ jccb(Assembler::notEqual, notFloat);
duke@0 344 // ftos
never@304 345 __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
duke@0 346 __ push(ftos);
duke@0 347 __ jmp(Done);
duke@0 348
duke@0 349 __ bind(notFloat);
duke@0 350 #ifdef ASSERT
duke@0 351 { Label L;
duke@0 352 __ cmpl(rdx, JVM_CONSTANT_Integer);
duke@0 353 __ jcc(Assembler::equal, L);
duke@0 354 __ cmpl(rdx, JVM_CONSTANT_String);
duke@0 355 __ jcc(Assembler::equal, L);
duke@0 356 __ stop("unexpected tag type in ldc");
duke@0 357 __ bind(L);
duke@0 358 }
duke@0 359 #endif
duke@0 360 Label isOop;
duke@0 361 // atos and itos
duke@0 362 // String is only oop type we will see here
duke@0 363 __ cmpl(rdx, JVM_CONSTANT_String);
duke@0 364 __ jccb(Assembler::equal, isOop);
never@304 365 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
duke@0 366 __ push(itos);
duke@0 367 __ jmp(Done);
duke@0 368 __ bind(isOop);
never@304 369 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
duke@0 370 __ push(atos);
duke@0 371
duke@0 372 if (VerifyOops) {
duke@0 373 __ verify_oop(rax);
duke@0 374 }
duke@0 375 __ bind(Done);
duke@0 376 }
duke@0 377
jrose@1524 378 // Fast path for caching oop constants.
jrose@1524 379 // %%% We should use this to handle Class and String constants also.
jrose@1524 380 // %%% It will simplify the ldc/primitive path considerably.
jrose@1524 381 void TemplateTable::fast_aldc(bool wide) {
jrose@1524 382 transition(vtos, atos);
jrose@1524 383
jrose@1524 384 if (!EnableMethodHandles) {
jrose@1524 385 // We should not encounter this bytecode if !EnableMethodHandles.
jrose@1524 386 // The verifier will stop it. However, if we get past the verifier,
jrose@1524 387 // this will stop the thread in a reasonable way, without crashing the JVM.
jrose@1524 388 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
jrose@1524 389 InterpreterRuntime::throw_IncompatibleClassChangeError));
jrose@1524 390 // the call_VM checks for exception, so we should never return here.
jrose@1524 391 __ should_not_reach_here();
jrose@1524 392 return;
jrose@1524 393 }
jrose@1524 394
jrose@1524 395 const Register cache = rcx;
jrose@1524 396 const Register index = rdx;
jrose@1524 397
jrose@1524 398 resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1));
jrose@1524 399 if (VerifyOops) {
jrose@1524 400 __ verify_oop(rax);
jrose@1524 401 }
jrose@1838 402
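// Editorial note on the sentinel convention (as read from the code
// below): a result whose klass is the system objArray klass is a
// wrapper, not the constant itself -- an empty wrapper encodes a NULL
// constant, and a non-empty one carries a pending exception in element 0
// that is rethrown here.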
jrose@1838 403 Label L_done, L_throw_exception;
jrose@1838 404 const Register con_klass_temp = rcx; // same as Rcache
jrose@1838 405 __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
jrose@1838 406 __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
jrose@1838 407 __ jcc(Assembler::notEqual, L_done);
jrose@1838 408 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
jrose@1838 409 __ jcc(Assembler::notEqual, L_throw_exception);
jrose@1838 410 __ xorptr(rax, rax);
jrose@1838 411 __ jmp(L_done);
jrose@1838 412
jrose@1838 413 // Load the exception from the system-array which wraps it:
jrose@1838 414 __ bind(L_throw_exception);
jrose@1838 415 __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
jrose@1838 416 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
jrose@1838 417
jrose@1838 418 __ bind(L_done);
jrose@1524 419 }
jrose@1524 420
duke@0 421 void TemplateTable::ldc2_w() {
duke@0 422 transition(vtos, vtos);
duke@0 423 Label Long, Done;
duke@0 424 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
duke@0 425
duke@0 426 __ get_cpool_and_tags(rcx, rax);
duke@0 427 const int base_offset = constantPoolOopDesc::header_size() * wordSize;
duke@0 428 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
duke@0 429
duke@0 430 // get type
duke@0 431 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
duke@0 432 __ jccb(Assembler::notEqual, Long);
duke@0 433 // dtos
never@304 434 __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
duke@0 435 __ push(dtos);
duke@0 436 __ jmpb(Done);
duke@0 437
duke@0 438 __ bind(Long);
duke@0 439 // ltos
never@304 440 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
never@304 441 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
duke@0 442
duke@0 443 __ push(ltos);
duke@0 444
duke@0 445 __ bind(Done);
duke@0 446 }
duke@0 447
duke@0 448
duke@0 449 void TemplateTable::locals_index(Register reg, int offset) {
duke@0 450 __ load_unsigned_byte(reg, at_bcp(offset));
never@304 451 __ negptr(reg);
duke@0 452 }
duke@0 453
duke@0 454
duke@0 455 void TemplateTable::iload() {
duke@0 456 transition(vtos, itos);
duke@0 457 if (RewriteFrequentPairs) {
duke@0 458 Label rewrite, done;
duke@0 459
duke@0 460 // get next byte
duke@0 461 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
duke@0 462 // if _iload, wait to rewrite to fast_iload2. We only want to rewrite the
duke@0 463 // last two iloads in a pair. Comparing against fast_iload means that
duke@0 464 // the next bytecode is neither an iload nor a caload, and that this is
duke@0 465 // therefore an iload pair.
duke@0 466 __ cmpl(rbx, Bytecodes::_iload);
duke@0 467 __ jcc(Assembler::equal, done);
duke@0 468
duke@0 469 __ cmpl(rbx, Bytecodes::_fast_iload);
duke@0 470 __ movl(rcx, Bytecodes::_fast_iload2);
duke@0 471 __ jccb(Assembler::equal, rewrite);
duke@0 472
duke@0 473 // if _caload, rewrite to fast_icaload
duke@0 474 __ cmpl(rbx, Bytecodes::_caload);
duke@0 475 __ movl(rcx, Bytecodes::_fast_icaload);
duke@0 476 __ jccb(Assembler::equal, rewrite);
duke@0 477
duke@0 478 // rewrite so iload doesn't check again.
duke@0 479 __ movl(rcx, Bytecodes::_fast_iload);
duke@0 480
duke@0 481 // rewrite
duke@0 482 // rcx: fast bytecode
duke@0 483 __ bind(rewrite);
duke@0 484 patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
duke@0 485 __ bind(done);
duke@0 486 }
duke@0 487
duke@0 488 // Get the local value into tos
duke@0 489 locals_index(rbx);
duke@0 490 __ movl(rax, iaddress(rbx));
duke@0 491 }
duke@0 492
duke@0 493
duke@0 494 void TemplateTable::fast_iload2() {
duke@0 495 transition(vtos, itos);
duke@0 496 locals_index(rbx);
duke@0 497 __ movl(rax, iaddress(rbx));
duke@0 498 __ push(itos);
duke@0 499 locals_index(rbx, 3);
duke@0 500 __ movl(rax, iaddress(rbx));
duke@0 501 }
duke@0 502
duke@0 503 void TemplateTable::fast_iload() {
duke@0 504 transition(vtos, itos);
duke@0 505 locals_index(rbx);
duke@0 506 __ movl(rax, iaddress(rbx));
duke@0 507 }
duke@0 508
duke@0 509
duke@0 510 void TemplateTable::lload() {
duke@0 511 transition(vtos, ltos);
duke@0 512 locals_index(rbx);
never@304 513 __ movptr(rax, laddress(rbx));
never@304 514 NOT_LP64(__ movl(rdx, haddress(rbx)));
duke@0 515 }
duke@0 516
duke@0 517
duke@0 518 void TemplateTable::fload() {
duke@0 519 transition(vtos, ftos);
duke@0 520 locals_index(rbx);
duke@0 521 __ fld_s(faddress(rbx));
duke@0 522 }
duke@0 523
duke@0 524
duke@0 525 void TemplateTable::dload() {
duke@0 526 transition(vtos, dtos);
duke@0 527 locals_index(rbx);
twisti@1426 528 __ fld_d(daddress(rbx));
duke@0 529 }
duke@0 530
duke@0 531
duke@0 532 void TemplateTable::aload() {
duke@0 533 transition(vtos, atos);
duke@0 534 locals_index(rbx);
never@304 535 __ movptr(rax, aaddress(rbx));
duke@0 536 }
duke@0 537
duke@0 538
duke@0 539 void TemplateTable::locals_index_wide(Register reg) {
duke@0 540 __ movl(reg, at_bcp(2));
never@304 541 __ bswapl(reg);
duke@0 542 __ shrl(reg, 16);
never@304 543 __ negptr(reg);
duke@0 544 }
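// Editorial note: the 32-bit load at bcp+2 plus bswapl/shrl extracts the
// big-endian u2 index with a logical shift (the index is unsigned), in
// contrast to sipush above, which sign-extends with an arithmetic shift.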
duke@0 545
duke@0 546
duke@0 547 void TemplateTable::wide_iload() {
duke@0 548 transition(vtos, itos);
duke@0 549 locals_index_wide(rbx);
duke@0 550 __ movl(rax, iaddress(rbx));
duke@0 551 }
duke@0 552
duke@0 553
duke@0 554 void TemplateTable::wide_lload() {
duke@0 555 transition(vtos, ltos);
duke@0 556 locals_index_wide(rbx);
never@304 557 __ movptr(rax, laddress(rbx));
never@304 558 NOT_LP64(__ movl(rdx, haddress(rbx)));
duke@0 559 }
duke@0 560
duke@0 561
duke@0 562 void TemplateTable::wide_fload() {
duke@0 563 transition(vtos, ftos);
duke@0 564 locals_index_wide(rbx);
duke@0 565 __ fld_s(faddress(rbx));
duke@0 566 }
duke@0 567
duke@0 568
duke@0 569 void TemplateTable::wide_dload() {
duke@0 570 transition(vtos, dtos);
duke@0 571 locals_index_wide(rbx);
twisti@1426 572 __ fld_d(daddress(rbx));
duke@0 573 }
duke@0 574
duke@0 575
duke@0 576 void TemplateTable::wide_aload() {
duke@0 577 transition(vtos, atos);
duke@0 578 locals_index_wide(rbx);
never@304 579 __ movptr(rax, aaddress(rbx));
duke@0 580 }
duke@0 581
duke@0 582 void TemplateTable::index_check(Register array, Register index) {
duke@0 583 // Pop ptr into array
duke@0 584 __ pop_ptr(array);
duke@0 585 index_check_without_pop(array, index);
duke@0 586 }
duke@0 587
duke@0 588 void TemplateTable::index_check_without_pop(Register array, Register index) {
duke@0 589 // destroys rbx,
duke@0 590 // check array
duke@0 591 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
never@304 592 LP64_ONLY(__ movslq(index, index));
duke@0 593 // check index
duke@0 594 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
duke@0 595 if (index != rbx) {
duke@0 596 // ??? convention: move aberrant index into rbx, for exception message
duke@0 597 assert(rbx != array, "different registers");
never@304 598 __ mov(rbx, index);
duke@0 599 }
duke@0 600 __ jump_cc(Assembler::aboveEqual,
duke@0 601 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
duke@0 602 }
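// Editorial note: aboveEqual is an unsigned condition, so a negative
// index (viewed as a huge unsigned value) also fails the cmpl against
// the array length and takes the exception path.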
duke@0 603
duke@0 604
duke@0 605 void TemplateTable::iaload() {
duke@0 606 transition(itos, itos);
duke@0 607 // rdx: array
duke@0 608 index_check(rdx, rax); // kills rbx,
duke@0 609 // rax,: index
duke@0 610 __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
duke@0 611 }
duke@0 612
duke@0 613
duke@0 614 void TemplateTable::laload() {
duke@0 615 transition(itos, ltos);
duke@0 616 // rax,: index
duke@0 617 // rdx: array
duke@0 618 index_check(rdx, rax);
never@304 619 __ mov(rbx, rax);
duke@0 620 // rbx,: index
never@304 621 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
never@304 622 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
duke@0 623 }
duke@0 624
duke@0 625
duke@0 626 void TemplateTable::faload() {
duke@0 627 transition(itos, ftos);
duke@0 628 // rdx: array
duke@0 629 index_check(rdx, rax); // kills rbx,
duke@0 630 // rax,: index
duke@0 631 __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
duke@0 632 }
duke@0 633
duke@0 634
duke@0 635 void TemplateTable::daload() {
duke@0 636 transition(itos, dtos);
duke@0 637 // rdx: array
duke@0 638 index_check(rdx, rax); // kills rbx,
duke@0 639 // rax,: index
duke@0 640 __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
duke@0 641 }
duke@0 642
duke@0 643
duke@0 644 void TemplateTable::aaload() {
duke@0 645 transition(itos, atos);
duke@0 646 // rdx: array
duke@0 647 index_check(rdx, rax); // kills rbx,
duke@0 648 // rax,: index
never@304 649 __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
duke@0 650 }
duke@0 651
duke@0 652
duke@0 653 void TemplateTable::baload() {
duke@0 654 transition(itos, itos);
duke@0 655 // rdx: array
duke@0 656 index_check(rdx, rax); // kills rbx,
duke@0 657 // rax,: index
duke@0 658 // can do better code for P5 - fix this at some point
duke@0 659 __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
never@304 660 __ mov(rax, rbx);
duke@0 661 }
duke@0 662
duke@0 663
duke@0 664 void TemplateTable::caload() {
duke@0 665 transition(itos, itos);
duke@0 666 // rdx: array
duke@0 667 index_check(rdx, rax); // kills rbx,
duke@0 668 // rax,: index
duke@0 669 // can do better code for P5 - may want to improve this at some point
jrose@622 670 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
never@304 671 __ mov(rax, rbx);
duke@0 672 }
duke@0 673
duke@0 674 // iload followed by caload frequent pair
duke@0 675 void TemplateTable::fast_icaload() {
duke@0 676 transition(vtos, itos);
duke@0 677 // load index out of locals
duke@0 678 locals_index(rbx);
duke@0 679 __ movl(rax, iaddress(rbx));
duke@0 680
duke@0 681 // rdx: array
duke@0 682 index_check(rdx, rax);
duke@0 683 // rax,: index
jrose@622 684 __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
never@304 685 __ mov(rax, rbx);
duke@0 686 }
duke@0 687
duke@0 688 void TemplateTable::saload() {
duke@0 689 transition(itos, itos);
duke@0 690 // rdx: array
duke@0 691 index_check(rdx, rax); // kills rbx,
duke@0 692 // rax,: index
duke@0 693 // can do better code for P5 - may want to improve this at some point
jrose@622 694 __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
never@304 695 __ mov(rax, rbx);
duke@0 696 }
duke@0 697
duke@0 698
duke@0 699 void TemplateTable::iload(int n) {
duke@0 700 transition(vtos, itos);
duke@0 701 __ movl(rax, iaddress(n));
duke@0 702 }
duke@0 703
duke@0 704
duke@0 705 void TemplateTable::lload(int n) {
duke@0 706 transition(vtos, ltos);
never@304 707 __ movptr(rax, laddress(n));
never@304 708 NOT_LP64(__ movptr(rdx, haddress(n)));
duke@0 709 }
duke@0 710
duke@0 711
duke@0 712 void TemplateTable::fload(int n) {
duke@0 713 transition(vtos, ftos);
duke@0 714 __ fld_s(faddress(n));
duke@0 715 }
duke@0 716
duke@0 717
duke@0 718 void TemplateTable::dload(int n) {
duke@0 719 transition(vtos, dtos);
twisti@1426 720 __ fld_d(daddress(n));
duke@0 721 }
duke@0 722
duke@0 723
duke@0 724 void TemplateTable::aload(int n) {
duke@0 725 transition(vtos, atos);
never@304 726 __ movptr(rax, aaddress(n));
duke@0 727 }
duke@0 728
duke@0 729
duke@0 730 void TemplateTable::aload_0() {
duke@0 731 transition(vtos, atos);
duke@0 732 // According to bytecode histograms, the pairs:
duke@0 733 //
duke@0 734 // _aload_0, _fast_igetfield
duke@0 735 // _aload_0, _fast_agetfield
duke@0 736 // _aload_0, _fast_fgetfield
duke@0 737 //
duke@0 738 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
duke@0 739 // bytecode checks if the next bytecode is either _fast_igetfield,
duke@0 740 // _fast_agetfield or _fast_fgetfield and then rewrites the
duke@0 741 // current bytecode into a pair bytecode; otherwise it rewrites the current
duke@0 742 // bytecode into _fast_aload_0, which doesn't do the pair check anymore.
duke@0 743 //
duke@0 744 // Note: If the next bytecode is _getfield, the rewrite must be delayed,
duke@0 745 // otherwise we may miss an opportunity for a pair.
duke@0 746 //
duke@0 747 // Also rewrite frequent pairs
duke@0 748 // aload_0, aload_1
duke@0 749 // aload_0, iload_1
duke@0 750 // These bytecodes, which involve only a small amount of code, are the most profitable to rewrite.
duke@0 751 if (RewriteFrequentPairs) {
duke@0 752 Label rewrite, done;
duke@0 753 // get next byte
duke@0 754 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
duke@0 755
duke@0 756 // do actual aload_0
duke@0 757 aload(0);
duke@0 758
duke@0 759 // if _getfield then wait with rewrite
duke@0 760 __ cmpl(rbx, Bytecodes::_getfield);
duke@0 761 __ jcc(Assembler::equal, done);
duke@0 762
duke@0 763 // if _igetfield then rewrite to _fast_iaccess_0
duke@0 764 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 765 __ cmpl(rbx, Bytecodes::_fast_igetfield);
duke@0 766 __ movl(rcx, Bytecodes::_fast_iaccess_0);
duke@0 767 __ jccb(Assembler::equal, rewrite);
duke@0 768
duke@0 769 // if _agetfield then rewrite to _fast_aaccess_0
duke@0 770 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 771 __ cmpl(rbx, Bytecodes::_fast_agetfield);
duke@0 772 __ movl(rcx, Bytecodes::_fast_aaccess_0);
duke@0 773 __ jccb(Assembler::equal, rewrite);
duke@0 774
duke@0 775 // if _fgetfield then rewrite to _fast_faccess_0
duke@0 776 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 777 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
duke@0 778 __ movl(rcx, Bytecodes::_fast_faccess_0);
duke@0 779 __ jccb(Assembler::equal, rewrite);
duke@0 780
duke@0 781 // else rewrite to _fast_aload0
duke@0 782 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 783 __ movl(rcx, Bytecodes::_fast_aload_0);
duke@0 784
duke@0 785 // rewrite
duke@0 786 // rcx: fast bytecode
duke@0 787 __ bind(rewrite);
duke@0 788 patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
duke@0 789
duke@0 790 __ bind(done);
duke@0 791 } else {
duke@0 792 aload(0);
duke@0 793 }
duke@0 794 }
duke@0 795
duke@0 796 void TemplateTable::istore() {
duke@0 797 transition(itos, vtos);
duke@0 798 locals_index(rbx);
duke@0 799 __ movl(iaddress(rbx), rax);
duke@0 800 }
duke@0 801
duke@0 802
duke@0 803 void TemplateTable::lstore() {
duke@0 804 transition(ltos, vtos);
duke@0 805 locals_index(rbx);
never@304 806 __ movptr(laddress(rbx), rax);
never@304 807 NOT_LP64(__ movptr(haddress(rbx), rdx));
duke@0 808 }
duke@0 809
duke@0 810
duke@0 811 void TemplateTable::fstore() {
duke@0 812 transition(ftos, vtos);
duke@0 813 locals_index(rbx);
duke@0 814 __ fstp_s(faddress(rbx));
duke@0 815 }
duke@0 816
duke@0 817
duke@0 818 void TemplateTable::dstore() {
duke@0 819 transition(dtos, vtos);
duke@0 820 locals_index(rbx);
twisti@1426 821 __ fstp_d(daddress(rbx));
duke@0 822 }
duke@0 823
duke@0 824
duke@0 825 void TemplateTable::astore() {
duke@0 826 transition(vtos, vtos);
twisti@1426 827 __ pop_ptr(rax);
duke@0 828 locals_index(rbx);
never@304 829 __ movptr(aaddress(rbx), rax);
duke@0 830 }
duke@0 831
duke@0 832
duke@0 833 void TemplateTable::wide_istore() {
duke@0 834 transition(vtos, vtos);
duke@0 835 __ pop_i(rax);
duke@0 836 locals_index_wide(rbx);
duke@0 837 __ movl(iaddress(rbx), rax);
duke@0 838 }
duke@0 839
duke@0 840
duke@0 841 void TemplateTable::wide_lstore() {
duke@0 842 transition(vtos, vtos);
duke@0 843 __ pop_l(rax, rdx);
duke@0 844 locals_index_wide(rbx);
never@304 845 __ movptr(laddress(rbx), rax);
never@304 846 NOT_LP64(__ movl(haddress(rbx), rdx));
duke@0 847 }
duke@0 848
duke@0 849
duke@0 850 void TemplateTable::wide_fstore() {
duke@0 851 wide_istore();
duke@0 852 }
duke@0 853
duke@0 854
duke@0 855 void TemplateTable::wide_dstore() {
duke@0 856 wide_lstore();
duke@0 857 }
duke@0 858
duke@0 859
duke@0 860 void TemplateTable::wide_astore() {
duke@0 861 transition(vtos, vtos);
twisti@1426 862 __ pop_ptr(rax);
duke@0 863 locals_index_wide(rbx);
never@304 864 __ movptr(aaddress(rbx), rax);
duke@0 865 }
duke@0 866
duke@0 867
duke@0 868 void TemplateTable::iastore() {
duke@0 869 transition(itos, vtos);
duke@0 870 __ pop_i(rbx);
duke@0 871 // rax,: value
duke@0 872 // rdx: array
duke@0 873 index_check(rdx, rbx); // prefer index in rbx,
duke@0 874 // rbx,: index
duke@0 875 __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
duke@0 876 }
duke@0 877
duke@0 878
duke@0 879 void TemplateTable::lastore() {
duke@0 880 transition(ltos, vtos);
duke@0 881 __ pop_i(rbx);
duke@0 882 // rax,: low(value)
duke@0 883 // rcx: array
duke@0 884 // rdx: high(value)
duke@0 885 index_check(rcx, rbx); // prefer index in rbx,
duke@0 886 // rbx,: index
never@304 887 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
never@304 888 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
duke@0 889 }
duke@0 890
duke@0 891
duke@0 892 void TemplateTable::fastore() {
duke@0 893 transition(ftos, vtos);
duke@0 894 __ pop_i(rbx);
duke@0 895 // rdx: array
duke@0 896 // st0: value
duke@0 897 index_check(rdx, rbx); // prefer index in rbx,
duke@0 898 // rbx,: index
duke@0 899 __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
duke@0 900 }
duke@0 901
duke@0 902
duke@0 903 void TemplateTable::dastore() {
duke@0 904 transition(dtos, vtos);
duke@0 905 __ pop_i(rbx);
duke@0 906 // rdx: array
duke@0 907 // st0: value
duke@0 908 index_check(rdx, rbx); // prefer index in rbx,
duke@0 909 // rbx,: index
duke@0 910 __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
duke@0 911 }
duke@0 912
duke@0 913
duke@0 914 void TemplateTable::aastore() {
duke@0 915 Label is_null, ok_is_subtype, done;
duke@0 916 transition(vtos, vtos);
duke@0 917 // stack: ..., array, index, value
never@304 918 __ movptr(rax, at_tos()); // Value
duke@0 919 __ movl(rcx, at_tos_p1()); // Index
never@304 920 __ movptr(rdx, at_tos_p2()); // Array
ysr@342 921
ysr@342 922 Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
duke@0 923 index_check_without_pop(rdx, rcx); // kills rbx,
duke@0 924 // do array store check - check for NULL value first
never@304 925 __ testptr(rax, rax);
duke@0 926 __ jcc(Assembler::zero, is_null);
duke@0 927
duke@0 928 // Move subklass into EBX
never@304 929 __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
duke@0 930 // Move superklass into EAX
never@304 931 __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
never@304 932 __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
never@304 933 // Compress array+index*wordSize+12 into a single register. Frees ECX.
apetrusenko@362 934 __ lea(rdx, element_address);
duke@0 935
duke@0 936 // Generate subtype check. Blows ECX. Resets EDI to locals.
duke@0 937 // Superklass in EAX. Subklass in EBX.
duke@0 938 __ gen_subtype_check( rbx, ok_is_subtype );
duke@0 939
duke@0 940 // Come here on failure
duke@0 941 // object is at TOS
duke@0 942 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
duke@0 943
duke@0 944 // Come here on success
duke@0 945 __ bind(ok_is_subtype);
ysr@342 946
ysr@342 947 // Get the value to store
apetrusenko@362 948 __ movptr(rax, at_rsp());
ysr@342 949 // and store it with appropriate barrier
ysr@342 950 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
ysr@342 951
ysr@342 952 __ jmp(done);
duke@0 953
duke@0 954 // Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
duke@0 955 __ bind(is_null);
duke@0 956 __ profile_null_seen(rbx);
ysr@342 957
ysr@342 958 // Store NULL, (noreg means NULL to do_oop_store)
ysr@342 959 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
duke@0 960
duke@0 961 // Pop stack arguments
duke@0 962 __ bind(done);
twisti@1426 963 __ addptr(rsp, 3 * Interpreter::stackElementSize);
duke@0 964 }
duke@0 965
duke@0 966
duke@0 967 void TemplateTable::bastore() {
duke@0 968 transition(itos, vtos);
duke@0 969 __ pop_i(rbx);
duke@0 970 // rax,: value
duke@0 971 // rdx: array
duke@0 972 index_check(rdx, rbx); // prefer index in rbx,
duke@0 973 // rbx,: index
duke@0 974 __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
duke@0 975 }
duke@0 976
duke@0 977
duke@0 978 void TemplateTable::castore() {
duke@0 979 transition(itos, vtos);
duke@0 980 __ pop_i(rbx);
duke@0 981 // rax,: value
duke@0 982 // rdx: array
duke@0 983 index_check(rdx, rbx); // prefer index in rbx,
duke@0 984 // rbx,: index
duke@0 985 __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
duke@0 986 }
duke@0 987
duke@0 988
duke@0 989 void TemplateTable::sastore() {
duke@0 990 castore();
duke@0 991 }
duke@0 992
duke@0 993
duke@0 994 void TemplateTable::istore(int n) {
duke@0 995 transition(itos, vtos);
duke@0 996 __ movl(iaddress(n), rax);
duke@0 997 }
duke@0 998
duke@0 999
duke@0 1000 void TemplateTable::lstore(int n) {
duke@0 1001 transition(ltos, vtos);
never@304 1002 __ movptr(laddress(n), rax);
never@304 1003 NOT_LP64(__ movptr(haddress(n), rdx));
duke@0 1004 }
duke@0 1005
duke@0 1006
duke@0 1007 void TemplateTable::fstore(int n) {
duke@0 1008 transition(ftos, vtos);
duke@0 1009 __ fstp_s(faddress(n));
duke@0 1010 }
duke@0 1011
duke@0 1012
duke@0 1013 void TemplateTable::dstore(int n) {
duke@0 1014 transition(dtos, vtos);
twisti@1426 1015 __ fstp_d(daddress(n));
duke@0 1016 }
duke@0 1017
duke@0 1018
duke@0 1019 void TemplateTable::astore(int n) {
duke@0 1020 transition(vtos, vtos);
twisti@1426 1021 __ pop_ptr(rax);
never@304 1022 __ movptr(aaddress(n), rax);
duke@0 1023 }
duke@0 1024
duke@0 1025
duke@0 1026 void TemplateTable::pop() {
duke@0 1027 transition(vtos, vtos);
twisti@1426 1028 __ addptr(rsp, Interpreter::stackElementSize);
duke@0 1029 }
duke@0 1030
duke@0 1031
duke@0 1032 void TemplateTable::pop2() {
duke@0 1033 transition(vtos, vtos);
twisti@1426 1034 __ addptr(rsp, 2*Interpreter::stackElementSize);
duke@0 1035 }
duke@0 1036
duke@0 1037
duke@0 1038 void TemplateTable::dup() {
duke@0 1039 transition(vtos, vtos);
duke@0 1040 // stack: ..., a
twisti@1426 1041 __ load_ptr(0, rax);
twisti@1426 1042 __ push_ptr(rax);
duke@0 1043 // stack: ..., a, a
duke@0 1044 }
duke@0 1045
duke@0 1046
duke@0 1047 void TemplateTable::dup_x1() {
duke@0 1048 transition(vtos, vtos);
duke@0 1049 // stack: ..., a, b
twisti@1426 1050 __ load_ptr( 0, rax); // load b
twisti@1426 1051 __ load_ptr( 1, rcx); // load a
twisti@1426 1052 __ store_ptr(1, rax); // store b
twisti@1426 1053 __ store_ptr(0, rcx); // store a
twisti@1426 1054 __ push_ptr(rax); // push b
duke@0 1055 // stack: ..., b, a, b
duke@0 1056 }
duke@0 1057
duke@0 1058
duke@0 1059 void TemplateTable::dup_x2() {
duke@0 1060 transition(vtos, vtos);
duke@0 1061 // stack: ..., a, b, c
twisti@1426 1062 __ load_ptr( 0, rax); // load c
twisti@1426 1063 __ load_ptr( 2, rcx); // load a
twisti@1426 1064 __ store_ptr(2, rax); // store c in a
twisti@1426 1065 __ push_ptr(rax); // push c
duke@0 1066 // stack: ..., c, b, c, c
twisti@1426 1067 __ load_ptr( 2, rax); // load b
twisti@1426 1068 __ store_ptr(2, rcx); // store a in b
duke@0 1069 // stack: ..., c, a, c, c
twisti@1426 1070 __ store_ptr(1, rax); // store b in c
duke@0 1071 // stack: ..., c, a, b, c
duke@0 1072 }
duke@0 1073
duke@0 1074
duke@0 1075 void TemplateTable::dup2() {
duke@0 1076 transition(vtos, vtos);
duke@0 1077 // stack: ..., a, b
twisti@1426 1078 __ load_ptr(1, rax); // load a
twisti@1426 1079 __ push_ptr(rax); // push a
twisti@1426 1080 __ load_ptr(1, rax); // load b
twisti@1426 1081 __ push_ptr(rax); // push b
duke@0 1082 // stack: ..., a, b, a, b
duke@0 1083 }
duke@0 1084
duke@0 1085
duke@0 1086 void TemplateTable::dup2_x1() {
duke@0 1087 transition(vtos, vtos);
duke@0 1088 // stack: ..., a, b, c
twisti@1426 1089 __ load_ptr( 0, rcx); // load c
twisti@1426 1090 __ load_ptr( 1, rax); // load b
twisti@1426 1091 __ push_ptr(rax); // push b
twisti@1426 1092 __ push_ptr(rcx); // push c
duke@0 1093 // stack: ..., a, b, c, b, c
twisti@1426 1094 __ store_ptr(3, rcx); // store c in b
duke@0 1095 // stack: ..., a, c, c, b, c
twisti@1426 1096 __ load_ptr( 4, rcx); // load a
twisti@1426 1097 __ store_ptr(2, rcx); // store a in 2nd c
duke@0 1098 // stack: ..., a, c, a, b, c
twisti@1426 1099 __ store_ptr(4, rax); // store b in a
duke@0 1100 // stack: ..., b, c, a, b, c
duke@0 1102 }
duke@0 1103
duke@0 1104
duke@0 1105 void TemplateTable::dup2_x2() {
duke@0 1106 transition(vtos, vtos);
duke@0 1107 // stack: ..., a, b, c, d
twisti@1426 1108 __ load_ptr( 0, rcx); // load d
twisti@1426 1109 __ load_ptr( 1, rax); // load c
twisti@1426 1110 __ push_ptr(rax); // push c
twisti@1426 1111 __ push_ptr(rcx); // push d
duke@0 1112 // stack: ..., a, b, c, d, c, d
twisti@1426 1113 __ load_ptr( 4, rax); // load b
twisti@1426 1114 __ store_ptr(2, rax); // store b in d
twisti@1426 1115 __ store_ptr(4, rcx); // store d in b
duke@0 1116 // stack: ..., a, d, c, b, c, d
twisti@1426 1117 __ load_ptr( 5, rcx); // load a
twisti@1426 1118 __ load_ptr( 3, rax); // load c
twisti@1426 1119 __ store_ptr(3, rcx); // store a in c
twisti@1426 1120 __ store_ptr(5, rax); // store c in a
duke@0 1121 // stack: ..., c, d, a, b, c, d
duke@0 1123 }
duke@0 1124
duke@0 1125
duke@0 1126 void TemplateTable::swap() {
duke@0 1127 transition(vtos, vtos);
duke@0 1128 // stack: ..., a, b
twisti@1426 1129 __ load_ptr( 1, rcx); // load a
twisti@1426 1130 __ load_ptr( 0, rax); // load b
twisti@1426 1131 __ store_ptr(0, rcx); // store a in b
twisti@1426 1132 __ store_ptr(1, rax); // store b in a
duke@0 1133 // stack: ..., b, a
duke@0 1134 }
duke@0 1135
duke@0 1136
duke@0 1137 void TemplateTable::iop2(Operation op) {
duke@0 1138 transition(itos, itos);
duke@0 1139 switch (op) {
twisti@1426 1140 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
never@304 1141 case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
twisti@1426 1142 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
twisti@1426 1143 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
twisti@1426 1144 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
twisti@1426 1145 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
never@304 1146 case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
never@304 1147 case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
never@304 1148 case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
duke@0 1149 default : ShouldNotReachHere();
duke@0 1150 }
duke@0 1151 }
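// Worked example of the implicit masking (editorial sketch): IA-32 shift
// instructions use only the low 5 bits of CL, so "x << 33" executes as
// "x << 1" -- exactly the masking the JVM spec mandates for ishl/ishr/
// iushr, which is why no explicit "__ andl(rcx, 0x1f)" is needed above.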
duke@0 1152
duke@0 1153
duke@0 1154 void TemplateTable::lop2(Operation op) {
duke@0 1155 transition(ltos, ltos);
duke@0 1156 __ pop_l(rbx, rcx);
duke@0 1157 switch (op) {
twisti@1426 1158 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
twisti@1426 1159 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
twisti@1426 1160 __ mov (rax, rbx); __ mov (rdx, rcx); break;
twisti@1426 1161 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
twisti@1426 1162 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
twisti@1426 1163 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
twisti@1426 1164 default : ShouldNotReachHere();
duke@0 1165 }
duke@0 1166 }
duke@0 1167
duke@0 1168
duke@0 1169 void TemplateTable::idiv() {
duke@0 1170 transition(itos, itos);
never@304 1171 __ mov(rcx, rax);
duke@0 1172 __ pop_i(rax);
duke@0 1173 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
duke@0 1174 // they are not equal, one could do a normal division (no correction
duke@0 1175 // needed), which may speed up this implementation for the common case.
duke@0 1176 // (see also JVM spec., p.243 & p.271)
duke@0 1177 __ corrected_idivl(rcx);
duke@0 1178 }
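// Editorial note: the correction covers min_int / -1, the one case where
// a plain idiv would fault (the true quotient 2^31 does not fit); the
// JVM spec defines the result as min_int, which corrected_idivl returns.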
duke@0 1179
duke@0 1180
duke@0 1181 void TemplateTable::irem() {
duke@0 1182 transition(itos, itos);
never@304 1183 __ mov(rcx, rax);
duke@0 1184 __ pop_i(rax);
duke@0 1185 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
duke@0 1186 // they are not equal, one could do a normal division (no correction
duke@0 1187 // needed), which may speed up this implementation for the common case.
duke@0 1188 // (see also JVM spec., p.243 & p.271)
duke@0 1189 __ corrected_idivl(rcx);
never@304 1190 __ mov(rax, rdx);
duke@0 1191 }
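// Editorial note: in the min_int % -1 special case corrected_idivl
// leaves 0 in rdx, the remainder the JVM spec requires, and the mov
// above copies it into rax.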
duke@0 1192
duke@0 1193
duke@0 1194 void TemplateTable::lmul() {
duke@0 1195 transition(ltos, ltos);
duke@0 1196 __ pop_l(rbx, rcx);
never@304 1197 __ push(rcx); __ push(rbx);
never@304 1198 __ push(rdx); __ push(rax);
duke@0 1199 __ lmul(2 * wordSize, 0);
never@304 1200 __ addptr(rsp, 4 * wordSize); // take off temporaries
duke@0 1201 }
duke@0 1202
duke@0 1203
duke@0 1204 void TemplateTable::ldiv() {
duke@0 1205 transition(ltos, ltos);
duke@0 1206 __ pop_l(rbx, rcx);
never@304 1207 __ push(rcx); __ push(rbx);
never@304 1208 __ push(rdx); __ push(rax);
duke@0 1209 // check if y = 0
duke@0 1210 __ orl(rax, rdx);
duke@0 1211 __ jump_cc(Assembler::zero,
duke@0 1212 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
duke@0 1213 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
never@304 1214 __ addptr(rsp, 4 * wordSize); // take off temporaries
duke@0 1215 }
duke@0 1216
duke@0 1217
duke@0 1218 void TemplateTable::lrem() {
duke@0 1219 transition(ltos, ltos);
duke@0 1220 __ pop_l(rbx, rcx);
never@304 1221 __ push(rcx); __ push(rbx);
never@304 1222 __ push(rdx); __ push(rax);
duke@0 1223 // check if y = 0
duke@0 1224 __ orl(rax, rdx);
duke@0 1225 __ jump_cc(Assembler::zero,
duke@0 1226 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
duke@0 1227 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
never@304 1228 __ addptr(rsp, 4 * wordSize);
duke@0 1229 }
duke@0 1230
duke@0 1231
duke@0 1232 void TemplateTable::lshl() {
duke@0 1233 transition(itos, ltos);
duke@0 1234 __ movl(rcx, rax); // get shift count
duke@0 1235 __ pop_l(rax, rdx); // get shift value
duke@0 1236 __ lshl(rdx, rax);
duke@0 1237 }
duke@0 1238
duke@0 1239
duke@0 1240 void TemplateTable::lshr() {
duke@0 1241 transition(itos, ltos);
never@304 1242 __ mov(rcx, rax); // get shift count
duke@0 1243 __ pop_l(rax, rdx); // get shift value
duke@0 1244 __ lshr(rdx, rax, true);
duke@0 1245 }
duke@0 1246
duke@0 1247
duke@0 1248 void TemplateTable::lushr() {
duke@0 1249 transition(itos, ltos);
never@304 1250 __ mov(rcx, rax); // get shift count
duke@0 1251 __ pop_l(rax, rdx); // get shift value
duke@0 1252 __ lshr(rdx, rax);
duke@0 1253 }
duke@0 1254
duke@0 1255
duke@0 1256 void TemplateTable::fop2(Operation op) {
duke@0 1257 transition(ftos, ftos);
duke@0 1258 switch (op) {
duke@0 1259 case add: __ fadd_s (at_rsp()); break;
duke@0 1260 case sub: __ fsubr_s(at_rsp()); break;
duke@0 1261 case mul: __ fmul_s (at_rsp()); break;
duke@0 1262 case div: __ fdivr_s(at_rsp()); break;
duke@0 1263 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
duke@0 1264 default : ShouldNotReachHere();
duke@0 1265 }
duke@0 1266 __ f2ieee();
never@304 1267 __ pop(rax); // pop float thing off
duke@0 1268 }
duke@0 1269
duke@0 1270
duke@0 1271 void TemplateTable::dop2(Operation op) {
duke@0 1272 transition(dtos, dtos);
duke@0 1273
duke@0 1274 switch (op) {
duke@0 1275 case add: __ fadd_d (at_rsp()); break;
duke@0 1276 case sub: __ fsubr_d(at_rsp()); break;
duke@0 1277 case mul: {
duke@0 1278 Label L_strict;
duke@0 1279 Label L_join;
duke@0 1280 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
duke@0 1281 __ get_method(rcx);
duke@0 1282 __ movl(rcx, access_flags);
duke@0 1283 __ testl(rcx, JVM_ACC_STRICT);
duke@0 1284 __ jccb(Assembler::notZero, L_strict);
duke@0 1285 __ fmul_d (at_rsp());
duke@0 1286 __ jmpb(L_join);
duke@0 1287 __ bind(L_strict);
duke@0 1288 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
duke@0 1289 __ fmulp();
duke@0 1290 __ fmul_d (at_rsp());
duke@0 1291 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
duke@0 1292 __ fmulp();
duke@0 1293 __ bind(L_join);
duke@0 1294 break;
duke@0 1295 }
duke@0 1296 case div: {
duke@0 1297 Label L_strict;
duke@0 1298 Label L_join;
duke@0 1299 const Address access_flags (rcx, methodOopDesc::access_flags_offset());
duke@0 1300 __ get_method(rcx);
duke@0 1301 __ movl(rcx, access_flags);
duke@0 1302 __ testl(rcx, JVM_ACC_STRICT);
duke@0 1303 __ jccb(Assembler::notZero, L_strict);
duke@0 1304 __ fdivr_d(at_rsp());
duke@0 1305 __ jmp(L_join);
duke@0 1306 __ bind(L_strict);
duke@0 1307 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
duke@0 1308 __ fmul_d (at_rsp());
duke@0 1309 __ fdivrp();
duke@0 1310 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
duke@0 1311 __ fmulp();
duke@0 1312 __ bind(L_join);
duke@0 1313 break;
duke@0 1314 }
duke@0 1315 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
duke@0 1316 default : ShouldNotReachHere();
duke@0 1317 }
duke@0 1318 __ d2ieee();
duke@0 1319 // Pop double precision number from rsp.
never@304 1320 __ pop(rax);
never@304 1321 __ pop(rdx);
duke@0 1322 }
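// Editorial note on the JVM_ACC_STRICT paths above (a sketch of the
// intent): the x87 unit computes in extended precision, so the strict
// paths pre-scale by one bias constant and re-scale by the other; this
// keeps subnormal double results from being rounded twice, as strictfp
// semantics require.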
duke@0 1323
duke@0 1324
duke@0 1325 void TemplateTable::ineg() {
duke@0 1326 transition(itos, itos);
duke@0 1327 __ negl(rax);
duke@0 1328 }
duke@0 1329
duke@0 1330
duke@0 1331 void TemplateTable::lneg() {
duke@0 1332 transition(ltos, ltos);
duke@0 1333 __ lneg(rdx, rax);
duke@0 1334 }
duke@0 1335
duke@0 1336
duke@0 1337 void TemplateTable::fneg() {
duke@0 1338 transition(ftos, ftos);
duke@0 1339 __ fchs();
duke@0 1340 }
duke@0 1341
duke@0 1342
duke@0 1343 void TemplateTable::dneg() {
duke@0 1344 transition(dtos, dtos);
duke@0 1345 __ fchs();
duke@0 1346 }
duke@0 1347
duke@0 1348
duke@0 1349 void TemplateTable::iinc() {
duke@0 1350 transition(vtos, vtos);
duke@0 1351 __ load_signed_byte(rdx, at_bcp(2)); // get constant
duke@0 1352 locals_index(rbx);
duke@0 1353 __ addl(iaddress(rbx), rdx);
duke@0 1354 }
duke@0 1355
duke@0 1356
duke@0 1357 void TemplateTable::wide_iinc() {
duke@0 1358 transition(vtos, vtos);
duke@0 1359 __ movl(rdx, at_bcp(4)); // get constant
duke@0 1360 locals_index_wide(rbx);
never@304 1361 __ bswapl(rdx); // swap bytes & sign-extend constant
duke@0 1362 __ sarl(rdx, 16);
duke@0 1363 __ addl(iaddress(rbx), rdx);
duke@0 1364 // Note: should probably use only one movl to get both
duke@0 1365 // the index and the constant -> fix this
duke@0 1366 }
duke@0 1367
duke@0 1368
duke@0 1369 void TemplateTable::convert() {
duke@0 1370 // Checking
duke@0 1371 #ifdef ASSERT
duke@0 1372 { TosState tos_in = ilgl;
duke@0 1373 TosState tos_out = ilgl;
duke@0 1374 switch (bytecode()) {
duke@0 1375 case Bytecodes::_i2l: // fall through
duke@0 1376 case Bytecodes::_i2f: // fall through
duke@0 1377 case Bytecodes::_i2d: // fall through
duke@0 1378 case Bytecodes::_i2b: // fall through
duke@0 1379 case Bytecodes::_i2c: // fall through
duke@0 1380 case Bytecodes::_i2s: tos_in = itos; break;
duke@0 1381 case Bytecodes::_l2i: // fall through
duke@0 1382 case Bytecodes::_l2f: // fall through
duke@0 1383 case Bytecodes::_l2d: tos_in = ltos; break;
duke@0 1384 case Bytecodes::_f2i: // fall through
duke@0 1385 case Bytecodes::_f2l: // fall through
duke@0 1386 case Bytecodes::_f2d: tos_in = ftos; break;
duke@0 1387 case Bytecodes::_d2i: // fall through
duke@0 1388 case Bytecodes::_d2l: // fall through
duke@0 1389 case Bytecodes::_d2f: tos_in = dtos; break;
duke@0 1390 default : ShouldNotReachHere();
duke@0 1391 }
duke@0 1392 switch (bytecode()) {
duke@0 1393 case Bytecodes::_l2i: // fall through
duke@0 1394 case Bytecodes::_f2i: // fall through
duke@0 1395 case Bytecodes::_d2i: // fall through
duke@0 1396 case Bytecodes::_i2b: // fall through
duke@0 1397 case Bytecodes::_i2c: // fall through
duke@0 1398 case Bytecodes::_i2s: tos_out = itos; break;
duke@0 1399 case Bytecodes::_i2l: // fall through
duke@0 1400 case Bytecodes::_f2l: // fall through
duke@0 1401 case Bytecodes::_d2l: tos_out = ltos; break;
duke@0 1402 case Bytecodes::_i2f: // fall through
duke@0 1403 case Bytecodes::_l2f: // fall through
duke@0 1404 case Bytecodes::_d2f: tos_out = ftos; break;
duke@0 1405 case Bytecodes::_i2d: // fall through
duke@0 1406 case Bytecodes::_l2d: // fall through
duke@0 1407 case Bytecodes::_f2d: tos_out = dtos; break;
duke@0 1408 default : ShouldNotReachHere();
duke@0 1409 }
duke@0 1410 transition(tos_in, tos_out);
duke@0 1411 }
duke@0 1412 #endif // ASSERT
duke@0 1413
duke@0 1414 // Conversion
never@304 1415 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
duke@0 1416 switch (bytecode()) {
duke@0 1417 case Bytecodes::_i2l:
duke@0 1418 __ extend_sign(rdx, rax);
duke@0 1419 break;
duke@0 1420 case Bytecodes::_i2f:
never@304 1421 __ push(rax); // store int on tos
duke@0 1422 __ fild_s(at_rsp()); // load int to ST0
duke@0 1423 __ f2ieee(); // truncate to float size
never@304 1424 __ pop(rcx); // adjust rsp
duke@0 1425 break;
duke@0 1426 case Bytecodes::_i2d:
never@304 1427 __ push(rax); // add one slot for d2ieee()
never@304 1428 __ push(rax); // store int on tos
duke@0 1429 __ fild_s(at_rsp()); // load int to ST0
duke@0 1430 __ d2ieee(); // truncate to double size
never@304 1431 __ pop(rcx); // adjust rsp
never@304 1432 __ pop(rcx);
duke@0 1433 break;
duke@0 1434 case Bytecodes::_i2b:
duke@0 1435 __ shll(rax, 24); // truncate upper 24 bits
duke@0 1436 __ sarl(rax, 24); // and sign-extend byte
never@304 1437 LP64_ONLY(__ movsbl(rax, rax));
duke@0 1438 break;
duke@0 1439 case Bytecodes::_i2c:
duke@0 1440 __ andl(rax, 0xFFFF); // truncate upper 16 bits
never@304 1441 LP64_ONLY(__ movzwl(rax, rax));
duke@0 1442 break;
duke@0 1443 case Bytecodes::_i2s:
duke@0 1444 __ shll(rax, 16); // truncate upper 16 bits
duke@0 1445 __ sarl(rax, 16); // and sign-extend short
never@304 1446 LP64_ONLY(__ movswl(rax, rax));
duke@0 1447 break;
duke@0 1448 case Bytecodes::_l2i:
duke@0 1449 /* nothing to do */
duke@0 1450 break;
duke@0 1451 case Bytecodes::_l2f:
never@304 1452 __ push(rdx); // store long on tos
never@304 1453 __ push(rax);
duke@0 1454 __ fild_d(at_rsp()); // load long to ST0
duke@0 1455 __ f2ieee(); // truncate to float size
never@304 1456 __ pop(rcx); // adjust rsp
never@304 1457 __ pop(rcx);
duke@0 1458 break;
duke@0 1459 case Bytecodes::_l2d:
never@304 1460 __ push(rdx); // store long on tos
never@304 1461 __ push(rax);
duke@0 1462 __ fild_d(at_rsp()); // load long to ST0
duke@0 1463 __ d2ieee(); // truncate to double size
never@304 1464 __ pop(rcx); // adjust rsp
never@304 1465 __ pop(rcx);
duke@0 1466 break;
duke@0 1467 case Bytecodes::_f2i:
never@304 1468 __ push(rcx); // reserve space for argument
duke@0 1469 __ fstp_s(at_rsp()); // pass float argument on stack
duke@0 1470 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
duke@0 1471 break;
duke@0 1472 case Bytecodes::_f2l:
never@304 1473 __ push(rcx); // reserve space for argument
duke@0 1474 __ fstp_s(at_rsp()); // pass float argument on stack
duke@0 1475 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
duke@0 1476 break;
duke@0 1477 case Bytecodes::_f2d:
duke@0 1478 /* nothing to do */
duke@0 1479 break;
duke@0 1480 case Bytecodes::_d2i:
never@304 1481 __ push(rcx); // reserve space for argument
never@304 1482 __ push(rcx);
duke@0 1483 __ fstp_d(at_rsp()); // pass double argument on stack
duke@0 1484 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
duke@0 1485 break;
duke@0 1486 case Bytecodes::_d2l:
never@304 1487 __ push(rcx); // reserve space for argument
never@304 1488 __ push(rcx);
duke@0 1489 __ fstp_d(at_rsp()); // pass double argument on stack
duke@0 1490 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
duke@0 1491 break;
duke@0 1492 case Bytecodes::_d2f:
never@304 1493 __ push(rcx); // reserve space for f2ieee()
duke@0 1494 __ f2ieee(); // truncate to float size
never@304 1495 __ pop(rcx); // adjust rsp
duke@0 1496 break;
duke@0 1497 default :
duke@0 1498 ShouldNotReachHere();
duke@0 1499 }
duke@0 1500 }
duke@0 1501
duke@0 1502
duke@0 1503 void TemplateTable::lcmp() {
duke@0 1504 transition(ltos, itos);
duke@0 1505 // y = rdx:rax
duke@0 1506 __ pop_l(rbx, rcx); // get x = rcx:rbx
duke@0 1507 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
never@304 1508 __ mov(rax, rcx);
duke@0 1509 }
duke@0 1510
duke@0 1511
duke@0 1512 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
duke@0 1513 if (is_float) {
duke@0 1514 __ fld_s(at_rsp());
duke@0 1515 } else {
duke@0 1516 __ fld_d(at_rsp());
never@304 1517 __ pop(rdx);
duke@0 1518 }
never@304 1519 __ pop(rcx);
duke@0 1520 __ fcmp2int(rax, unordered_result < 0);
duke@0 1521 }
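// fcmp2int folds the x87 condition codes into -1/0/1, mapping NaN
// operands to unordered_result (-1 for fcmpl/dcmpl, +1 for fcmpg/dcmpg).
// A C sketch of the semantics (illustrative, not the generated code):
//
// static jint java_fcmp(jfloat x, jfloat y, jint unordered_result) {
//   if (x != x || y != y) return unordered_result; // NaN compares unordered
//   return (x < y) ? -1 : (x == y) ? 0 : 1;
// }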
duke@0 1522
duke@0 1523
duke@0 1524 void TemplateTable::branch(bool is_jsr, bool is_wide) {
duke@0 1525 __ get_method(rcx); // ECX holds method
duke@0 1526 __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
duke@0 1527
duke@0 1528 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset();
duke@0 1529 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset();
duke@0 1530 const int method_offset = frame::interpreter_frame_method_offset * wordSize;
duke@0 1531
duke@0 1532 // Load up EDX with the branch displacement
duke@0 1533 __ movl(rdx, at_bcp(1));
never@304 1534 __ bswapl(rdx);
duke@0 1535 if (!is_wide) __ sarl(rdx, 16);
never@304 1536 LP64_ONLY(__ movslq(rdx, rdx));
never@304 1537
duke@0 1538
duke@0 1539 // Handle all the JSR stuff here, then exit.
duke@0 1540 // It's much shorter and cleaner than intermingling with the
twisti@605 1541 // non-JSR normal-branch stuff occurring below.
duke@0 1542 if (is_jsr) {
duke@0 1543 // Pre-load the next target bytecode into EBX
duke@0 1544 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
duke@0 1545
duke@0 1546 // compute return address as bci in rax
never@304 1547 __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
never@304 1548 __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
apetrusenko@362 1549 // Adjust the bcp in RSI by the displacement in EDX
never@304 1550 __ addptr(rsi, rdx);
duke@0 1551 // Push return address
duke@0 1552 __ push_i(rax);
duke@0 1553 // jsr returns vtos
duke@0 1554 __ dispatch_only_noverify(vtos);
duke@0 1555 return;
duke@0 1556 }
duke@0 1557
duke@0 1558 // Normal (non-jsr) branch handling
duke@0 1559
apetrusenko@362 1560 // Adjust the bcp in RSI by the displacement in EDX
never@304 1561 __ addptr(rsi, rdx);
duke@0 1562
duke@0 1563 assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
duke@0 1564 Label backedge_counter_overflow;
duke@0 1565 Label profile_method;
duke@0 1566 Label dispatch;
duke@0 1567 if (UseLoopCounter) {
duke@0 1568 // increment backedge counter for backward branches
duke@0 1569 // rax: MDO
duke@0 1570 // rbx: MDO bumped taken-count
duke@0 1571 // rcx: method
duke@0 1572 // rdx: target offset
duke@0 1573 // rsi: target bcp
duke@0 1574 // rdi: locals pointer
duke@0 1575 __ testl(rdx, rdx); // check if forward or backward branch
duke@0 1576 __ jcc(Assembler::positive, dispatch); // count only if backward branch
duke@0 1577
iveresov@1707 1578 if (TieredCompilation) {
iveresov@1707 1579 Label no_mdo;
iveresov@1707 1580 int increment = InvocationCounter::count_increment;
iveresov@1707 1581 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
iveresov@1707 1582 if (ProfileInterpreter) {
iveresov@1707 1583 // Are we profiling?
iveresov@1707 1584 __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
iveresov@1707 1585 __ testptr(rbx, rbx);
iveresov@1707 1586 __ jccb(Assembler::zero, no_mdo);
iveresov@1707 1587 // Increment the MDO backedge counter
iveresov@1707 1588 const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
iveresov@1707 1589 in_bytes(InvocationCounter::counter_offset()));
iveresov@1707 1590 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
iveresov@1707 1591 rax, false, Assembler::zero, &backedge_counter_overflow);
iveresov@1707 1592 __ jmp(dispatch);
duke@0 1593 }
iveresov@1707 1594 __ bind(no_mdo);
iveresov@1707 1595 // Increment backedge counter in methodOop
iveresov@1707 1596 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
iveresov@1707 1597 rax, false, Assembler::zero, &backedge_counter_overflow);
duke@0 1598 } else {
iveresov@1707 1599 // increment counter
iveresov@1707 1600 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
iveresov@1707 1601 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
iveresov@1707 1602 __ movl(Address(rcx, be_offset), rax); // store counter
iveresov@1707 1603
iveresov@1707 1604 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
iveresov@1707 1605 __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
iveresov@1707 1606 __ addl(rax, Address(rcx, be_offset)); // add both counters
iveresov@1707 1607
iveresov@1707 1608 if (ProfileInterpreter) {
iveresov@1707 1609 // Test to see if we should create a method data oop
duke@0 1610 __ cmp32(rax,
iveresov@1707 1611 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
iveresov@1707 1612 __ jcc(Assembler::less, dispatch);
iveresov@1707 1613
iveresov@1707 1614 // if no method data exists, go to profile method
iveresov@1707 1615 __ test_method_data_pointer(rax, profile_method);
iveresov@1707 1616
iveresov@1707 1617 if (UseOnStackReplacement) {
iveresov@1707 1618 // check for overflow against rbx, which is the MDO taken count
iveresov@1707 1619 __ cmp32(rbx,
iveresov@1707 1620 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
iveresov@1707 1621 __ jcc(Assembler::below, dispatch);
iveresov@1707 1622
iveresov@1707 1623 // When ProfileInterpreter is on, the backedge_count comes from the
iveresov@1707 1624 // methodDataOop, whose value does not get reset on the call to
iveresov@1707 1625 // frequency_counter_overflow(). To avoid excessive calls to the overflow
iveresov@1707 1626 // routine while the method is being compiled, add a second test to make
iveresov@1707 1627 // sure the overflow function is called only once every overflow_frequency.
iveresov@1707 1628 const int overflow_frequency = 1024;
iveresov@1707 1629 __ andptr(rbx, overflow_frequency-1);
iveresov@1707 1630 __ jcc(Assembler::zero, backedge_counter_overflow);
iveresov@1707 1631 }
iveresov@1707 1632 } else {
iveresov@1707 1633 if (UseOnStackReplacement) {
iveresov@1707 1634 // check for overflow against rax, which is the sum of the counters
iveresov@1707 1635 __ cmp32(rax,
iveresov@1707 1636 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
iveresov@1707 1637 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
iveresov@1707 1638
iveresov@1707 1639 }
duke@0 1640 }
duke@0 1641 }
duke@0 1642 __ bind(dispatch);
duke@0 1643 }
duke@0 1644
duke@0 1645 // Pre-load the next target bytecode into EBX
duke@0 1646 __ load_unsigned_byte(rbx, Address(rsi, 0));
duke@0 1647
duke@0 1648 // continue with the bytecode @ target
duke@0 1649 // rax: return bci for jsr's, unused otherwise
duke@0 1650 // rbx: target bytecode
duke@0 1651 // rsi: target bcp
duke@0 1652 __ dispatch_only(vtos);
duke@0 1653
duke@0 1654 if (UseLoopCounter) {
duke@0 1655 if (ProfileInterpreter) {
duke@0 1656 // Out-of-line code to allocate method data oop.
duke@0 1657 __ bind(profile_method);
duke@0 1658 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
duke@0 1659 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
never@304 1660 __ movptr(rcx, Address(rbp, method_offset));
never@304 1661 __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
never@304 1662 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
duke@0 1663 __ test_method_data_pointer(rcx, dispatch);
duke@0 1664 // offset non-null mdp by MDO::data_offset() + IR::profile_method()
never@304 1665 __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
never@304 1666 __ addptr(rcx, rax);
never@304 1667 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
duke@0 1668 __ jmp(dispatch);
duke@0 1669 }
duke@0 1670
duke@0 1671 if (UseOnStackReplacement) {
duke@0 1672
duke@0 1673 // invocation counter overflow
duke@0 1674 __ bind(backedge_counter_overflow);
never@304 1675 __ negptr(rdx);
never@304 1676 __ addptr(rdx, rsi); // branch bcp
duke@0 1677 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
duke@0 1678 __ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
duke@0 1679
duke@0 1680 // rax: osr nmethod (osr ok) or NULL (osr not possible)
duke@0 1681 // rbx: target bytecode
duke@0 1682 // rdx: scratch
duke@0 1683 // rdi: locals pointer
duke@0 1684 // rsi: bcp
never@304 1685 __ testptr(rax, rax); // test result
duke@0 1686 __ jcc(Assembler::zero, dispatch); // no osr if null
duke@0 1687 // nmethod may have been invalidated (VM may block upon call_VM return)
duke@0 1688 __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
duke@0 1689 __ cmpl(rcx, InvalidOSREntryBci);
duke@0 1690 __ jcc(Assembler::equal, dispatch);
duke@0 1691
duke@0 1692 // We have the address of an on-stack replacement routine in rax.
duke@0 1693 // We need to prepare to execute the OSR method. First we must
duke@0 1694 // migrate the locals and monitors off of the stack.
duke@0 1695
never@304 1696 __ mov(rbx, rax); // save the nmethod
duke@0 1697
duke@0 1698 const Register thread = rcx;
duke@0 1699 __ get_thread(thread);
duke@0 1700 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
duke@0 1701 // rax is the OSR buffer; move it to the expected parameter location
never@304 1702 __ mov(rcx, rax);
duke@0 1703
duke@0 1704 // pop the interpreter frame
never@304 1705 __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
duke@0 1706 __ leave(); // remove frame anchor
never@304 1707 __ pop(rdi); // get return address
never@304 1708 __ mov(rsp, rdx); // set sp to sender sp
duke@0 1709
duke@0 1710
duke@0 1711 Label skip;
duke@0 1712 Label chkint;
duke@0 1713
duke@0 1714 // The interpreter frame we have removed may be returning to
duke@0 1715 // either the call stub or the interpreter. Since we will
duke@0 1716 // now be returning from a compiled (OSR) nmethod we must
duke@0 1717 // adjust the return address to one where it can handle compiled
duke@0 1718 // results and clean the FPU stack. This is very similar to
duke@0 1719 // what an i2c adapter must do.
duke@0 1720
duke@0 1721 // Are we returning to the call stub?
duke@0 1722
duke@0 1723 __ cmp32(rdi, ExternalAddress(StubRoutines::_call_stub_return_address));
duke@0 1724 __ jcc(Assembler::notEqual, chkint);
duke@0 1725
duke@0 1726 // yes: adjust to the specialized call stub return.
never@304 1727 assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
never@304 1728 __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
duke@0 1729 __ jmp(skip);
duke@0 1730
duke@0 1731 __ bind(chkint);
duke@0 1732
duke@0 1733 // Are we returning to the interpreter? Look for sentinel
duke@0 1734
never@304 1735 __ cmpl(Address(rdi, -2*wordSize), Interpreter::return_sentinel);
duke@0 1736 __ jcc(Assembler::notEqual, skip);
duke@0 1737
duke@0 1738 // Adjust to compiled return back to interpreter
duke@0 1739
never@304 1740 __ movptr(rdi, Address(rdi, -wordSize));
duke@0 1741 __ bind(skip);
duke@0 1742
duke@0 1743 // Align stack pointer for compiled code (note that caller is
duke@0 1744 // responsible for undoing this fixup by remembering the old SP
duke@0 1745 // in an rbp,-relative location)
never@304 1746 __ andptr(rsp, -(StackAlignmentInBytes));
duke@0 1747
duke@0 1748 // push the (possibly adjusted) return address
never@304 1749 __ push(rdi);
duke@0 1750
duke@0 1751 // and begin the OSR nmethod
sgoldman@107 1752 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
duke@0 1753 }
duke@0 1754 }
duke@0 1755 }
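// The displacement decode at the top of branch() reads four bytecode
// bytes at bcp+1, byte-swaps them to native order, and for the non-wide
// case shifts right arithmetically by 16 so only the first two
// (big-endian) bytes survive, sign-extended. A C sketch (illustrative;
// bcp points at the branch opcode):
//
// static jint branch_offset(const u1* bcp, bool is_wide) {
//   jint d = (jint)(((u4)bcp[1] << 24) | ((u4)bcp[2] << 16) |
//                   ((u4)bcp[3] <<  8) |  (u4)bcp[4]);
//   return is_wide ? d : (d >> 16); // arithmetic shift sign-extends the 16-bit offset
// }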
duke@0 1756
duke@0 1757
duke@0 1758 void TemplateTable::if_0cmp(Condition cc) {
duke@0 1759 transition(itos, vtos);
duke@0 1760 // assume branch is more often taken than not (loops use backward branches)
duke@0 1761 Label not_taken;
duke@0 1762 __ testl(rax, rax);
duke@0 1763 __ jcc(j_not(cc), not_taken);
duke@0 1764 branch(false, false);
duke@0 1765 __ bind(not_taken);
duke@0 1766 __ profile_not_taken_branch(rax);
duke@0 1767 }
duke@0 1768
duke@0 1769
duke@0 1770 void TemplateTable::if_icmp(Condition cc) {
duke@0 1771 transition(itos, vtos);
duke@0 1772 // assume branch is more often taken than not (loops use backward branches)
duke@0 1773 Label not_taken;
duke@0 1774 __ pop_i(rdx);
duke@0 1775 __ cmpl(rdx, rax);
duke@0 1776 __ jcc(j_not(cc), not_taken);
duke@0 1777 branch(false, false);
duke@0 1778 __ bind(not_taken);
duke@0 1779 __ profile_not_taken_branch(rax);
duke@0 1780 }
duke@0 1781
duke@0 1782
duke@0 1783 void TemplateTable::if_nullcmp(Condition cc) {
duke@0 1784 transition(atos, vtos);
duke@0 1785 // assume branch is more often taken than not (loops use backward branches)
duke@0 1786 Label not_taken;
never@304 1787 __ testptr(rax, rax);
duke@0 1788 __ jcc(j_not(cc), not_taken);
duke@0 1789 branch(false, false);
duke@0 1790 __ bind(not_taken);
duke@0 1791 __ profile_not_taken_branch(rax);
duke@0 1792 }
duke@0 1793
duke@0 1794
duke@0 1795 void TemplateTable::if_acmp(Condition cc) {
duke@0 1796 transition(atos, vtos);
duke@0 1797 // assume branch is more often taken than not (loops use backward branches)
duke@0 1798 Label not_taken;
duke@0 1799 __ pop_ptr(rdx);
never@304 1800 __ cmpptr(rdx, rax);
duke@0 1801 __ jcc(j_not(cc), not_taken);
duke@0 1802 branch(false, false);
duke@0 1803 __ bind(not_taken);
duke@0 1804 __ profile_not_taken_branch(rax);
duke@0 1805 }
duke@0 1806
duke@0 1807
duke@0 1808 void TemplateTable::ret() {
duke@0 1809 transition(vtos, vtos);
duke@0 1810 locals_index(rbx);
never@304 1811 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
duke@0 1812 __ profile_ret(rbx, rcx);
duke@0 1813 __ get_method(rax);
never@304 1814 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
never@304 1815 __ lea(rsi, Address(rsi, rbx, Address::times_1,
never@304 1816 constMethodOopDesc::codes_offset()));
duke@0 1817 __ dispatch_next(vtos);
duke@0 1818 }
duke@0 1819
duke@0 1820
duke@0 1821 void TemplateTable::wide_ret() {
duke@0 1822 transition(vtos, vtos);
duke@0 1823 locals_index_wide(rbx);
never@304 1824 __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
duke@0 1825 __ profile_ret(rbx, rcx);
duke@0 1826 __ get_method(rax);
never@304 1827 __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
never@304 1828 __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
duke@0 1829 __ dispatch_next(vtos);
duke@0 1830 }
duke@0 1831
duke@0 1832
duke@0 1833 void TemplateTable::tableswitch() {
duke@0 1834 Label default_case, continue_execution;
duke@0 1835 transition(itos, vtos);
duke@0 1836 // align the bcp-derived table address in rbx
never@304 1837 __ lea(rbx, at_bcp(wordSize));
never@304 1838 __ andptr(rbx, -wordSize);
duke@0 1839 // load lo & hi
duke@0 1840 __ movl(rcx, Address(rbx, 1 * wordSize));
duke@0 1841 __ movl(rdx, Address(rbx, 2 * wordSize));
never@304 1842 __ bswapl(rcx);
never@304 1843 __ bswapl(rdx);
duke@0 1844 // check against lo & hi
duke@0 1845 __ cmpl(rax, rcx);
duke@0 1846 __ jccb(Assembler::less, default_case);
duke@0 1847 __ cmpl(rax, rdx);
duke@0 1848 __ jccb(Assembler::greater, default_case);
duke@0 1849 // lookup dispatch offset
duke@0 1850 __ subl(rax, rcx);
never@304 1851 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
duke@0 1852 __ profile_switch_case(rax, rbx, rcx);
duke@0 1853 // continue execution
duke@0 1854 __ bind(continue_execution);
never@304 1855 __ bswapl(rdx);
duke@0 1856 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
never@304 1857 __ addptr(rsi, rdx);
duke@0 1858 __ dispatch_only(vtos);
duke@0 1859 // handle default
duke@0 1860 __ bind(default_case);
duke@0 1861 __ profile_switch_default(rax);
duke@0 1862 __ movl(rdx, Address(rbx, 0));
duke@0 1863 __ jmp(continue_execution);
duke@0 1864 }
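// Layout walked above, after rounding the bcp past the opcode up to a
// word boundary (all words are 4-byte big-endian, and all offsets are
// relative to the tableswitch bcp). Sketch of the structure, not a type
// used by the VM:
//
// struct tableswitch_layout {
//   jint default_offset;  // taken when key < low or key > high
//   jint low;
//   jint high;
//   jint offsets[1];      // really (high - low + 1) entries
// };
// // dispatch: bcp += (low <= key && key <= high) ? offsets[key - low]
// //                                              : default_offset;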
duke@0 1865
duke@0 1866
duke@0 1867 void TemplateTable::lookupswitch() {
duke@0 1868 transition(itos, itos);
duke@0 1869 __ stop("lookupswitch bytecode should have been rewritten");
duke@0 1870 }
duke@0 1871
duke@0 1872
duke@0 1873 void TemplateTable::fast_linearswitch() {
duke@0 1874 transition(itos, vtos);
duke@0 1875 Label loop_entry, loop, found, continue_execution;
never@304 1876 // bswapl rax, so we can avoid bswapping the table entries
never@304 1877 __ bswapl(rax);
duke@0 1878 // align the bcp-derived table address in rbx
never@304 1879 __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
never@304 1880 __ andptr(rbx, -wordSize);
duke@0 1881 // set counter
duke@0 1882 __ movl(rcx, Address(rbx, wordSize));
never@304 1883 __ bswapl(rcx);
duke@0 1884 __ jmpb(loop_entry);
duke@0 1885 // table search
duke@0 1886 __ bind(loop);
duke@0 1887 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
duke@0 1888 __ jccb(Assembler::equal, found);
duke@0 1889 __ bind(loop_entry);
never@304 1890 __ decrementl(rcx);
duke@0 1891 __ jcc(Assembler::greaterEqual, loop);
duke@0 1892 // default case
duke@0 1893 __ profile_switch_default(rax);
duke@0 1894 __ movl(rdx, Address(rbx, 0));
duke@0 1895 __ jmpb(continue_execution);
duke@0 1896 // entry found -> get offset
duke@0 1897 __ bind(found);
duke@0 1898 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
duke@0 1899 __ profile_switch_case(rcx, rax, rbx);
duke@0 1900 // continue execution
duke@0 1901 __ bind(continue_execution);
never@304 1902 __ bswapl(rdx);
duke@0 1903 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
never@304 1904 __ addptr(rsi, rdx);
duke@0 1905 __ dispatch_only(vtos);
duke@0 1906 }
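// Equivalent C sketch of the backwards linear scan above (the key is
// bswapped once so the big-endian match words can be compared directly;
// 'pair' is an illustrative struct of two jints, not a VM type):
//
// static jint linear_switch(jint key, const pair* pairs, jint n,
//                           jint default_offset) {
//   for (jint i = n - 1; i >= 0; i--) {
//     if (pairs[i].match == key) return pairs[i].offset;
//   }
//   return default_offset;
// }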
duke@0 1907
duke@0 1908
duke@0 1909 void TemplateTable::fast_binaryswitch() {
duke@0 1910 transition(itos, vtos);
duke@0 1911 // Implementation using the following core algorithm:
duke@0 1912 //
duke@0 1913 // int binary_search(int key, LookupswitchPair* array, int n) {
duke@0 1914 // // Binary search according to "Methodik des Programmierens" by
duke@0 1915 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
duke@0 1916 // int i = 0;
duke@0 1917 // int j = n;
duke@0 1918 // while (i+1 < j) {
duke@0 1919 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
duke@0 1920 // // with Q: for all i: 0 <= i < n: key < a[i]
duke@0 1921 // // where a stands for the array and assuming that the (nonexistent)
duke@0 1922 // // element a[n] is infinitely big.
duke@0 1923 // int h = (i + j) >> 1;
duke@0 1924 // // i < h < j
duke@0 1925 // if (key < array[h].fast_match()) {
duke@0 1926 // j = h;
duke@0 1927 // } else {
duke@0 1928 // i = h;
duke@0 1929 // }
duke@0 1930 // }
duke@0 1931 // // R: a[i] <= key < a[i+1] or Q
duke@0 1932 // // (i.e., if key is within array, i is the correct index)
duke@0 1933 // return i;
duke@0 1934 // }
duke@0 1935
duke@0 1936 // register allocation
duke@0 1937 const Register key = rax; // already set (tosca)
duke@0 1938 const Register array = rbx;
duke@0 1939 const Register i = rcx;
duke@0 1940 const Register j = rdx;
duke@0 1941 const Register h = rdi; // needs to be restored
duke@0 1942 const Register temp = rsi;
duke@0 1943 // setup array
duke@0 1944 __ save_bcp();
duke@0 1945
never@304 1946 __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
never@304 1947 __ andptr(array, -wordSize);
duke@0 1948 // initialize i & j
duke@0 1949 __ xorl(i, i); // i = 0;
duke@0 1950 __ movl(j, Address(array, -wordSize)); // j = length(array);
duke@0 1951 // Convert j into native byte ordering
never@304 1952 __ bswapl(j);
duke@0 1953 // and start
duke@0 1954 Label entry;
duke@0 1955 __ jmp(entry);
duke@0 1956
duke@0 1957 // binary search loop
duke@0 1958 { Label loop;
duke@0 1959 __ bind(loop);
duke@0 1960 // int h = (i + j) >> 1;
duke@0 1961 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
duke@0 1962 __ sarl(h, 1); // h = (i + j) >> 1;
duke@0 1963 // if (key < array[h].fast_match()) {
duke@0 1964 // j = h;
duke@0 1965 // } else {
duke@0 1966 // i = h;
duke@0 1967 // }
duke@0 1968 // Convert array[h].match to native byte-ordering before compare
duke@0 1969 __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
never@304 1970 __ bswapl(temp);
duke@0 1971 __ cmpl(key, temp);
duke@0 1972 if (VM_Version::supports_cmov()) {
duke@0 1973 __ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
duke@0 1974 __ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
duke@0 1975 } else {
duke@0 1976 Label set_i, end_of_if;
never@304 1977 __ jccb(Assembler::greaterEqual, set_i); // {
never@304 1978 __ mov(j, h); // j = h;
never@304 1979 __ jmp(end_of_if); // }
never@304 1980 __ bind(set_i); // else {
never@304 1981 __ mov(i, h); // i = h;
never@304 1982 __ bind(end_of_if); // }
duke@0 1983 }
duke@0 1984 // while (i+1 < j)
duke@0 1985 __ bind(entry);
duke@0 1986 __ leal(h, Address(i, 1)); // i+1
duke@0 1987 __ cmpl(h, j); // i+1 < j
duke@0 1988 __ jcc(Assembler::less, loop);
duke@0 1989 }
duke@0 1990
duke@0 1991 // end of binary search, result index is i (must check again!)
duke@0 1992 Label default_case;
duke@0 1993 // Convert array[i].match to native byte-ordering before compare
duke@0 1994 __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
never@304 1995 __ bswapl(temp);
duke@0 1996 __ cmpl(key, temp);
duke@0 1997 __ jcc(Assembler::notEqual, default_case);
duke@0 1998
duke@0 1999 // entry found -> j = offset
duke@0 2000 __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
duke@0 2001 __ profile_switch_case(i, key, array);
never@304 2002 __ bswapl(j);
never@304 2003 LP64_ONLY(__ movslq(j, j));
duke@0 2004 __ restore_bcp();
duke@0 2005 __ restore_locals(); // restore rdi
duke@0 2006 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
duke@0 2007
never@304 2008 __ addptr(rsi, j);
duke@0 2009 __ dispatch_only(vtos);
duke@0 2010
duke@0 2011 // default case -> j = default offset
duke@0 2012 __ bind(default_case);
duke@0 2013 __ profile_switch_default(i);
duke@0 2014 __ movl(j, Address(array, -2*wordSize));
never@304 2015 __ bswapl(j);
never@304 2016 LP64_ONLY(__ movslq(j, j));
duke@0 2017 __ restore_bcp();
duke@0 2018 __ restore_locals(); // restore rdi
duke@0 2019 __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
never@304 2020 __ addptr(rsi, j);
duke@0 2021 __ dispatch_only(vtos);
duke@0 2022 }
duke@0 2023
duke@0 2024
duke@0 2025 void TemplateTable::_return(TosState state) {
duke@0 2026 transition(state, state);
duke@0 2027 assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
duke@0 2028
duke@0 2029 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
duke@0 2030 assert(state == vtos, "only valid state");
never@304 2031 __ movptr(rax, aaddress(0));
never@304 2032 __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
duke@0 2033 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
duke@0 2034 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
duke@0 2035 Label skip_register_finalizer;
duke@0 2036 __ jcc(Assembler::zero, skip_register_finalizer);
duke@0 2037
duke@0 2038 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
duke@0 2039
duke@0 2040 __ bind(skip_register_finalizer);
duke@0 2041 }
duke@0 2042
duke@0 2043 __ remove_activation(state, rsi);
duke@0 2044 __ jmp(rsi);
duke@0 2045 }
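// Sketch of the finalizer check performed above for
// _return_register_finalizer, in C++-like pseudocode (the receiver is
// local 0; illustrative only, not VM API):
//
// if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER) {
//   InterpreterRuntime::register_finalizer(thread, receiver);
// }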
duke@0 2046
duke@0 2047
duke@0 2048 // ----------------------------------------------------------------------------
duke@0 2049 // Volatile variables demand their effects be made known to all CPUs in
duke@0 2050 // order. Store buffers on most chips allow reads & writes to reorder; the
duke@0 2051 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
duke@0 2052 // memory barrier (i.e., it's not sufficient that the interpreter does not
duke@0 2053 // reorder volatile references, the hardware also must not reorder them).
duke@0 2054 //
duke@0 2055 // According to the new Java Memory Model (JMM):
duke@0 2056 // (1) All volatiles are serialized wrt to each other.
duke@0 2057 // ALSO reads & writes act as acquire & release, so:
duke@0 2058 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
duke@0 2059 // the read float up to before the read. It's OK for non-volatile memory refs
duke@0 2060 // that happen before the volatile read to float down below it.
duke@0 2061 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
duke@0 2062 // that happen BEFORE the write float down to after the write. It's OK for
duke@0 2063 // non-volatile memory refs that happen after the volatile write to float up
duke@0 2064 // before it.
duke@0 2065 //
duke@0 2066 // We only put in barriers around volatile refs (they are expensive), not
duke@0 2067 // _between_ memory refs (that would require us to track the flavor of the
duke@0 2068 // previous memory refs). Requirements (2) and (3) require some barriers
duke@0 2069 // before volatile stores and after volatile loads. These nearly cover
duke@0 2070 // requirement (1) but miss the volatile-store-volatile-load case. This final
duke@0 2071 // case is placed after volatile-stores although it could just as well go
duke@0 2072 // before volatile-loads.
never@304 2073 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
duke@0 2074 // Helper function to insert an is-volatile test and memory barrier
duke@0 2075 if( !os::is_MP() ) return; // Not needed on single CPU
never@304 2076 __ membar(order_constraint);
duke@0 2077 }
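// The code below only emits the StoreLoad|StoreStore barrier after
// volatile stores; x86's ordering covers the other cases. A generic
// placement satisfying rules (1)-(3) above, sketched as pseudocode
// (membar flavors as used elsewhere in this file):
//
// volatile load:   v = *field;  membar(LoadLoad | LoadStore);  // acquire
// volatile store:  membar(LoadStore | StoreStore);             // release
//                  *field = v;
//                  membar(StoreLoad);  // rule (1): order store before next volatile load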
duke@0 2078
jrose@1486 2079 void TemplateTable::resolve_cache_and_index(int byte_no,
jrose@1486 2080 Register result,
jrose@1486 2081 Register Rcache,
jrose@1486 2082 Register index,
jrose@1486 2083 size_t index_size) {
duke@0 2084 Register temp = rbx;
duke@0 2085
jrose@1486 2086 assert_different_registers(result, Rcache, index, temp);
jrose@1486 2087
duke@0 2088 Label resolved;
jrose@1486 2089 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
jrose@1486 2090 if (byte_no == f1_oop) {
jrose@1486 2091 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
jrose@1486 2092 // This kind of CP cache entry does not need to match the flags byte, because
jrose@1486 2093 // there is a 1-1 relation between bytecode type and CP entry type.
jrose@1486 2094 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD)
jrose@1486 2095 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
jrose@1486 2096 __ testptr(result, result);
jrose@726 2097 __ jcc(Assembler::notEqual, resolved);
jrose@726 2098 } else {
jrose@1486 2099 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
jrose@1486 2100 assert(result == noreg, ""); //else change code for setting result
jrose@1486 2101 const int shift_count = (1 + byte_no)*BitsPerByte;
jrose@726 2102 __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
jrose@726 2103 __ shrl(temp, shift_count);
jrose@726 2104 // have we resolved this bytecode?
jrose@726 2105 __ andl(temp, 0xFF);
jrose@726 2106 __ cmpl(temp, (int)bytecode());
jrose@726 2107 __ jcc(Assembler::equal, resolved);
jrose@726 2108 }
duke@0 2109
duke@0 2110 // resolve first time through
duke@0 2111 address entry;
duke@0 2112 switch (bytecode()) {
duke@0 2113 case Bytecodes::_getstatic : // fall through
duke@0 2114 case Bytecodes::_putstatic : // fall through
duke@0 2115 case Bytecodes::_getfield : // fall through
duke@0 2116 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
duke@0 2117 case Bytecodes::_invokevirtual : // fall through
duke@0 2118 case Bytecodes::_invokespecial : // fall through
duke@0 2119 case Bytecodes::_invokestatic : // fall through
duke@0 2120 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
jrose@726 2121 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
jrose@1524 2122 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
jrose@1524 2123 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
duke@0 2124 default : ShouldNotReachHere(); break;
duke@0 2125 }
duke@0 2126 __ movl(temp, (int)bytecode());
duke@0 2127 __ call_VM(noreg, entry, temp);
duke@0 2128 // Update registers with resolved info
jrose@1486 2129 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
jrose@1486 2130 if (result != noreg)
jrose@1486 2131 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()));
duke@0 2132 __ bind(resolved);
duke@0 2133 }
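// The resolution check above, sketched in C (illustrative): for f1_oop
// entries the non-null CallSite oop in f1 is the "resolved" marker;
// otherwise the current bytecode is compared against the byte_no-th byte
// of the cache entry's indices word.
//
// bool is_resolved = (byte_no == f1_oop)
//     ? (entry->f1() != NULL)
//     : (((entry->indices() >> ((1 + byte_no) * BitsPerByte)) & 0xFF)
//        == (u4) bytecode);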
duke@0 2134
duke@0 2135
duke@0 2136 // The cache and index registers must be set before the call
duke@0 2137 void TemplateTable::load_field_cp_cache_entry(Register obj,
duke@0 2138 Register cache,
duke@0 2139 Register index,
duke@0 2140 Register off,
duke@0 2141 Register flags,
duke@0 2142 bool is_static = false) {
duke@0 2143 assert_different_registers(cache, index, flags, off);
duke@0 2144
duke@0 2145 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
duke@0 2146 // Field offset
never@304 2147 __ movptr(off, Address(cache, index, Address::times_ptr,
never@304 2148 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
duke@0 2149 // Flags
never@304 2150 __ movl(flags, Address(cache, index, Address::times_ptr,
duke@0 2151 in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
duke@0 2152
duke@0 2153 // klass overwrite register
duke@0 2154 if (is_static) {
never@304 2155 __ movptr(obj, Address(cache, index, Address::times_ptr,
never@304 2156 in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
duke@0 2157 }
duke@0 2158 }
duke@0 2159
duke@0 2160 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
duke@0 2161 Register method,
duke@0 2162 Register itable_index,
duke@0 2163 Register flags,
duke@0 2164 bool is_invokevirtual,
jrose@1486 2165 bool is_invokevfinal /*unused*/,
jrose@1486 2166 bool is_invokedynamic) {
duke@0 2167 // setup registers
duke@0 2168 const Register cache = rcx;
duke@0 2169 const Register index = rdx;
duke@0 2170 assert_different_registers(method, flags);
duke@0 2171 assert_different_registers(method, cache, index);
duke@0 2172 assert_different_registers(itable_index, flags);
duke@0 2173 assert_different_registers(itable_index, cache, index);
duke@0 2174 // determine constant pool cache field offsets
duke@0 2175 const int method_offset = in_bytes(
duke@0 2176 constantPoolCacheOopDesc::base_offset() +
duke@0 2177 (is_invokevirtual
duke@0 2178 ? ConstantPoolCacheEntry::f2_offset()
duke@0 2179 : ConstantPoolCacheEntry::f1_offset()
duke@0 2180 )
duke@0 2181 );
duke@0 2182 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
duke@0 2183 ConstantPoolCacheEntry::flags_offset());
duke@0 2184 // access constant pool cache fields
duke@0 2185 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
duke@0 2186 ConstantPoolCacheEntry::f2_offset());
duke@0 2187
jrose@1486 2188 if (byte_no == f1_oop) {
jrose@1486 2189 // Resolved f1_oop goes directly into 'method' register.
jrose@1486 2190 assert(is_invokedynamic, "");
jrose@1486 2191 resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
jrose@1486 2192 } else {
jrose@1486 2193 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
jrose@1486 2194 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
jrose@1486 2195 }
duke@0 2196 if (itable_index != noreg) {
never@304 2197 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
duke@0 2198 }
jrose@1486 2199 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
duke@0 2200 }
duke@0 2201
duke@0 2202
duke@0 2203 // The cache and index registers are expected to be set before the call.
duke@0 2204 // Correct values of the cache and index registers are preserved.
duke@0 2205 void TemplateTable::jvmti_post_field_access(Register cache,
duke@0 2206 Register index,
duke@0 2207 bool is_static,
duke@0 2208 bool has_tos) {
duke@0 2209 if (JvmtiExport::can_post_field_access()) {
duke@0 2210 // Check to see if a field access watch has been set before we take
duke@0 2211 // the time to call into the VM.
duke@0 2212 Label L1;
duke@0 2213 assert_different_registers(cache, index, rax);
duke@0 2214 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
duke@0 2215 __ testl(rax,rax);
duke@0 2216 __ jcc(Assembler::zero, L1);
duke@0 2217
duke@0 2218 // cache entry pointer
never@304 2219 __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
duke@0 2220 __ shll(index, LogBytesPerWord);
never@304 2221 __ addptr(cache, index);
duke@0 2222 if (is_static) {
never@304 2223 __ xorptr(rax, rax); // NULL object reference
duke@0 2224 } else {
duke@0 2225 __ pop(atos); // Get the object
duke@0 2226 __ verify_oop(rax);
duke@0 2227 __ push(atos); // Restore stack state
duke@0 2228 }
duke@0 2229 // rax: object pointer or NULL
duke@0 2230 // cache: cache entry pointer
duke@0 2231 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
duke@0 2232 rax, cache);
duke@0 2233 __ get_cache_and_index_at_bcp(cache, index, 1);
duke@0 2234 __ bind(L1);
duke@0 2235 }
duke@0 2236 }
duke@0 2237
duke@0 2238 void TemplateTable::pop_and_check_object(Register r) {
duke@0 2239 __ pop_ptr(r);
duke@0 2240 __ null_check(r); // for field access must check obj.
duke@0 2241 __ verify_oop(r);
duke@0 2242 }
duke@0 2243
duke@0 2244 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
duke@0 2245 transition(vtos, vtos);
duke@0 2246
duke@0 2247 const Register cache = rcx;
duke@0 2248 const Register index = rdx;
duke@0 2249 const Register obj = rcx;
duke@0 2250 const Register off = rbx;
duke@0 2251 const Register flags = rax;
duke@0 2252
jrose@1486 2253 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
duke@0 2254 jvmti_post_field_access(cache, index, is_static, false);
duke@0 2255 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
duke@0 2256
duke@0 2257 if (!is_static) pop_and_check_object(obj);
duke@0 2258
duke@0 2259 const Address lo(obj, off, Address::times_1, 0*wordSize);
duke@0 2260 const Address hi(obj, off, Address::times_1, 1*wordSize);
duke@0 2261
duke@0 2262 Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
duke@0 2263
duke@0 2264 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
duke@0 2265 assert(btos == 0, "change code, btos != 0");
duke@0 2266 // btos
never@304 2267 __ andptr(flags, 0x0f);
duke@0 2268 __ jcc(Assembler::notZero, notByte);
duke@0 2269
duke@0 2270 __ load_signed_byte(rax, lo );
duke@0 2271 __ push(btos);
duke@0 2272 // Rewrite bytecode to be faster
duke@0 2273 if (!is_static) {
duke@0 2274 patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
duke@0 2275 }
duke@0 2276 __ jmp(Done);
duke@0 2277
duke@0 2278 __ bind(notByte);
duke@0 2279 // itos
duke@0 2280 __ cmpl(flags, itos );
duke@0 2281 __ jcc(Assembler::notEqual, notInt);
duke@0 2282
duke@0 2283 __ movl(rax, lo );
duke@0 2284 __ push(itos);
duke@0 2285 // Rewrite bytecode to be faster
duke@0 2286 if (!is_static) {
duke@0 2287 patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
duke@0 2288 }
duke@0 2289 __ jmp(Done);
duke@0 2290
duke@0 2291 __ bind(notInt);
duke@0 2292 // atos
duke@0 2293 __ cmpl(flags, atos );
duke@0 2294 __ jcc(Assembler::notEqual, notObj);
duke@0 2295
duke@0 2296 __ movl(rax, lo );
duke@0 2297 __ push(atos);
duke@0 2298 if (!is_static) {
duke@0 2299 patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
duke@0 2300 }
duke@0 2301 __ jmp(Done);
duke@0 2302
duke@0 2303 __ bind(notObj);
duke@0 2304 // ctos
duke@0 2305 __ cmpl(flags, ctos );
duke@0 2306 __ jcc(Assembler::notEqual, notChar);
duke@0 2307
jrose@622 2308 __ load_unsigned_short(rax, lo );
duke@0 2309 __ push(ctos);
duke@0 2310 if (!is_static) {
duke@0 2311 patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
duke@0 2312 }
duke@0 2313 __ jmp(Done);
duke@0 2314
duke@0 2315 __ bind(notChar);
duke@0 2316 // stos
duke@0 2317 __ cmpl(flags, stos );
duke@0 2318 __ jcc(Assembler::notEqual, notShort);
duke@0 2319
jrose@622 2320 __ load_signed_short(rax, lo );
duke@0 2321 __ push(stos);
duke@0 2322 if (!is_static) {
duke@0 2323 patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
duke@0 2324 }
duke@0 2325 __ jmp(Done);
duke@0 2326
duke@0 2327 __ bind(notShort);
duke@0 2328 // ltos
duke@0 2329 __ cmpl(flags, ltos );
duke@0 2330 __ jcc(Assembler::notEqual, notLong);
duke@0 2331
duke@0 2332 // Generate code as if volatile. There just aren't enough registers to
duke@0 2333 // save that information and this code is faster than the test.
duke@0 2334 __ fild_d(lo); // Must load atomically
never@304 2335 __ subptr(rsp,2*wordSize); // Make space for store
duke@0 2336 __ fistp_d(Address(rsp,0));
never@304 2337 __ pop(rax);
never@304 2338 __ pop(rdx);
duke@0 2339
duke@0 2340 __ push(ltos);
duke@0 2341 // Don't rewrite to _fast_lgetfield for potential volatile case.
duke@0 2342 __ jmp(Done);
duke@0 2343
duke@0 2344 __ bind(notLong);
duke@0 2345 // ftos
duke@0 2346 __ cmpl(flags, ftos );
duke@0 2347 __ jcc(Assembler::notEqual, notFloat);
duke@0 2348
duke@0 2349 __ fld_s(lo);
duke@0 2350 __ push(ftos);
duke@0 2351 if (!is_static) {
duke@0 2352 patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
duke@0 2353 }
duke@0 2354 __ jmp(Done);
duke@0 2355
duke@0 2356 __ bind(notFloat);
duke@0 2357 // dtos
duke@0 2358 __ cmpl(flags, dtos );
duke@0 2359 __ jcc(Assembler::notEqual, notDouble);
duke@0 2360
duke@0 2361 __ fld_d(lo);
duke@0 2362 __ push(dtos);
duke@0 2363 if (!is_static) {
duke@0 2364 patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
duke@0 2365 }
duke@0 2366 __ jmpb(Done);
duke@0 2367
duke@0 2368 __ bind(notDouble);
duke@0 2369
duke@0 2370 __ stop("Bad state");
duke@0 2371
duke@0 2372 __ bind(Done);
duke@0 2373 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
duke@0 2374 // volatile_barrier( );
duke@0 2375 }
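// The compare chain above is effectively a switch on the tos state held
// in the upper flag bits. Conceptually (sketch only; the push/load helper
// names are illustrative, not VM functions):
//
// switch (flags >> ConstantPoolCacheEntry::tosBits) {
//   case btos: push_i(load_signed_byte(obj + off)); break;
//   case itos: push_i(load_int(obj + off));         break;
//   case atos: push_a(load_ptr(obj + off));         break;
//   // ctos, stos, ltos, ftos, dtos are handled analogously
//   default:   ShouldNotReachHere();
// }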
duke@0 2376
duke@0 2377
duke@0 2378 void TemplateTable::getfield(int byte_no) {
duke@0 2379 getfield_or_static(byte_no, false);
duke@0 2380 }
duke@0 2381
duke@0 2382
duke@0 2383 void TemplateTable::getstatic(int byte_no) {
duke@0 2384 getfield_or_static(byte_no, true);
duke@0 2385 }
duke@0 2386
duke@0 2387 // The cache and index registers are expected to be set before the call.
duke@0 2388 // The function may destroy various registers, just not the cache and index registers.
duke@0 2389 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
duke@0 2390
duke@0 2391 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
duke@0 2392
duke@0 2393 if (JvmtiExport::can_post_field_modification()) {
duke@0 2394 // Check to see if a field modification watch has been set before we take
duke@0 2395 // the time to call into the VM.
duke@0 2396 Label L1;
duke@0 2397 assert_different_registers(cache, index, rax);
duke@0 2398 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
duke@0 2399 __ testl(rax, rax);
duke@0 2400 __ jcc(Assembler::zero, L1);
duke@0 2401
duke@0 2402 // The cache and index registers have already been set.
duke@0 2403 // This allows us to eliminate this call, but the cache and index
duke@0 2404 // registers then have to be used consistently after this line.
duke@0 2405 __ get_cache_and_index_at_bcp(rax, rdx, 1);
duke@0 2406
duke@0 2407 if (is_static) {
duke@0 2408 // Life is simple. Null out the object pointer.
never@304 2409 __ xorptr(rbx, rbx);
duke@0 2410 } else {
duke@0 2411 // Life is harder. The stack holds the value on top, followed by the object.
duke@0 2412 // We don't know the size of the value, though; it could be one or two words
duke@0 2413 // depending on its type. As a result, we must find the type to determine where
duke@0 2414 // the object is.
duke@0 2415 Label two_word, valsize_known;
never@304 2416 __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
duke@0 2417 ConstantPoolCacheEntry::flags_offset())));
never@304 2418 __ mov(rbx, rsp);
duke@0 2419 __ shrl(rcx, ConstantPoolCacheEntry::tosBits);
duke@0 2420 // Make sure we don't need to mask rcx for tosBits after the above shift
duke@0 2421 ConstantPoolCacheEntry::verify_tosBits();
duke@0 2422 __ cmpl(rcx, ltos);
duke@0 2423 __ jccb(Assembler::equal, two_word);
duke@0 2424 __ cmpl(rcx, dtos);
duke@0 2425 __ jccb(Assembler::equal, two_word);
never@304 2426 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
duke@0 2427 __ jmpb(valsize_known);
duke@0 2428
duke@0 2429 __ bind(two_word);
never@304 2430 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
duke@0 2431
duke@0 2432 __ bind(valsize_known);
duke@0 2433 // setup object pointer
never@304 2434 __ movptr(rbx, Address(rbx, 0));
duke@0 2435 }
duke@0 2436 // cache entry pointer
never@304 2437 __ addptr(rax, in_bytes(cp_base_offset));
duke@0 2438 __ shll(rdx, LogBytesPerWord);
never@304 2439 __ addptr(rax, rdx);
duke@0 2440 // object (tos)
never@304 2441 __ mov(rcx, rsp);
duke@0 2442 // rbx: object pointer set up above (NULL if static)
duke@0 2443 // rax: cache entry pointer
duke@0 2444 // rcx: jvalue object on the stack
duke@0 2445 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
duke@0 2446 rbx, rax, rcx);
duke@0 2447 __ get_cache_and_index_at_bcp(cache, index, 1);
duke@0 2448 __ bind(L1);
duke@0 2449 }
duke@0 2450 }
duke@0 2451
duke@0 2452
duke@0 2453 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
duke@0 2454 transition(vtos, vtos);
duke@0 2455
duke@0 2456 const Register cache = rcx;
duke@0 2457 const Register index = rdx;
duke@0 2458 const Register obj = rcx;
duke@0 2459 const Register off = rbx;
duke@0 2460 const Register flags = rax;
duke@0 2461
jrose@1486 2462 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
duke@0 2463 jvmti_post_field_mod(cache, index, is_static);
duke@0 2464 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
duke@0 2465
duke@0 2466 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
duke@0 2467 // volatile_barrier( );
duke@0 2468
duke@0 2469 Label notVolatile, Done;
duke@0 2470 __ movl(rdx, flags);
duke@0 2471 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
duke@0 2472 __ andl(rdx, 0x1);
duke@0 2473
duke@0 2474 // field addresses
duke@0 2475 const Address lo(obj, off, Address::times_1, 0*wordSize);
duke@0 2476 const Address hi(obj, off, Address::times_1, 1*wordSize);
duke@0 2477
duke@0 2478 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
duke@0 2479
duke@0 2480 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
duke@0 2481 assert(btos == 0, "change code, btos != 0");
duke@0 2482 // btos
duke@0 2483 __ andl(flags, 0x0f);
duke@0 2484 __ jcc(Assembler::notZero, notByte);
duke@0 2485
duke@0 2486 __ pop(btos);
duke@0 2487 if (!is_static) pop_and_check_object(obj);
duke@0 2488 __ movb(lo, rax );
duke@0 2489 if (!is_static) {
duke@0 2490 patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx);
duke@0 2491 }
duke@0 2492 __ jmp(Done);
duke@0 2493
duke@0 2494 __ bind(notByte);
duke@0 2495 // itos
duke@0 2496 __ cmpl(flags, itos );
duke@0 2497 __ jcc(Assembler::notEqual, notInt);
duke@0 2498
duke@0 2499 __ pop(itos);
duke@0 2500 if (!is_static) pop_and_check_object(obj);
duke@0 2501
duke@0 2502 __ movl(lo, rax );
duke@0 2503 if (!is_static) {
duke@0 2504 patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx);
duke@0 2505 }
duke@0 2506 __ jmp(Done);
duke@0 2507
duke@0 2508 __ bind(notInt);
duke@0 2509 // atos
duke@0 2510 __ cmpl(flags, atos );
duke@0 2511 __ jcc(Assembler::notEqual, notObj);
duke@0 2512
duke@0 2513 __ pop(atos);
duke@0 2514 if (!is_static) pop_and_check_object(obj);
duke@0 2515
ysr@342 2516 do_oop_store(_masm, lo, rax, _bs->kind(), false);
ysr@342 2517
duke@0 2518 if (!is_static) {
duke@0 2519 patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
duke@0 2520 }
ysr@342 2521
duke@0 2522 __ jmp(Done);
duke@0 2523
duke@0 2524 __ bind(notObj);
duke@0 2525 // ctos
duke@0 2526 __ cmpl(flags, ctos );
duke@0 2527 __ jcc(Assembler::notEqual, notChar);
duke@0 2528
duke@0 2529 __ pop(ctos);
duke@0 2530 if (!is_static) pop_and_check_object(obj);
duke@0 2531 __ movw(lo, rax );
duke@0 2532 if (!is_static) {
duke@0 2533 patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx);
duke@0 2534 }
duke@0 2535 __ jmp(Done);
duke@0 2536
duke@0 2537 __ bind(notChar);
duke@0 2538 // stos
duke@0 2539 __ cmpl(flags, stos );
duke@0 2540 __ jcc(Assembler::notEqual, notShort);
duke@0 2541
duke@0 2542 __ pop(stos);
duke@0 2543 if (!is_static) pop_and_check_object(obj);
duke@0 2544 __ movw(lo, rax );
duke@0 2545 if (!is_static) {
duke@0 2546 patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx);
duke@0 2547 }
duke@0 2548 __ jmp(Done);
duke@0 2549
duke@0 2550 __ bind(notShort);
duke@0 2551 // ltos
duke@0 2552 __ cmpl(flags, ltos );
duke@0 2553 __ jcc(Assembler::notEqual, notLong);
duke@0 2554
duke@0 2555 Label notVolatileLong;
duke@0 2556 __ testl(rdx, rdx);
duke@0 2557 __ jcc(Assembler::zero, notVolatileLong);
duke@0 2558
duke@0 2559 __ pop(ltos); // overwrites rdx, do this after testing volatile.
duke@0 2560 if (!is_static) pop_and_check_object(obj);
duke@0 2561
duke@0 2562 // Replace with real volatile test
never@304 2563 __ push(rdx);
never@304 2564 __ push(rax); // Must update atomically with FIST
duke@0 2565 __ fild_d(Address(rsp,0)); // So load into FPU register
duke@0 2566 __ fistp_d(lo); // and put into memory atomically
never@304 2567 __ addptr(rsp, 2*wordSize);
never@304 2568 // volatile_barrier();
never@304 2569 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
never@304 2570 Assembler::StoreStore));
duke@0 2571 // Don't rewrite volatile version
duke@0 2572 __ jmp(notVolatile);
duke@0 2573
duke@0 2574 __ bind(notVolatileLong);
duke@0 2575
duke@0 2576 __ pop(ltos); // overwrites rdx
duke@0 2577 if (!is_static) pop_and_check_object(obj);
never@304 2578 NOT_LP64(__ movptr(hi, rdx));
never@304 2579 __ movptr(lo, rax);
duke@0 2580 if (!is_static) {
duke@0 2581 patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
duke@0 2582 }
duke@0 2583 __ jmp(notVolatile);
duke@0 2584
duke@0 2585 __ bind(notLong);
duke@0 2586 // ftos
duke@0 2587 __ cmpl(flags, ftos );
duke@0 2588 __ jcc(Assembler::notEqual, notFloat);
duke@0 2589
duke@0 2590 __ pop(ftos);
duke@0 2591 if (!is_static) pop_and_check_object(obj);
duke@0 2592 __ fstp_s(lo);
duke@0 2593 if (!is_static) {
duke@0 2594 patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx);
duke@0 2595 }
duke@0 2596 __ jmp(Done);
duke@0 2597
duke@0 2598 __ bind(notFloat);
duke@0 2599 // dtos
duke@0 2600 __ cmpl(flags, dtos );
duke@0 2601 __ jcc(Assembler::notEqual, notDouble);
duke@0 2602
duke@0 2603 __ pop(dtos);
duke@0 2604 if (!is_static) pop_and_check_object(obj);
duke@0 2605 __ fstp_d(lo);
duke@0 2606 if (!is_static) {
duke@0 2607 patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx);
duke@0 2608 }
duke@0 2609 __ jmp(Done);
duke@0 2610
duke@0 2611 __ bind(notDouble);
duke@0 2612
duke@0 2613 __ stop("Bad state");
duke@0 2614
duke@0 2615 __ bind(Done);
duke@0 2616
duke@0 2617 // Check for volatile store
duke@0 2618 __ testl(rdx, rdx);
duke@0 2619 __ jcc(Assembler::zero, notVolatile);
never@304 2620 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
never@304 2621 Assembler::StoreStore));
duke@0 2622 __ bind(notVolatile);
duke@0 2623 }
duke@0 2624
duke@0 2625
duke@0 2626 void TemplateTable::putfield(int byte_no) {
duke@0 2627 putfield_or_static(byte_no, false);
duke@0 2628 }
duke@0 2629
duke@0 2630
duke@0 2631 void TemplateTable::putstatic(int byte_no) {
duke@0 2632 putfield_or_static(byte_no, true);
duke@0 2633 }
duke@0 2634
duke@0 2635 void TemplateTable::jvmti_post_fast_field_mod() {
duke@0 2636 if (JvmtiExport::can_post_field_modification()) {
duke@0 2637 // Check to see if a field modification watch has been set before we take
duke@0 2638 // the time to call into the VM.
duke@0 2639 Label L2;
duke@0 2640 __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
duke@0 2641 __ testl(rcx,rcx);
duke@0 2642 __ jcc(Assembler::zero, L2);
duke@0 2643 __ pop_ptr(rbx); // copy the object pointer from tos
duke@0 2644 __ verify_oop(rbx);
duke@0 2645 __ push_ptr(rbx); // put the object pointer back on tos
never@304 2646 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
never@304 2647 __ mov(rcx, rsp);
duke@0 2648 __ push_ptr(rbx); // save object pointer so we can steal rbx,
never@304 2649 __ xorptr(rbx, rbx);
duke@0 2650 const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
duke@0 2651 const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
duke@0 2652 switch (bytecode()) { // load values into the jvalue object
duke@0 2653 case Bytecodes::_fast_bputfield: __ movb(lo_value, rax); break;
duke@0 2654 case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
duke@0 2655 case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
duke@0 2656 case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
never@304 2657 case Bytecodes::_fast_lputfield:
never@304 2658 NOT_LP64(__ movptr(hi_value, rdx));
never@304 2659 __ movptr(lo_value, rax);
never@304 2660 break;
never@304 2661
duke@0 2662 // need to call fld_s() after fstp_s() to restore the value for below
duke@0 2663 case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
never@304 2664
duke@0 2665 // need to call fld_d() after fstp_d() to restore the value for below
duke@0 2666 case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
never@304 2667
duke@0 2668 // since rcx is not an object we don't call store_check() here
never@304 2669 case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
never@304 2670
duke@0 2671 default: ShouldNotReachHere();
duke@0 2672 }
duke@0 2673 __ pop_ptr(rbx); // restore copy of object pointer
duke@0 2674
duke@0 2675 // Save rax and sometimes rdx because call_VM() will clobber them,
duke@0 2676 // then use them for JVMTI purposes
never@304 2677 __ push(rax);
never@304 2678 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
duke@0 2679 // access constant pool cache entry
duke@0 2680 __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
duke@0 2681 __ verify_oop(rbx);
duke@0 2682 // rbx: object pointer copied above
duke@0 2683 // rax: cache entry pointer
duke@0 2684 // rcx: jvalue object on the stack
duke@0 2685 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
never@304 2686 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
never@304 2687 __ pop(rax); // restore lower value
never@304 2688 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
duke@0 2689 __ bind(L2);
duke@0 2690 }
duke@0 2691 }
duke@0 2692
duke@0 2693 void TemplateTable::fast_storefield(TosState state) {
duke@0 2694 transition(state, vtos);
duke@0 2695
duke@0 2696 ByteSize base = constantPoolCacheOopDesc::base_offset();
duke@0 2697
duke@0 2698 jvmti_post_fast_field_mod();
duke@0 2699
duke@0 2700 // access constant pool cache
duke@0 2701 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
duke@0 2702
duke@0 2703 // test for volatile with rdx, but rdx is the tos register for lputfield.
never@304 2704 if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
never@304 2705 __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
duke@0 2706 ConstantPoolCacheEntry::flags_offset())));
duke@0 2707
duke@0 2708 // replace index with field offset from cache entry
never@304 2709 __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
duke@0 2710
duke@0 2711 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
duke@0 2712 // volatile_barrier( );
duke@0 2713
duke@0 2714 Label notVolatile, Done;
duke@0 2715 __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
duke@0 2716 __ andl(rdx, 0x1);
duke@0 2717 // Check for volatile store
duke@0 2718 __ testl(rdx, rdx);
duke@0 2719 __ jcc(Assembler::zero, notVolatile);
duke@0 2720
never@304 2721 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
duke@0 2722
duke@0 2723 // Get object from stack
duke@0 2724 pop_and_check_object(rcx);
duke@0 2725
duke@0 2726 // field addresses
duke@0 2727 const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
duke@0 2728 const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
duke@0 2729
duke@0 2730 // access field
duke@0 2731 switch (bytecode()) {
duke@0 2732 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
duke@0 2733 case Bytecodes::_fast_sputfield: // fall through
duke@0 2734 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
duke@0 2735 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
never@304 2736 case Bytecodes::_fast_lputfield:
never@304 2737 NOT_LP64(__ movptr(hi, rdx));
never@304 2738 __ movptr(lo, rax);
never@304 2739 break;
duke@0 2740 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
duke@0 2741 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
ysr@342 2742 case Bytecodes::_fast_aputfield: {
ysr@342 2743 do_oop_store(_masm, lo, rax, _bs->kind(), false);
ysr@342 2744 break;
ysr@342 2745 }
duke@0 2746 default:
duke@0 2747 ShouldNotReachHere();
duke@0 2748 }
duke@0 2749
duke@0 2750 Label done;
never@304 2751 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
never@304 2752 Assembler::StoreStore));
ysr@342 2753 // Barriers are so large that a short branch doesn't reach!
ysr@342 2754 __ jmp(done);
duke@0 2755
duke@0 2756 // Same code as above, but we don't need rdx to test for volatile.
duke@0 2757 __ bind(notVolatile);
duke@0 2758
never@304 2759 if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
duke@0 2760
duke@0 2761 // Get object from stack
duke@0 2762 pop_and_check_object(rcx);
duke@0 2763
duke@0 2764 // access field
duke@0 2765 switch (bytecode()) {
duke@0 2766 case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
duke@0 2767 case Bytecodes::_fast_sputfield: // fall through
duke@0 2768 case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
duke@0 2769 case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
never@304 2770 case Bytecodes::_fast_lputfield:
never@304 2771 NOT_LP64(__ movptr(hi, rdx));
never@304 2772 __ movptr(lo, rax);
never@304 2773 break;
duke@0 2774 case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
duke@0 2775 case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
ysr@342 2776 case Bytecodes::_fast_aputfield: {
ysr@342 2777 do_oop_store(_masm, lo, rax, _bs->kind(), false);
ysr@342 2778 break;
ysr@342 2779 }
duke@0 2780 default:
duke@0 2781 ShouldNotReachHere();
duke@0 2782 }
duke@0 2783 __ bind(done);
duke@0 2784 }
duke@0 2785
duke@0 2786
duke@0 2787 void TemplateTable::fast_accessfield(TosState state) {
duke@0 2788 transition(atos, state);
duke@0 2789
duke@0 2790 // do the JVMTI work here to avoid disturbing the register state below
duke@0 2791 if (JvmtiExport::can_post_field_access()) {
duke@0 2792 // Check to see if a field access watch has been set before we take
duke@0 2793 // the time to call into the VM.
duke@0 2794 Label L1;
duke@0 2795 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
duke@0 2796 __ testl(rcx,rcx);
duke@0 2797 __ jcc(Assembler::zero, L1);
duke@0 2798 // access constant pool cache entry
duke@0 2799 __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
duke@0 2800 __ push_ptr(rax); // save object pointer before call_VM() clobbers it
duke@0 2801 __ verify_oop(rax);
duke@0 2802 // rax: object pointer copied above
duke@0 2803 // rcx: cache entry pointer
duke@0 2804 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
duke@0 2805 __ pop_ptr(rax); // restore object pointer
duke@0 2806 __ bind(L1);
duke@0 2807 }
duke@0 2808
duke@0 2809 // access constant pool cache
duke@0 2810 __ get_cache_and_index_at_bcp(rcx, rbx, 1);
duke@0 2811 // replace index with field offset from cache entry
never@304 2812 __ movptr(rbx, Address(rcx,
never@304 2813 rbx,
never@304 2814 Address::times_ptr,
never@304 2815 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
duke@0 2816
duke@0 2817
duke@0 2818 // rax,: object
duke@0 2819 __ verify_oop(rax);
duke@0 2820 __ null_check(rax);
duke@0 2821 // field addresses
duke@0 2822 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
duke@0 2823 const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
duke@0 2824
duke@0 2825 // access field
duke@0 2826 switch (bytecode()) {
never@304 2827 case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
jrose@622 2828 case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo ); break;
jrose@622 2829 case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo ); break;
duke@0 2830 case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
duke@0 2831 case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
duke@0 2832 case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
duke@0 2833 case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
never@304 2834 case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
duke@0 2835 default:
duke@0 2836 ShouldNotReachHere();
duke@0 2837 }
duke@0 2838
duke@0 2839 // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO)
duke@0 2840 // volatile_barrier( );
duke@0 2841 }
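
All of the fast_*getfield variants above share one shape: null-check the receiver, then load at receiver + offset, where the offset comes from the f2 word of the constant pool cache entry filled in by the slow path. A C++ sketch of that shape, with hypothetical stand-in types (Oop and CacheEntry are not HotSpot's):

    #include <cstdint>
    #include <cstddef>
    #include <cstdlib>

    struct Oop;                             // opaque object reference (stand-in)
    struct CacheEntry { std::size_t f2; };  // resolved field offset, from the cp cache

    // fast_igetfield: read a 32-bit int field at the cached offset.
    int32_t fast_igetfield(const Oop* receiver, const CacheEntry& e) {
      if (receiver == nullptr) std::abort();  // null_check(rax) raises NPE here
      const char* base = reinterpret_cast<const char*>(receiver);
      return *reinterpret_cast<const int32_t*>(base + e.f2);
    }
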
duke@0 2842
duke@0 2843 void TemplateTable::fast_xaccess(TosState state) {
duke@0 2844 transition(vtos, state);
duke@0 2845 // get receiver
never@304 2846 __ movptr(rax, aaddress(0));
duke@0 2847 // access constant pool cache
duke@0 2848 __ get_cache_and_index_at_bcp(rcx, rdx, 2);
never@304 2849 __ movptr(rbx, Address(rcx,
never@304 2850 rdx,
never@304 2851 Address::times_ptr,
never@304 2852 in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
duke@0 2853 // make sure the exception is reported in the correct bcp range (getfield is the next instruction)
duke@0 2854 __ increment(rsi);
duke@0 2855 __ null_check(rax);
duke@0 2856 const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
duke@0 2857 if (state == itos) {
duke@0 2858 __ movl(rax, lo);
duke@0 2859 } else if (state == atos) {
never@304 2860 __ movptr(rax, lo);
duke@0 2861 __ verify_oop(rax);
duke@0 2862 } else if (state == ftos) {
duke@0 2863 __ fld_s(lo);
duke@0 2864 } else {
duke@0 2865 ShouldNotReachHere();
duke@0 2866 }
duke@0 2867 __ decrement(rsi);
duke@0 2868 }
duke@0 2869
duke@0 2870
duke@0 2871
duke@0 2872 //----------------------------------------------------------------------------------------------------
duke@0 2873 // Calls
duke@0 2874
duke@0 2875 void TemplateTable::count_calls(Register method, Register temp) {
duke@0 2876 // implemented elsewhere
duke@0 2877 ShouldNotReachHere();
duke@0 2878 }
duke@0 2879
duke@0 2880
jrose@726 2881 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
duke@0 2882 // determine flags
jrose@726 2883 Bytecodes::Code code = bytecode();
duke@0 2884 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
jrose@726 2885 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
duke@0 2886 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
duke@0 2887 const bool is_invokespecial = code == Bytecodes::_invokespecial;
jrose@726 2888 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
duke@0 2889 const bool receiver_null_check = is_invokespecial;
duke@0 2890 const bool save_flags = is_invokeinterface || is_invokevirtual;
duke@0 2891 // setup registers & access constant pool cache
duke@0 2892 const Register recv = rcx;
duke@0 2893 const Register flags = rdx;
duke@0 2894 assert_different_registers(method, index, recv, flags);
duke@0 2895
duke@0 2896 // save 'interpreter return address'
duke@0 2897 __ save_bcp();
duke@0 2898
jrose@1486 2899 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
duke@0 2900
duke@0 2901 // load receiver if needed (note: no return address pushed yet)
duke@0 2902 if (load_receiver) {
jrose@1486 2903 assert(!is_invokedynamic, "");
duke@0 2904 __ movl(recv, flags);
duke@0 2905 __ andl(recv, 0xFF);
duke@0 2906 // recv count is 0 based?
jrose@726 2907 Address recv_addr(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1));
twisti@1304 2908 __ movptr(recv, recv_addr);
twisti@1304 2909 __ verify_oop(recv);
duke@0 2910 }
duke@0 2911
duke@0 2912 // do null check if needed
duke@0 2913 if (receiver_null_check) {
duke@0 2914 __ null_check(recv);
duke@0 2915 }
duke@0 2916
duke@0 2917 if (save_flags) {
never@304 2918 __ mov(rsi, flags);
duke@0 2919 }
duke@0 2920
duke@0 2921 // compute return type
duke@0 2922 __ shrl(flags, ConstantPoolCacheEntry::tosBits);
duke@0 2923 // Make sure we don't need to mask flags for tosBits after the above shift
duke@0 2924 ConstantPoolCacheEntry::verify_tosBits();
duke@0 2925 // load return address
never@304 2926 {
jrose@726 2927 address table_addr;
jrose@1059 2928 if (is_invokeinterface || is_invokedynamic)
jrose@726 2929 table_addr = (address)Interpreter::return_5_addrs_by_index_table();
jrose@726 2930 else
jrose@726 2931 table_addr = (address)Interpreter::return_3_addrs_by_index_table();
jrose@726 2932 ExternalAddress table(table_addr);
never@304 2933 __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
duke@0 2934 }
duke@0 2935
duke@0 2936 // push return address
never@304 2937 __ push(flags);
duke@0 2938
duke@0 2939 // Restore flag value from the constant pool cache, and restore rsi
duke@0 2940 // for later null checks. rsi is the bytecode pointer
duke@0 2941 if (save_flags) {
never@304 2942 __ mov(flags, rsi);
duke@0 2943 __ restore_bcp();
duke@0 2944 }
duke@0 2945 }
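
The flags word that prepare_invoke loads from the cache entry does double duty: the low byte carries the receiver/argument count used to locate the receiver on the expression stack, and the bits at tosBits and above carry the result type that indexes the return-address table. A hedged C++ sketch of the decoding (kTosBits and the mask are illustrative constants, not the real ConstantPoolCacheEntry layout):

    #include <cstdint>

    const int      kTosBits      = 28;   // assumed position of the TosState bits
    const uint32_t kArgCountMask = 0xFF; // receiver/arg count in the low byte

    struct DecodedFlags { uint32_t recv_index; uint32_t tos_state; };

    DecodedFlags decode_invoke_flags(uint32_t flags) {
      DecodedFlags d;
      d.recv_index = flags & kArgCountMask; // used to address the receiver on the stack
      d.tos_state  = flags >> kTosBits;     // indexes return_3/5_addrs_by_index_table
      return d;
    }
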
duke@0 2946
duke@0 2947
duke@0 2948 void TemplateTable::invokevirtual_helper(Register index, Register recv,
duke@0 2949 Register flags) {
duke@0 2950
duke@0 2951 // Uses temporary registers rax, rdx
duke@0 2952 assert_different_registers(index, recv, rax, rdx);
duke@0 2953
duke@0 2954 // Test for an invoke of a final method
duke@0 2955 Label notFinal;
duke@0 2956 __ movl(rax, flags);
duke@0 2957 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod));
duke@0 2958 __ jcc(Assembler::zero, notFinal);
duke@0 2959
duke@0 2960 Register method = index; // method must be rbx,
duke@0 2961 assert(method == rbx, "methodOop must be rbx, for interpreter calling convention");
duke@0 2962
duke@0 2963 // do the call - the index is actually the method to call
duke@0 2964 __ verify_oop(method);
duke@0 2965
duke@0 2966 // It's final, need a null check here!
duke@0 2967 __ null_check(recv);
duke@0 2968
duke@0 2969 // profile this call
duke@0 2970 __ profile_final_call(rax);
duke@0 2971
duke@0 2972 __ jump_from_interpreted(method, rax);
duke@0 2973
duke@0 2974 __ bind(notFinal);
duke@0 2975
duke@0 2976 // get receiver klass
duke@0 2977 __ null_check(recv, oopDesc::klass_offset_in_bytes());
duke@0 2978 // Keep recv in rcx: the callee expects it there
never@304 2979 __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
duke@0 2980 __ verify_oop(rax);
duke@0 2981
duke@0 2982 // profile this call
duke@0 2983 __ profile_virtual_call(rax, rdi, rdx);
duke@0 2984
duke@0 2985 // get target methodOop & entry point
duke@0 2986 const int base = instanceKlass::vtable_start_offset() * wordSize;
duke@0 2987 assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
never@304 2988 __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
duke@0 2989 __ jump_from_interpreted(method, rdx);
duke@0 2990 }
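
In outline, the helper splits on the vfinalMethod flag: a final method is invoked directly (f2 already holds the methodOop), anything else is looked up in the vtable of the receiver's dynamic type (f2 holds the index). A small C++ model with hypothetical Klass/Method shapes:

    #include <vector>

    struct Method { void (*entry)(); };

    struct Klass {
      std::vector<Method*> vtable; // stand-in for the vtable embedded in the
                                   // klass at vtable_start_offset()
    };

    struct Oop { Klass* klass; };

    // 'is_final' corresponds to the vfinalMethod bit tested above.
    Method* select_virtual_target(Oop* recv, Method* resolved_final,
                                  int vtable_index, bool is_final) {
      if (is_final) return resolved_final;        // call directly, no dispatch
      return recv->klass->vtable[vtable_index];   // dynamic dispatch
    }
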
duke@0 2991
duke@0 2992
duke@0 2993 void TemplateTable::invokevirtual(int byte_no) {
duke@0 2994 transition(vtos, vtos);
jrose@1486 2995 assert(byte_no == f2_byte, "use this argument");
jrose@726 2996 prepare_invoke(rbx, noreg, byte_no);
duke@0 2997
duke@0 2998 // rbx,: index
duke@0 2999 // rcx: receiver
duke@0 3000 // rdx: flags
duke@0 3001
duke@0 3002 invokevirtual_helper(rbx, rcx, rdx);
duke@0 3003 }
duke@0 3004
duke@0 3005
duke@0 3006 void TemplateTable::invokespecial(int byte_no) {
duke@0 3007 transition(vtos, vtos);
jrose@1486 3008 assert(byte_no == f1_byte, "use this argument");
jrose@726 3009 prepare_invoke(rbx, noreg, byte_no);
duke@0 3010 // do the call
duke@0 3011 __ verify_oop(rbx);
duke@0 3012 __ profile_call(rax);
duke@0 3013 __ jump_from_interpreted(rbx, rax);
duke@0 3014 }
duke@0 3015
duke@0 3016
duke@0 3017 void TemplateTable::invokestatic(int byte_no) {
duke@0 3018 transition(vtos, vtos);
jrose@1486 3019 assert(byte_no == f1_byte, "use this argument");
jrose@726 3020 prepare_invoke(rbx, noreg, byte_no);
duke@0 3021 // do the call
duke@0 3022 __ verify_oop(rbx);
duke@0 3023 __ profile_call(rax);
duke@0 3024 __ jump_from_interpreted(rbx, rax);
duke@0 3025 }
duke@0 3026
duke@0 3027
duke@0 3028 void TemplateTable::fast_invokevfinal(int byte_no) {
duke@0 3029 transition(vtos, vtos);
jrose@1486 3030 assert(byte_no == f2_byte, "use this argument");
duke@0 3031 __ stop("fast_invokevfinal not used on x86");
duke@0 3032 }
duke@0 3033
duke@0 3034
duke@0 3035 void TemplateTable::invokeinterface(int byte_no) {
duke@0 3036 transition(vtos, vtos);
jrose@1486 3037 assert(byte_no == f1_byte, "use this argument");
jrose@726 3038 prepare_invoke(rax, rbx, byte_no);
duke@0 3039
duke@0 3040 // rax,: Interface
duke@0 3041 // rbx,: index
duke@0 3042 // rcx: receiver
duke@0 3043 // rdx: flags
duke@0 3044
duke@0 3045 // Special case of invokeinterface called for virtual method of
duke@0 3046 // java.lang.Object. See cpCacheOop.cpp for details.
duke@0 3047 // This code isn't produced by javac, but could be produced by
duke@0 3048 // another compliant Java compiler.
duke@0 3049 Label notMethod;
duke@0 3050 __ movl(rdi, rdx);
duke@0 3051 __ andl(rdi, (1 << ConstantPoolCacheEntry::methodInterface));
duke@0 3052 __ jcc(Assembler::zero, notMethod);
duke@0 3053
duke@0 3054 invokevirtual_helper(rbx, rcx, rdx);
duke@0 3055 __ bind(notMethod);
duke@0 3056
duke@0 3057 // Get receiver klass into rdx - also a null check
duke@0 3058 __ restore_locals(); // restore rdi
never@304 3059 __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
duke@0 3060 __ verify_oop(rdx);
duke@0 3061
duke@0 3062 // profile this call
duke@0 3063 __ profile_virtual_call(rdx, rsi, rdi);
duke@0 3064
jrose@623 3065 Label no_such_interface, no_such_method;
jrose@623 3066
jrose@623 3067 __ lookup_interface_method(// inputs: rec. class, interface, itable index
jrose@623 3068 rdx, rax, rbx,
jrose@623 3069 // outputs: method, scan temp. reg
jrose@623 3070 rbx, rsi,
jrose@623 3071 no_such_interface);
jrose@623 3072
jrose@623 3073 // rbx,: methodOop to call
jrose@623 3074 // rcx: receiver
jrose@623 3075 // Check for abstract method error
jrose@623 3076 // Note: This should be done more efficiently via a throw_abstract_method_error
jrose@623 3077 // interpreter entry point and a conditional jump to it in case of a null
jrose@623 3078 // method.
jrose@623 3079 __ testptr(rbx, rbx);
jrose@623 3080 __ jcc(Assembler::zero, no_such_method);
jrose@623 3081
jrose@623 3082 // do the call
jrose@623 3083 // rcx: receiver
jrose@623 3084 // rbx,: methodOop
jrose@623 3085 __ jump_from_interpreted(rbx, rdx);
jrose@623 3086 __ should_not_reach_here();
jrose@623 3087
jrose@623 3088 // exception handling code follows...
jrose@623 3089 // note: must restore interpreter registers to canonical
jrose@623 3090 // state for exception handling to work correctly!
jrose@623 3091
jrose@623 3092 __ bind(no_such_method);
duke@0 3093 // throw exception
jrose@623 3094 __ pop(rbx); // pop return address (pushed by prepare_invoke)
jrose@623 3095 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
jrose@623 3096 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
jrose@623 3097 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
jrose@623 3098 // the call_VM checks for exception, so we should never return here.
jrose@623 3099 __ should_not_reach_here();
jrose@623 3100
jrose@623 3101 __ bind(no_such_interface);
jrose@623 3102 // throw exception
never@304 3103 __ pop(rbx); // pop return address (pushed by prepare_invoke)
duke@0 3104 __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
duke@0 3105 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
duke@0 3106 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
duke@0 3107 InterpreterRuntime::throw_IncompatibleClassChangeError));
duke@0 3108 // the call_VM checks for exception, so we should never return here.
duke@0 3109 __ should_not_reach_here();
duke@0 3110 }
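
lookup_interface_method performs the itable part of invokeinterface: scan the receiver klass's itable for the declaring interface, then index that interface's method block; a missing interface means IncompatibleClassChangeError and an empty method slot means AbstractMethodError. A simplified C++ model of the scan (these shapes are illustrative, not the real itableOffsetEntry layout):

    #include <vector>
    #include <cstddef>

    struct Method { void (*entry)(); };
    struct InterfaceKlass;

    struct ItableBlock {
      InterfaceKlass*      interface; // interface this block implements
      std::vector<Method*> methods;   // indexed by the itable index
    };

    struct Klass { std::vector<ItableBlock> itable; };

    // Returns nullptr when the interface is absent (the no_such_interface
    // path) or when the slot is empty (the no_such_method path); the real
    // code branches to two different labels for these cases.
    Method* lookup_interface_method(Klass* recv_klass, InterfaceKlass* iface,
                                    std::size_t index) {
      for (ItableBlock& b : recv_klass->itable)
        if (b.interface == iface)
          return index < b.methods.size() ? b.methods[index] : nullptr;
      return nullptr;
    }
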
duke@0 3111
jrose@726 3112 void TemplateTable::invokedynamic(int byte_no) {
jrose@726 3113 transition(vtos, vtos);
jrose@726 3114
jrose@726 3115 if (!EnableInvokeDynamic) {
jrose@726 3116 // We should not encounter this bytecode if !EnableInvokeDynamic.
jrose@726 3117 // The verifier will stop it. However, if we get past the verifier,
jrose@726 3118 // this will stop the thread in a reasonable way, without crashing the JVM.
jrose@726 3119 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
jrose@726 3120 InterpreterRuntime::throw_IncompatibleClassChangeError));
jrose@726 3121 // the call_VM checks for exception, so we should never return here.
jrose@726 3122 __ should_not_reach_here();
jrose@726 3123 return;
jrose@726 3124 }
jrose@726 3125
jrose@1486 3126 assert(byte_no == f1_oop, "use this argument");
jrose@726 3127 prepare_invoke(rax, rbx, byte_no);
jrose@726 3128
jrose@726 3129 // rax: CallSite object (f1)
jrose@726 3130 // rbx: unused (f2)
twisti@1771 3131 // rcx: receiver address
jrose@726 3132 // rdx: flags (unused)
jrose@726 3133
twisti@1771 3134 Register rax_callsite = rax;
twisti@1771 3135 Register rcx_method_handle = rcx;
twisti@1771 3136
jrose@726 3137 if (ProfileInterpreter) {
jrose@726 3138 // %%% should make a type profile for any invokedynamic that takes a ref argument
jrose@726 3139 // profile this call
jrose@726 3140 __ profile_call(rsi);
jrose@726 3141 }
jrose@726 3142
twisti@1771 3143 __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
twisti@1771 3144 __ null_check(rcx_method_handle);
jrose@726 3145 __ prepare_to_jump_from_interpreted();
twisti@1771 3146 __ jump_to_method_handle_entry(rcx_method_handle, rdx);
jrose@726 3147 }
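
Semantically the fast path is tiny: load the call site's current target method handle, null-check it, and jump to the handle's interpreted entry point. As a sketch in hypothetical C++ shapes (CallSite and MethodHandle here are stand-ins for the java_dyn_* classes):

    #include <cstdlib>

    struct MethodHandle { void (*interpreted_entry)(); };
    struct CallSite { MethodHandle* target; }; // java_dyn_CallSite::target

    void invoke_dynamic(CallSite* site) {
      MethodHandle* mh = site->target;   // movptr from the target offset
      if (mh == nullptr) std::abort();   // null_check would raise NPE here
      mh->interpreted_entry();           // jump_to_method_handle_entry
    }
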
jrose@726 3148
duke@0 3149 //----------------------------------------------------------------------------------------------------
duke@0 3150 // Allocation
duke@0 3151
duke@0 3152 void TemplateTable::_new() {
duke@0 3153 transition(vtos, atos);
duke@0 3154 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
duke@0 3155 Label slow_case;
bobv@1605 3156 Label slow_case_no_pop;
duke@0 3157 Label done;
duke@0 3158 Label initialize_header;
duke@0 3159 Label initialize_object; // including clearing the fields
duke@0 3160 Label allocate_shared;
duke@0 3161
duke@0 3162 __ get_cpool_and_tags(rcx, rax);
bobv@1605 3163
bobv@1605 3164 // Make sure the class we're about to instantiate has been resolved.
bobv@1605 3165 // This is done before loading the instanceKlass to be consistent with the order
bobv@1605 3166 // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put).
bobv@1605 3167 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
bobv@1605 3168 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
bobv@1605 3169 __ jcc(Assembler::notEqual, slow_case_no_pop);
bobv@1605 3170
duke@0 3171 // get instanceKlass
never@304 3172 __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
never@304 3173 __ push(rcx); // save the klass pointer for initializing the header
duke@0 3174
duke@0 3175 // make sure klass is initialized & doesn't have finalizer
duke@0 3176 // make sure klass is fully initialized
duke@0 3177 __ cmpl(Address(rcx, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
duke@0 3178 __ jcc(Assembler::notEqual, slow_case);
duke@0 3179
duke@0 3180 // get instance_size in instanceKlass (scaled to a count of bytes)
duke@0 3181 __ movl(rdx, Address(rcx, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
duke@0 3182 // test to see if it has a finalizer or is malformed in some way
duke@0 3183 __ testl(rdx, Klass::_lh_instance_slow_path_bit);
duke@0 3184 __ jcc(Assembler::notZero, slow_case);
duke@0 3185
duke@0 3186 //
duke@0 3187 // Allocate the instance
duke@0 3188 // 1) Try to allocate in the TLAB
duke@0 3189 // 2) if that fails and the object is large, allocate in the shared Eden
duke@0 3190 // 3) if the above fails (or is not applicable), go to a slow case
duke@0 3191 // (creates a new TLAB, etc.)
duke@0 3192
duke@0 3193 const bool allow_shared_alloc =
duke@0 3194 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
duke@0 3195
duke@0 3196 if (UseTLAB) {
duke@0 3197 const Register thread = rcx;
duke@0 3198
duke@0 3199 __ get_thread(thread);
never@304 3200 __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
never@304 3201 __ lea(rbx, Address(rax, rdx, Address::times_1));
never@304 3202 __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
duke@0 3203 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
never@304 3204 __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
duke@0 3205 if (ZeroTLAB) {
duke@0 3206 // the fields have been already cleared
duke@0 3207 __ jmp(initialize_header);
duke@0 3208 } else {
duke@0 3209 // initialize both the header and fields
duke@0 3210 __ jmp(initialize_object);
duke@0 3211 }
duke@0 3212 }
duke@0 3213
duke@0 3214 // Allocation in the shared Eden, if allowed.
duke@0 3215 //
duke@0 3216 // rdx: instance size in bytes
duke@0 3217 if (allow_shared_alloc) {
duke@0 3218 __ bind(allocate_shared);
duke@0 3219
ysr@342 3220 ExternalAddress heap_top((address)Universe::heap()->top_addr());
ysr@342 3221
duke@0 3222 Label retry;
duke@0 3223 __ bind(retry);
never@304 3224 __ movptr(rax, heap_top);
never@304 3225 __ lea(rbx, Address(rax, rdx, Address::times_1));
never@304 3226 __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
duke@0 3227 __ jcc(Assembler::above, slow_case);
duke@0 3228
duke@0 3229 // Compare rax, with the top addr, and if still equal, store the new
duke@0 3230 // top addr in rbx, at the address of the top addr pointer. Sets ZF if it was
duke@0 3231 // equal, and clears it otherwise. Use the lock prefix for atomicity on MPs.
duke@0 3232 //
duke@0 3233 // rax,: object begin
duke@0 3234 // rbx,: object end
duke@0 3235 // rdx: instance size in bytes
never@304 3236 __ locked_cmpxchgptr(rbx, heap_top);
duke@0 3237
duke@0 3238 // if someone beat us on the allocation, try again, otherwise continue
duke@0 3239 __ jcc(Assembler::notEqual, retry);
duke@0 3240 }
duke@0 3241
duke@0 3242 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
duke@0 3243 // The object is initialized before the header. If the object size is
duke@0 3244 // zero, go directly to the header initialization.
duke@0 3245 __ bind(initialize_object);
duke@0 3246 __ decrement(rdx, sizeof(oopDesc));
duke@0 3247 __ jcc(Assembler::zero, initialize_header);
duke@0 3248
duke@0 3249 // Initialize topmost object field, divide rdx by 8, check if odd and
duke@0 3250 // test if zero.
duke@0 3251 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code)
duke@0 3252 __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
duke@0 3253
duke@0 3254 // rdx must have been a multiple of 8
duke@0 3255 #ifdef ASSERT
duke@0 3256 // make sure rdx was a multiple of 8
duke@0 3257 Label L;
duke@0 3258 // Ignore partial flag stall after shrl() since it is debug VM
duke@0 3259 __ jccb(Assembler::carryClear, L);
duke@0 3260 __ stop("object size is not multiple of 2 - adjust this code");
duke@0 3261 __ bind(L);
duke@0 3262 // rdx must be > 0, no extra check needed here
duke@0 3263 #endif
duke@0 3264
duke@0 3265 // initialize remaining object fields: rdx was a multiple of 8
duke@0 3266 { Label loop;
duke@0 3267 __ bind(loop);
never@304 3268 __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
never@304 3269 NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
duke@0 3270 __ decrement(rdx);
duke@0 3271 __ jcc(Assembler::notZero, loop);
duke@0 3272 }
duke@0 3273
duke@0 3274 // initialize object header only.
duke@0 3275 __ bind(initialize_header);
duke@0 3276 if (UseBiasedLocking) {
never@304 3277 __ pop(rcx); // get saved klass back in the register.
never@304 3278 __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
never@304 3279 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
duke@0 3280 } else {
never@304 3281 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
never@304 3282 (int32_t)markOopDesc::prototype()); // header
never@304 3283 __ pop(rcx); // get saved klass back in the register.
duke@0 3284 }
never@304 3285 __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
duke@0 3286
duke@0 3287 {
duke@0 3288 SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
duke@0 3289 // Trigger dtrace event for fastpath
duke@0 3290 __ push(atos);
duke@0 3291 __ call_VM_leaf(
duke@0 3292 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
duke@0 3293 __ pop(atos);
duke@0 3294 }
duke@0 3295
duke@0 3296 __ jmp(done);
duke@0 3297 }
duke@0 3298
duke@0 3299 // slow case
duke@0 3300 __ bind(slow_case);
never@304 3301 __ pop(rcx); // restore stack pointer to what it was when we came in.
bobv@1605 3302 __ bind(slow_case_no_pop);
duke@0 3303 __ get_constant_pool(rax);
duke@0 3304 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
duke@0 3305 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
duke@0 3306
duke@0 3307 // continue
duke@0 3308 __ bind(done);
duke@0 3309 }
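
The fast path above is the standard bump-pointer scheme: try the thread-local allocation buffer first, then CAS on the shared Eden top, then fall back to the runtime. A condensed C++ sketch using std::atomic for the Eden CAS (the names are illustrative, not HotSpot's):

    #include <atomic>
    #include <cstddef>

    struct Thread { char* tlab_top; char* tlab_end; };

    std::atomic<char*> eden_top{nullptr};  // Universe::heap()->top_addr()
    char*              eden_end = nullptr; // Universe::heap()->end_addr()

    char* allocate(Thread* t, std::size_t size_in_bytes) {
      // 1) TLAB bump pointer: thread-local, so no atomics are needed.
      char* obj = t->tlab_top;
      if (obj + size_in_bytes <= t->tlab_end) {
        t->tlab_top = obj + size_in_bytes;
        return obj;
      }
      // 2) Shared Eden: CAS retry loop, mirroring locked_cmpxchgptr plus
      //    the jcc(notEqual, retry) above.
      char* top = eden_top.load();
      while (top != nullptr && top + size_in_bytes <= eden_end) {
        if (eden_top.compare_exchange_weak(top, top + size_in_bytes))
          return top;  // we won the race
        // a failed CAS refreshed 'top'; try again
      }
      return nullptr;  // 3) slow case: call into InterpreterRuntime::_new
    }
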
duke@0 3310
duke@0 3311
duke@0 3312 void TemplateTable::newarray() {
duke@0 3313 transition(itos, atos);
duke@0 3314 __ push_i(rax); // make sure everything is on the stack
duke@0 3315 __ load_unsigned_byte(rdx, at_bcp(1));
duke@0 3316 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
duke@0 3317 __ pop_i(rdx); // discard size
duke@0 3318 }
duke@0 3319
duke@0 3320
duke@0 3321 void TemplateTable::anewarray() {
duke@0 3322 transition(itos, atos);
duke@0 3323 __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
duke@0 3324 __ get_constant_pool(rcx);
duke@0 3325 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
duke@0 3326 }
duke@0 3327
duke@0 3328
duke@0 3329 void TemplateTable::arraylength() {
duke@0 3330 transition(atos, itos);
duke@0 3331 __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
duke@0 3332 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
duke@0 3333 }
duke@0 3334
duke@0 3335
duke@0 3336 void TemplateTable::checkcast() {
duke@0 3337 transition(atos, atos);
duke@0 3338 Label done, is_null, ok_is_subtype, quicked, resolved;
never@304 3339 __ testptr(rax, rax); // Object is in EAX
duke@0 3340 __ jcc(Assembler::zero, is_null);
duke@0 3341
duke@0 3342 // Get cpool & tags index
duke@0 3343 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
duke@0 3344 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
duke@0 3345 // See if bytecode has already been quicked
duke@0 3346 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
duke@0 3347 __ jcc(Assembler::equal, quicked);
duke@0 3348
duke@0 3349 __ push(atos);
duke@0 3350 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
duke@0 3351 __ pop_ptr(rdx);
duke@0 3352 __ jmpb(resolved);
duke@0 3353
duke@0 3354 // Get superklass in EAX and subklass in EBX
duke@0 3355 __ bind(quicked);
never@304 3356 __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
never@304 3357 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
duke@0 3358
duke@0 3359 __ bind(resolved);
never@304 3360 __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
duke@0 3361
duke@0 3362 // Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
duke@0 3363 // Superklass in EAX. Subklass in EBX.
duke@0 3364 __ gen_subtype_check( rbx, ok_is_subtype );
duke@0 3365
duke@0 3366 // Come here on failure
never@304 3367 __ push(rdx);
duke@0 3368 // object is at TOS
duke@0 3369 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
duke@0 3370
duke@0 3371 // Come here on success
duke@0 3372 __ bind(ok_is_subtype);
never@304 3373 __ mov(rax,rdx); // Restore object from EDX
duke@0 3374
duke@0 3375 // Collect counts on whether this check-cast sees NULLs a lot or not.
duke@0 3376 if (ProfileInterpreter) {
duke@0 3377 __ jmp(done);
duke@0 3378 __ bind(is_null);
duke@0 3379 __ profile_null_seen(rcx);
duke@0 3380 } else {
duke@0 3381 __ bind(is_null); // same as 'done'
duke@0 3382 }
duke@0 3383 __ bind(done);
duke@0 3384 }
duke@0 3385
duke@0 3386
duke@0 3387 void TemplateTable::instanceof() {
duke@0 3388 transition(atos, itos);
duke@0 3389 Label done, is_null, ok_is_subtype, quicked, resolved;
never@304 3390 __ testptr(rax, rax);
duke@0 3391 __ jcc(Assembler::zero, is_null);
duke@0 3392
duke@0 3393 // Get cpool & tags index
duke@0 3394 __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
duke@0 3395 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
duke@0 3396 // See if bytecode has already been quicked
duke@0 3397 __ cmpb(Address(rdx, rbx, Address::times_1, typeArrayOopDesc::header_size(T_BYTE) * wordSize), JVM_CONSTANT_Class);
duke@0 3398 __ jcc(Assembler::equal, quicked);
duke@0 3399
duke@0 3400 __ push(atos);
duke@0 3401 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
duke@0 3402 __ pop_ptr(rdx);
never@304 3403 __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
duke@0 3404 __ jmp(resolved);
duke@0 3405
duke@0 3406 // Get superklass in EAX and subklass in EDX
duke@0 3407 __ bind(quicked);
never@304 3408 __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
never@304 3409 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
duke@0 3410
duke@0 3411 __ bind(resolved);
duke@0 3412
duke@0 3413 // Generate subtype check. Blows ECX. Resets EDI.
duke@0 3414 // Superklass in EAX. Subklass in EDX.
duke@0 3415 __ gen_subtype_check( rdx, ok_is_subtype );
duke@0 3416
duke@0 3417 // Come here on failure
duke@0 3418 __ xorl(rax,rax);
duke@0 3419 __ jmpb(done);
duke@0 3420 // Come here on success
duke@0 3421 __ bind(ok_is_subtype);
duke@0 3422 __ movl(rax, 1);
duke@0 3423
duke@0 3424 // Collect counts on whether this test sees NULLs a lot or not.
duke@0 3425 if (ProfileInterpreter) {
duke@0 3426 __ jmp(done);
duke@0 3427 __ bind(is_null);
duke@0 3428 __ profile_null_seen(rcx);
duke@0 3429 } else {
duke@0 3430 __ bind(is_null); // same as 'done'
duke@0 3431 }
duke@0 3432 __ bind(done);
duke@0 3433 // rax, = 0: obj == NULL or obj is not an instance of the specified klass
duke@0 3434 // rax, = 1: obj != NULL and obj is an instance of the specified klass
duke@0 3435 }
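
checkcast and instanceof share the quickening and the subtype test above; they differ only in their null behavior (checkcast passes null through, instanceof answers 0) and in what failure means (ClassCastException versus 0). A sketch of the instanceof semantics, with a deliberately naive subtype walk standing in for gen_subtype_check (it ignores interfaces and the secondary-supers cache):

    struct Klass { Klass* super; };
    struct Oop   { Klass* klass; };

    // Naive single-supers walk; the generated code uses a much faster check.
    bool is_subtype(const Klass* sub, const Klass* super) {
      for (const Klass* k = sub; k != nullptr; k = k->super)
        if (k == super) return true;
      return false;
    }

    int do_instanceof(const Oop* obj, const Klass* super) {
      if (obj == nullptr) return 0;  // the is_null path
      return is_subtype(obj->klass, super) ? 1 : 0;
    }
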
duke@0 3436
duke@0 3437
duke@0 3438 //----------------------------------------------------------------------------------------------------
duke@0 3439 // Breakpoints
duke@0 3440 void TemplateTable::_breakpoint() {
duke@0 3441
duke@0 3442 // Note: We get here even if we are single stepping...
duke@0 3443 // jbug insists on setting breakpoints at every bytecode,
duke@0 3444 // even if we are in single-step mode.
duke@0 3445
duke@0 3446 transition(vtos, vtos);
duke@0 3447
duke@0 3448 // get the unpatched byte code
duke@0 3449 __ get_method(rcx);
duke@0 3450 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
never@304 3451 __ mov(rbx, rax);
duke@0 3452
duke@0 3453 // post the breakpoint event
duke@0 3454 __ get_method(rcx);
duke@0 3455 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
duke@0 3456
duke@0 3457 // complete the execution of original bytecode
duke@0 3458 __ dispatch_only_normal(vtos);
duke@0 3459 }
duke@0 3460
duke@0 3461
duke@0 3462 //----------------------------------------------------------------------------------------------------
duke@0 3463 // Exceptions
duke@0 3464
duke@0 3465 void TemplateTable::athrow() {
duke@0 3466 transition(atos, vtos);
duke@0 3467 __ null_check(rax);
duke@0 3468 __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
duke@0 3469 }
duke@0 3470
duke@0 3471
duke@0 3472 //----------------------------------------------------------------------------------------------------
duke@0 3473 // Synchronization
duke@0 3474 //
duke@0 3475 // Note: monitorenter & exit are symmetric routines; which is reflected
duke@0 3476 // in the assembly code structure as well
duke@0 3477 //
duke@0 3478 // Stack layout:
duke@0 3479 //
duke@0 3480 // [expressions ] <--- rsp = expression stack top
duke@0 3481 // ..
duke@0 3482 // [expressions ]
duke@0 3483 // [monitor entry] <--- monitor block top = expression stack bot
duke@0 3484 // ..
duke@0 3485 // [monitor entry]
duke@0 3486 // [frame data ] <--- monitor block bot
duke@0 3487 // ...
duke@0 3488 // [saved rbp, ] <--- rbp,
duke@0 3489
duke@0 3490
duke@0 3491 void TemplateTable::monitorenter() {
duke@0 3492 transition(atos, vtos);
duke@0 3493
duke@0 3494 // check for NULL object
duke@0 3495 __ null_check(rax);
duke@0 3496
duke@0 3497 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
duke@0 3498 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
duke@0 3499 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
duke@0 3500 Label allocated;
duke@0 3501
duke@0 3502 // initialize entry pointer
duke@0 3503 __ xorl(rdx, rdx); // points to free slot or NULL
duke@0 3504
duke@0 3505 // find a free slot in the monitor block (result in rdx)
duke@0 3506 { Label entry, loop, exit;
never@304 3507 __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
never@304 3508 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
duke@0 3509 __ jmpb(entry);
duke@0 3510
duke@0 3511 __ bind(loop);
never@304 3512 __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
duke@0 3513
duke@0 3514 // TODO - need new func here - kbt
duke@0 3515 if (VM_Version::supports_cmov()) {
never@304 3516 __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
duke@0 3517 } else {
duke@0 3518 Label L;
duke@0 3519 __ jccb(Assembler::notEqual, L);
never@304 3520 __ mov(rdx, rcx); // if not used then remember entry in rdx
duke@0 3521 __ bind(L);
duke@0 3522 }
never@304 3523 __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
never@304 3524 __ jccb(Assembler::equal, exit); // if same object then stop searching
never@304 3525 __ addptr(rcx, entry_size); // otherwise advance to next entry
duke@0 3526 __ bind(entry);
never@304 3527 __ cmpptr(rcx, rbx); // check if bottom reached
duke@0 3528 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
duke@0 3529 __ bind(exit);
duke@0 3530 }
duke@0 3531
never@304 3532 __ testptr(rdx, rdx); // check if a slot has been found
never@304 3533 __ jccb(Assembler::notZero, allocated); // if found, continue with that one
duke@0 3534
duke@0 3535 // allocate one if there's no free slot
duke@0 3536 { Label entry, loop;
duke@0 3537 // 1. compute new pointers // rsp: old expression stack top
never@304 3538 __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
never@304 3539 __ subptr(rsp, entry_size); // move expression stack top
never@304 3540 __ subptr(rdx, entry_size); // move expression stack bottom
never@304 3541 __ mov(rcx, rsp); // set start value for copy loop
never@304 3542 __ movptr(monitor_block_bot, rdx); // set new monitor block top
duke@0 3543 __ jmp(entry);
duke@0 3544 // 2. move expression stack contents
duke@0 3545 __ bind(loop);
never@304 3546 __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
never@304 3547 __ movptr(Address(rcx, 0), rbx); // and store it at new location
never@304 3548 __ addptr(rcx, wordSize); // advance to next word
duke@0 3549 __ bind(entry);
never@304 3550 __ cmpptr(rcx, rdx); // check if bottom reached
duke@0 3551 __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
duke@0 3552 }
duke@0 3553
duke@0 3554 // call run-time routine
duke@0 3555 // rdx: points to monitor entry
duke@0 3556 __ bind(allocated);
duke@0 3557
duke@0 3558 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
duke@0 3559 // The object has already been popped from the stack, so the expression stack looks correct.
duke@0 3560 __ increment(rsi);
duke@0 3561
never@304 3562 __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
duke@0 3563 __ lock_object(rdx);
duke@0 3564
duke@0 3565 // check to make sure this monitor doesn't cause stack overflow after locking
duke@0 3566 __ save_bcp(); // in case of exception
duke@0 3567 __ generate_stack_overflow_check(0);
duke@0 3568
duke@0 3569 // The bcp has already been incremented. Just need to dispatch to next instruction.
duke@0 3570 __ dispatch_next(vtos);
duke@0 3571 }
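
The slot search above walks the monitor block once, remembering the most recent unused slot (the cmov) while stopping early if this object already has an entry; only if nothing is free does the code grow the block by sliding the expression stack down one entry. A C++ rendering of just the search, over a hypothetical array of BasicObjectLock-like entries:

    #include <cstddef>

    struct Oop;
    struct MonitorEntry { Oop* obj; }; // obj == nullptr means the slot is free

    // Mirrors the loop above: 'entries' runs from monitor block top to
    // bottom; returns a usable slot, or nullptr if a new one must be made.
    MonitorEntry* find_monitor_slot(MonitorEntry* entries, std::size_t count,
                                    Oop* o) {
      MonitorEntry* free_slot = nullptr;
      for (std::size_t i = 0; i < count; ++i) {
        if (entries[i].obj == nullptr) free_slot = &entries[i]; // the cmov
        if (entries[i].obj == o) break; // same object: stop searching
      }
      return free_slot;
    }
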
duke@0 3572
duke@0 3573
duke@0 3574 void TemplateTable::monitorexit() {
duke@0 3575 transition(atos, vtos);
duke@0 3576
duke@0 3577 // check for NULL object
duke@0 3578 __ null_check(rax);
duke@0 3579
duke@0 3580 const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
duke@0 3581 const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
duke@0 3582 const int entry_size = ( frame::interpreter_frame_monitor_size() * wordSize);
duke@0 3583 Label found;
duke@0 3584
duke@0 3585 // find matching slot
duke@0 3586 { Label entry, loop;
never@304 3587 __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
never@304 3588 __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
duke@0 3589 __ jmpb(entry);
duke@0 3590
duke@0 3591 __ bind(loop);
never@304 3592 __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
duke@0 3593 __ jcc(Assembler::equal, found); // if same object then stop searching
never@304 3594 __ addptr(rdx, entry_size); // otherwise advance to next entry
duke@0 3595 __ bind(entry);
never@304 3596 __ cmpptr(rdx, rbx); // check if bottom reached
duke@0 3597 __ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
duke@0 3598 }