annotate src/cpu/x86/vm/templateTable_x86.cpp @ 13581:c6b48833776d

8187625: [MVT] Interpreter cleanup
Reviewed-by: dsimms
author fparain
date Mon, 18 Sep 2017 10:34:11 -0400
parents bc075a2f87ad
children
rev   line source
duke@0 1 /*
mgronlun@12735 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #include "precompiled.hpp"
twisti@3883 26 #include "asm/macroAssembler.hpp"
stefank@1879 27 #include "interpreter/interpreter.hpp"
stefank@1879 28 #include "interpreter/interpreterRuntime.hpp"
goetz@6760 29 #include "interpreter/interp_masm.hpp"
stefank@1879 30 #include "interpreter/templateTable.hpp"
stefank@1879 31 #include "memory/universe.inline.hpp"
coleenp@3602 32 #include "oops/methodData.hpp"
stefank@1879 33 #include "oops/objArrayKlass.hpp"
stefank@1879 34 #include "oops/oop.inline.hpp"
stefank@1879 35 #include "prims/methodHandles.hpp"
stefank@1879 36 #include "runtime/sharedRuntime.hpp"
stefank@1879 37 #include "runtime/stubRoutines.hpp"
stefank@1879 38 #include "runtime/synchronizer.hpp"
jprovino@4107 39 #include "utilities/macros.hpp"
duke@0 40
duke@0 41 #define __ _masm->
duke@0 42
mockner@7970 43 // Global Register Names
coleenp@9669 44 static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
coleenp@9669 45 static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
mockner@7970 46
duke@0 47 // Platform-dependent initialization
duke@0 48 void TemplateTable::pd_initialize() {
mockner@7970 49 // No x86 specific initialization
duke@0 50 }
duke@0 51
mockner@7970 52 // Address Computation: local variables
duke@0 53 static inline Address iaddress(int n) {
mockner@7970 54 return Address(rlocals, Interpreter::local_offset_in_bytes(n));
duke@0 55 }
duke@0 56
duke@0 57 static inline Address laddress(int n) {
duke@0 58 return iaddress(n + 1);
duke@0 59 }
duke@0 60
mockner@7970 61 #ifndef _LP64
mockner@7970 62 static inline Address haddress(int n) {
mockner@7970 63 return iaddress(n + 0);
mockner@7970 64 }
mockner@7970 65 #endif
mockner@7970 66
duke@0 67 static inline Address faddress(int n) {
duke@0 68 return iaddress(n);
duke@0 69 }
duke@0 70
duke@0 71 static inline Address daddress(int n) {
duke@0 72 return laddress(n);
duke@0 73 }
duke@0 74
duke@0 75 static inline Address aaddress(int n) {
duke@0 76 return iaddress(n);
duke@0 77 }
duke@0 78
duke@0 79 static inline Address iaddress(Register r) {
mockner@7970 80 return Address(rlocals, r, Address::times_ptr);
duke@0 81 }
duke@0 82
duke@0 83 static inline Address laddress(Register r) {
mockner@7970 84 return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
duke@0 85 }
duke@0 86
mockner@7970 87 #ifndef _LP64
mockner@7970 88 static inline Address haddress(Register r) {
mockner@7970 89 return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
mockner@7970 90 }
mockner@7970 91 #endif
mockner@7970 92
duke@0 93 static inline Address faddress(Register r) {
duke@0 94 return iaddress(r);
duke@0 95 }
duke@0 96
duke@0 97 static inline Address daddress(Register r) {
duke@0 98 return laddress(r);
duke@0 99 }
duke@0 100
duke@0 101 static inline Address aaddress(Register r) {
duke@0 102 return iaddress(r);
duke@0 103 }
duke@0 104
mockner@7970 105
mockner@7970 106 // expression stack
mockner@7970 107 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
mockner@7970 108 // data beyond the rsp which is potentially unsafe in an MT environment;
mockner@7970 109 // an interrupt may overwrite that data.)
mockner@7970 110 static inline Address at_rsp () {
duke@0 111 return Address(rsp, 0);
duke@0 112 }
duke@0 113
duke@0 114 // At the top of the Java expression stack, which may be different from rsp(). It
duke@0 115 // isn't different for category 1 values.
duke@0 116 static inline Address at_tos () {
duke@0 117 return Address(rsp, Interpreter::expr_offset_in_bytes(0));
duke@0 118 }
duke@0 119
duke@0 120 static inline Address at_tos_p1() {
duke@0 121 return Address(rsp, Interpreter::expr_offset_in_bytes(1));
duke@0 122 }
duke@0 123
duke@0 124 static inline Address at_tos_p2() {
duke@0 125 return Address(rsp, Interpreter::expr_offset_in_bytes(2));
duke@0 126 }
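
// Illustration: a minimal standalone sketch (not VM code) of what the
// at_tos* helpers compute, assuming the template interpreter's
// one-word-per-slot expression stack, where expr_offset_in_bytes(i) is
// simply i * wordSize.

#include <cassert>

static const int sketch_word_size = sizeof(void*);
static int sketch_expr_offset_in_bytes(int i) { return i * sketch_word_size; }

static void sketch_expr_stack_offsets() {
  assert(sketch_expr_offset_in_bytes(0) == 0);                     // at_tos():    [rsp]
  assert(sketch_expr_offset_in_bytes(1) == sketch_word_size);      // at_tos_p1(): one slot below top
  assert(sketch_expr_offset_in_bytes(2) == 2 * sketch_word_size);  // at_tos_p2(): two slots below top
}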
duke@0 127
duke@0 128 // Condition conversion
duke@0 129 static Assembler::Condition j_not(TemplateTable::Condition cc) {
duke@0 130 switch (cc) {
duke@0 131 case TemplateTable::equal : return Assembler::notEqual;
duke@0 132 case TemplateTable::not_equal : return Assembler::equal;
duke@0 133 case TemplateTable::less : return Assembler::greaterEqual;
duke@0 134 case TemplateTable::less_equal : return Assembler::greater;
duke@0 135 case TemplateTable::greater : return Assembler::lessEqual;
duke@0 136 case TemplateTable::greater_equal: return Assembler::less;
duke@0 137 }
duke@0 138 ShouldNotReachHere();
duke@0 139 return Assembler::zero;
duke@0 140 }
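
// Illustration: a standalone sketch (hypothetical enum, not the VM's types)
// of why the inversion above exists: a conditional-branch template emits
// "jump past the taken-path code if NOT cc", so it needs the negated
// condition.

#include <cassert>

enum SketchCond { sk_eq, sk_ne, sk_lt, sk_le, sk_gt, sk_ge };

static SketchCond sketch_invert(SketchCond cc) {
  switch (cc) {
    case sk_eq: return sk_ne;
    case sk_ne: return sk_eq;
    case sk_lt: return sk_ge;  // !(a <  b) == (a >= b)
    case sk_le: return sk_gt;  // !(a <= b) == (a >  b)
    case sk_gt: return sk_le;
    case sk_ge: return sk_lt;
  }
  return sk_eq; // unreachable, mirrors ShouldNotReachHere()
}

static void sketch_check_inversion() {
  assert(sketch_invert(sk_lt) == sk_ge); // mirrors j_not(less) == greaterEqual
}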
duke@0 141
duke@0 142
mockner@7970 143
duke@0 144 // Miscellaneous helper routines
ysr@342 145 // Store an oop (or NULL) at the address described by obj.
ysr@342 146 // If val == noreg this means store a NULL.
ysr@342 147
mockner@7970 148
ysr@342 149 static void do_oop_store(InterpreterMacroAssembler* _masm,
ysr@342 150 Address obj,
ysr@342 151 Register val,
ysr@342 152 BarrierSet::Name barrier,
ysr@342 153 bool precise) {
ysr@342 154 assert(val == noreg || val == rax, "parameter is just for looks");
ysr@342 155 switch (barrier) {
jprovino@4107 156 #if INCLUDE_ALL_GCS
ysr@342 157 case BarrierSet::G1SATBCTLogging:
ysr@342 158 {
ysr@342 159 // flatten object address if needed
mockner@7970 160 // We do it regardless of precise because we need the registers
ysr@342 161 if (obj.index() == noreg && obj.disp() == 0) {
ysr@342 162 if (obj.base() != rdx) {
mockner@7970 163 __ movptr(rdx, obj.base());
ysr@342 164 }
ysr@342 165 } else {
mockner@7970 166 __ lea(rdx, obj);
ysr@342 167 }
mockner@7970 168
mockner@7970 169 Register rtmp = LP64_ONLY(r8) NOT_LP64(rsi);
mockner@7970 170 Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
mockner@7970 171
mockner@7970 172 NOT_LP64(__ get_thread(rcx));
mockner@7970 173 NOT_LP64(__ save_bcp());
mockner@7970 174
johnc@2346 175 __ g1_write_barrier_pre(rdx /* obj */,
johnc@2346 176 rbx /* pre_val */,
mockner@7970 177 rthread /* thread */,
mockner@7970 178 rtmp /* tmp */,
johnc@2346 179 val != noreg /* tosca_live */,
johnc@2346 180 false /* expand_call */);
ysr@342 181 if (val == noreg) {
johnc@1047 182 __ store_heap_oop_null(Address(rdx, 0));
ysr@342 183 } else {
johnc@4498 184 // G1 barrier needs uncompressed oop for region cross check.
johnc@4498 185 Register new_val = val;
johnc@4498 186 if (UseCompressedOops) {
johnc@4498 187 new_val = rbx;
johnc@4498 188 __ movptr(new_val, val);
johnc@4498 189 }
ysr@342 190 __ store_heap_oop(Address(rdx, 0), val);
johnc@2346 191 __ g1_write_barrier_post(rdx /* store_adr */,
johnc@4498 192 new_val /* new_val */,
mockner@7970 193 rthread /* thread */,
mockner@7970 194 rtmp /* tmp */,
johnc@2346 195 rbx /* tmp2 */);
ysr@342 196 }
mockner@7970 197 NOT_LP64( __ restore_bcp());
ysr@342 198 }
ysr@342 199 break;
jprovino@4107 200 #endif // INCLUDE_ALL_GCS
kbarrett@8925 201 case BarrierSet::CardTableForRS:
ysr@342 202 case BarrierSet::CardTableExtension:
ysr@342 203 {
ysr@342 204 if (val == noreg) {
johnc@1047 205 __ store_heap_oop_null(obj);
ysr@342 206 } else {
ysr@342 207 __ store_heap_oop(obj, val);
ysr@342 208 // flatten object address if needed
ysr@342 209 if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
ysr@342 210 __ store_check(obj.base());
ysr@342 211 } else {
mockner@7970 212 __ lea(rdx, obj);
ysr@342 213 __ store_check(rdx);
ysr@342 214 }
ysr@342 215 }
ysr@342 216 }
ysr@342 217 break;
ysr@342 218 case BarrierSet::ModRef:
ysr@342 219 if (val == noreg) {
johnc@1047 220 __ store_heap_oop_null(obj);
ysr@342 221 } else {
ysr@342 222 __ store_heap_oop(obj, val);
ysr@342 223 }
ysr@342 224 break;
ysr@342 225 default :
ysr@342 226 ShouldNotReachHere();
ysr@342 227
ysr@342 228 }
ysr@342 229 }
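
// Illustration: a hedged sketch in plain C++ (illustrative stub helpers, not
// the VM's barrier code) of the structure do_oop_store dispatches on above.
// G1 needs a pre-barrier that logs the overwritten value (SATB) and a
// post-barrier that records cross-region pointers; card-table collectors
// only dirty a card after the store.

typedef void* sketch_oop;

static void sketch_satb_log_old_value(sketch_oop) {}    // stub: enqueue in SATB queue
static void sketch_record_cross_region(sketch_oop*) {}  // stub: remember the pointer
static void sketch_dirty_card_for(sketch_oop*) {}       // stub: mark the covering card

static void sketch_g1_store(sketch_oop* field, sketch_oop new_val) {
  sketch_satb_log_old_value(*field);     // pre-barrier  (g1_write_barrier_pre)
  *field = new_val;                      // the store itself
  if (new_val != 0) {
    sketch_record_cross_region(field);   // post-barrier (g1_write_barrier_post)
  }
}

static void sketch_card_table_store(sketch_oop* field, sketch_oop new_val) {
  *field = new_val;                      // store first...
  sketch_dirty_card_for(field);          // ...then store_check marks the card
}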
duke@0 230
duke@0 231 Address TemplateTable::at_bcp(int offset) {
duke@0 232 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
mockner@7970 233 return Address(rbcp, offset);
duke@0 234 }
duke@0 235
mockner@7970 236
twisti@2615 237 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
twisti@2615 238 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
twisti@2615 239 int byte_no) {
twisti@2615 240 if (!RewriteBytecodes) return;
twisti@2615 241 Label L_patch_done;
twisti@2615 242
twisti@2615 243 switch (bc) {
thartmann@13007 244 case Bytecodes::_fast_qputfield:
thartmann@13007 245 __ jmp(L_patch_done); // don't patch yet
thartmann@13007 246 break;
twisti@2615 247 case Bytecodes::_fast_aputfield:
twisti@2615 248 case Bytecodes::_fast_bputfield:
coleenp@10885 249 case Bytecodes::_fast_zputfield:
twisti@2615 250 case Bytecodes::_fast_cputfield:
twisti@2615 251 case Bytecodes::_fast_dputfield:
twisti@2615 252 case Bytecodes::_fast_fputfield:
twisti@2615 253 case Bytecodes::_fast_iputfield:
twisti@2615 254 case Bytecodes::_fast_lputfield:
twisti@2615 255 case Bytecodes::_fast_sputfield:
twisti@2615 256 {
twisti@2615 257 // We skip bytecode quickening for putfield instructions when
twisti@2615 258 // the put_code written to the constant pool cache is zero.
twisti@2615 259 // This is required so that every execution of this instruction
twisti@2615 260 // calls out to InterpreterRuntime::resolve_get_put to do
twisti@2615 261 // additional, required work.
twisti@2615 262 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
twisti@2615 263 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
twisti@2615 264 __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
twisti@2615 265 __ movl(bc_reg, bc);
twisti@2615 266 __ cmpl(temp_reg, (int) 0);
twisti@2615 267 __ jcc(Assembler::zero, L_patch_done); // don't patch
twisti@2615 268 }
twisti@2615 269 break;
twisti@2615 270 default:
twisti@2615 271 assert(byte_no == -1, "sanity");
twisti@2615 272 // the pair bytecodes have already done the load.
twisti@2615 273 if (load_bc_into_bc_reg) {
twisti@2615 274 __ movl(bc_reg, bc);
twisti@2615 275 }
duke@0 276 }
twisti@2615 277
twisti@2615 278 if (JvmtiExport::can_post_breakpoint()) {
twisti@2615 279 Label L_fast_patch;
twisti@2615 280 // if a breakpoint is present we can't rewrite the stream directly
twisti@2615 281 __ movzbl(temp_reg, at_bcp(0));
twisti@2615 282 __ cmpl(temp_reg, Bytecodes::_breakpoint);
twisti@2615 283 __ jcc(Assembler::notEqual, L_fast_patch);
twisti@2615 284 __ get_method(temp_reg);
twisti@2615 285 // Let breakpoint table handling rewrite to quicker bytecode
mockner@7970 286 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
twisti@2615 287 #ifndef ASSERT
twisti@2615 288 __ jmpb(L_patch_done);
twisti@2615 289 #else
twisti@2615 290 __ jmp(L_patch_done);
twisti@2615 291 #endif
twisti@2615 292 __ bind(L_fast_patch);
duke@0 293 }
twisti@2615 294
twisti@2615 295 #ifdef ASSERT
twisti@2615 296 Label L_okay;
twisti@2615 297 __ load_unsigned_byte(temp_reg, at_bcp(0));
twisti@2615 298 __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
twisti@2615 299 __ jcc(Assembler::equal, L_okay);
twisti@2615 300 __ cmpl(temp_reg, bc_reg);
twisti@2615 301 __ jcc(Assembler::equal, L_okay);
twisti@2615 302 __ stop("patching the wrong bytecode");
twisti@2615 303 __ bind(L_okay);
twisti@1108 304 #endif
twisti@2615 305
duke@0 306 // patch bytecode
twisti@2615 307 __ movb(at_bcp(0), bc_reg);
twisti@2615 308 __ bind(L_patch_done);
duke@0 309 }
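
// Illustration: a standalone sketch of the quickening idea patch_bytecode
// implements (the fast opcode value here is hypothetical, not HotSpot's):
// once an instruction is resolved, its opcode byte is overwritten in place
// so the next execution dispatches directly to the fast template. The
// store is a single byte, so concurrent interpreter threads see either the
// old or the new opcode, both of which are safe to execute.

#include <cassert>
#include <cstdint>

enum { SK_GETFIELD = 0xb4, SK_FAST_IGETFIELD = 0xcc }; // illustrative values

static void sketch_patch_bytecode(uint8_t* bcp, uint8_t fast_bc) {
  *bcp = fast_bc; // mirrors "__ movb(at_bcp(0), bc_reg)"
}

static void sketch_quicken() {
  uint8_t code[] = { SK_GETFIELD, 0x00, 0x05 };   // getfield #5
  sketch_patch_bytecode(code, SK_FAST_IGETFIELD);
  assert(code[0] == SK_FAST_IGETFIELD);           // now takes the fast path
}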
duke@0 310 // Individual instructions
duke@0 311
mockner@7970 312
duke@0 313 void TemplateTable::nop() {
duke@0 314 transition(vtos, vtos);
duke@0 315 // nothing to do
duke@0 316 }
duke@0 317
duke@0 318 void TemplateTable::shouldnotreachhere() {
duke@0 319 transition(vtos, vtos);
duke@0 320 __ stop("shouldnotreachhere bytecode");
duke@0 321 }
duke@0 322
duke@0 323 void TemplateTable::aconst_null() {
duke@0 324 transition(vtos, atos);
duke@0 325 __ xorl(rax, rax);
duke@0 326 }
duke@0 327
duke@0 328 void TemplateTable::iconst(int value) {
duke@0 329 transition(vtos, itos);
duke@0 330 if (value == 0) {
duke@0 331 __ xorl(rax, rax);
duke@0 332 } else {
duke@0 333 __ movl(rax, value);
duke@0 334 }
duke@0 335 }
duke@0 336
duke@0 337 void TemplateTable::lconst(int value) {
duke@0 338 transition(vtos, ltos);
duke@0 339 if (value == 0) {
duke@0 340 __ xorl(rax, rax);
duke@0 341 } else {
duke@0 342 __ movl(rax, value);
duke@0 343 }
mockner@7970 344 #ifndef _LP64
mockner@7970 345 assert(value >= 0, "check this code");
mockner@7970 346 __ xorptr(rdx, rdx);
mockner@7970 347 #endif
duke@0 348 }
duke@0 349
mockner@7970 350
mockner@7970 351
duke@0 352 void TemplateTable::fconst(int value) {
duke@0 353 transition(vtos, ftos);
zmajo@8879 354 if (UseSSE >= 1) {
zmajo@8879 355 static float one = 1.0f, two = 2.0f;
zmajo@8879 356 switch (value) {
zmajo@8879 357 case 0:
zmajo@8879 358 __ xorps(xmm0, xmm0);
zmajo@8879 359 break;
zmajo@8879 360 case 1:
zmajo@8879 361 __ movflt(xmm0, ExternalAddress((address) &one));
zmajo@8879 362 break;
zmajo@8879 363 case 2:
zmajo@8879 364 __ movflt(xmm0, ExternalAddress((address) &two));
zmajo@8879 365 break;
zmajo@8879 366 default:
zmajo@8879 367 ShouldNotReachHere();
zmajo@8879 368 break;
zmajo@8879 369 }
zmajo@8879 370 } else {
mockner@7970 371 #ifdef _LP64
duke@0 372 ShouldNotReachHere();
zmajo@8879 373 #else
zmajo@8879 374 if (value == 0) { __ fldz();
zmajo@8879 375 } else if (value == 1) { __ fld1();
zmajo@8879 376 } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // there should be a better way to load 2.0f
zmajo@8879 377 } else { ShouldNotReachHere();
zmajo@8879 378 }
zmajo@8879 379 #endif // _LP64
duke@0 380 }
duke@0 381 }
duke@0 382
duke@0 383 void TemplateTable::dconst(int value) {
duke@0 384 transition(vtos, dtos);
zmajo@8879 385 if (UseSSE >= 2) {
zmajo@8879 386 static double one = 1.0;
zmajo@8879 387 switch (value) {
zmajo@8879 388 case 0:
zmajo@8879 389 __ xorpd(xmm0, xmm0);
zmajo@8879 390 break;
zmajo@8879 391 case 1:
zmajo@8879 392 __ movdbl(xmm0, ExternalAddress((address) &one));
zmajo@8879 393 break;
zmajo@8879 394 default:
zmajo@8879 395 ShouldNotReachHere();
zmajo@8879 396 break;
zmajo@8879 397 }
zmajo@8879 398 } else {
mockner@7970 399 #ifdef _LP64
duke@0 400 ShouldNotReachHere();
zmajo@8879 401 #else
zmajo@8879 402 if (value == 0) { __ fldz();
zmajo@8879 403 } else if (value == 1) { __ fld1();
zmajo@8879 404 } else { ShouldNotReachHere();
zmajo@8879 405 }
zmajo@8879 406 #endif
duke@0 407 }
duke@0 408 }
duke@0 409
duke@0 410 void TemplateTable::bipush() {
duke@0 411 transition(vtos, itos);
duke@0 412 __ load_signed_byte(rax, at_bcp(1));
duke@0 413 }
duke@0 414
duke@0 415 void TemplateTable::sipush() {
duke@0 416 transition(vtos, itos);
jrose@622 417 __ load_unsigned_short(rax, at_bcp(1));
duke@0 418 __ bswapl(rax);
duke@0 419 __ sarl(rax, 16);
duke@0 420 }
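
// Illustration: a standalone sketch of the decode above. Bytecode operands
// are big-endian while x86 loads are little-endian, so a 16-bit load
// followed by bswap and an arithmetic shift reconstructs the signed
// operand. (__builtin_bswap32 stands in for the bswapl instruction.)

#include <cassert>
#include <cstdint>

static int32_t sketch_decode_sipush(const uint8_t* bcp) {
  uint32_t r = (uint32_t)((bcp[2] << 8) | bcp[1]); // load_unsigned_short at bcp+1
  r = __builtin_bswap32(r);                        // operand now in the top 16 bits
  return (int32_t)r >> 16;                         // sarl 16 sign-extends
}

static void sketch_check_sipush() {
  const uint8_t code[] = { 0x11, 0xFF, 0x80 };     // sipush -128 (operand 0xFF80)
  assert(sketch_decode_sipush(code) == -128);
}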
duke@0 421
duke@0 422 void TemplateTable::ldc(bool wide) {
duke@0 423 transition(vtos, vtos);
mockner@7970 424 Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
duke@0 425 Label call_ldc, notFloat, notClass, Done;
duke@0 426
duke@0 427 if (wide) {
duke@0 428 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
duke@0 429 } else {
duke@0 430 __ load_unsigned_byte(rbx, at_bcp(1));
duke@0 431 }
duke@0 432
duke@0 433 __ get_cpool_and_tags(rcx, rax);
coleenp@3602 434 const int base_offset = ConstantPool::header_size() * wordSize;
coleenp@3602 435 const int tags_offset = Array<u1>::base_offset_in_bytes();
duke@0 436
duke@0 437 // get type
duke@0 438 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
duke@0 439
duke@0 440 // unresolved class - get the resolved class
duke@0 441 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
duke@0 442 __ jccb(Assembler::equal, call_ldc);
duke@0 443
duke@0 444 // unresolved class in error state - call into runtime to throw the error
duke@0 445 // from the first resolution attempt
duke@0 446 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
duke@0 447 __ jccb(Assembler::equal, call_ldc);
duke@0 448
duke@0 449 // resolved class - need to call vm to get java mirror of the class
duke@0 450 __ cmpl(rdx, JVM_CONSTANT_Class);
lfoltan@13052 451 __ jcc(Assembler::equal, call_ldc);
lfoltan@13052 452
lfoltan@13052 453 // unresolved value type - get the resolved class
lfoltan@13052 454 __ cmpl(rdx, JVM_CONSTANT_UnresolvedValue);
lfoltan@13052 455 __ jccb(Assembler::equal, call_ldc);
lfoltan@13052 456
lfoltan@13052 457 // unresolved value type in error state - call into runtime to throw the error
lfoltan@13052 458 // from the first resolution attempt
lfoltan@13052 459 __ cmpl(rdx, JVM_CONSTANT_UnresolvedValueInError);
lfoltan@13052 460 __ jccb(Assembler::equal, call_ldc);
lfoltan@13052 461
lfoltan@13052 462 // resolved value type - need to call vm to get java mirror
lfoltan@13052 463 __ cmpl(rdx, JVM_CONSTANT_Value);
duke@0 464 __ jcc(Assembler::notEqual, notClass);
duke@0 465
duke@0 466 __ bind(call_ldc);
mockner@7970 467
mockner@7970 468 __ movl(rarg, wide);
mockner@7970 469 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);
mockner@7970 470
mockner@7970 471 __ push(atos);
duke@0 472 __ jmp(Done);
duke@0 473
duke@0 474 __ bind(notClass);
duke@0 475 __ cmpl(rdx, JVM_CONSTANT_Float);
duke@0 476 __ jccb(Assembler::notEqual, notFloat);
mockner@7970 477
duke@0 478 // ftos
zmajo@8879 479 __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
mockner@7970 480 __ push(ftos);
duke@0 481 __ jmp(Done);
duke@0 482
duke@0 483 __ bind(notFloat);
duke@0 484 #ifdef ASSERT
duke@0 485 {
duke@0 486 Label L;
duke@0 487 __ cmpl(rdx, JVM_CONSTANT_Integer);
duke@0 488 __ jcc(Assembler::equal, L);
coleenp@3602 489 // String and Object are rewritten to fast_aldc
duke@0 490 __ stop("unexpected tag type in ldc");
duke@0 491 __ bind(L);
duke@0 492 }
duke@0 493 #endif
coleenp@3602 494 // itos JVM_CONSTANT_Integer only
mockner@7970 495 __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
mockner@7970 496 __ push(itos);
duke@0 497 __ bind(Done);
duke@0 498 }
duke@0 499
jrose@1522 500 // Fast path for caching oop constants.
jrose@1522 501 void TemplateTable::fast_aldc(bool wide) {
jrose@1522 502 transition(vtos, atos);
jrose@1522 503
coleenp@3602 504 Register result = rax;
coleenp@3602 505 Register tmp = rdx;
coleenp@3602 506 int index_size = wide ? sizeof(u2) : sizeof(u1);
coleenp@3602 507
coleenp@3602 508 Label resolved;
coleenp@3602 509
coleenp@3602 510 // We are resolved if the resolved reference cache entry contains a
coleenp@3602 511 // non-null object (String, MethodType, etc.)
coleenp@3602 512 assert_different_registers(result, tmp);
coleenp@3602 513 __ get_cache_index_at_bcp(tmp, 1, index_size);
coleenp@3602 514 __ load_resolved_reference_at_index(result, tmp);
coleenp@3602 515 __ testl(result, result);
coleenp@3602 516 __ jcc(Assembler::notZero, resolved);
coleenp@3602 517
coleenp@3602 518 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
coleenp@3602 519
coleenp@3602 520 // first time invocation - must resolve first
coleenp@3602 521 __ movl(tmp, (int)bytecode());
coleenp@3602 522 __ call_VM(result, entry, tmp);
coleenp@3602 523
coleenp@3602 524 __ bind(resolved);
coleenp@3602 525
coleenp@3602 526 if (VerifyOops) {
coleenp@3602 527 __ verify_oop(result);
jrose@1522 528 }
jrose@1522 529 }
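
// Illustration: a hedged sketch (illustrative types; the real cache is the
// ConstantPool's resolved-references array) of the caching pattern above:
// a non-null cache slot means "already resolved", so only the first
// execution pays for the runtime call.

typedef void* sketch_ref;

static sketch_ref sketch_resolve_ldc(sketch_ref* cache, int index) {
  cache[index] = (sketch_ref)1; // stub: the VM resolves and stores the object
  return cache[index];
}

static sketch_ref sketch_fast_aldc(sketch_ref* cache, int index) {
  sketch_ref result = cache[index];   // load_resolved_reference_at_index
  if (result == 0) {                  // first time: must resolve
    result = sketch_resolve_ldc(cache, index);
  }
  return result;
}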
jrose@1522 530
duke@0 531 void TemplateTable::ldc2_w() {
duke@0 532 transition(vtos, vtos);
duke@0 533 Label Long, Done;
duke@0 534 __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
duke@0 535
duke@0 536 __ get_cpool_and_tags(rcx, rax);
coleenp@3602 537 const int base_offset = ConstantPool::header_size() * wordSize;
coleenp@3602 538 const int tags_offset = Array<u1>::base_offset_in_bytes();
duke@0 539
duke@0 540 // get type
duke@0 541 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
duke@0 542 JVM_CONSTANT_Double);
duke@0 543 __ jccb(Assembler::notEqual, Long);
mockner@7970 544
duke@0 545 // dtos
zmajo@8879 546 __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
mockner@7970 547 __ push(dtos);
mockner@7970 548
duke@0 549 __ jmpb(Done);
duke@0 550 __ bind(Long);
mockner@7970 551
duke@0 552 // ltos
mockner@7970 553 __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
mockner@7970 554 NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
mockner@7970 555 __ push(ltos);
duke@0 556
duke@0 557 __ bind(Done);
duke@0 558 }
duke@0 559
duke@0 560 void TemplateTable::locals_index(Register reg, int offset) {
duke@0 561 __ load_unsigned_byte(reg, at_bcp(offset));
never@304 562 __ negptr(reg);
duke@0 563 }
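
// Illustration: a standalone sketch of why the index is negated. rlocals
// points at local #0 and locals occupy DECREASING addresses, so local #i
// lives at rlocals - i*wordSize; negating once lets iaddress(Register) use
// an ordinary scaled addressing mode, [rlocals + (-i)*wordSize].

#include <cassert>
#include <cstdint>

static intptr_t* sketch_local_addr(intptr_t* rlocals, int i) {
  intptr_t neg = -(intptr_t)i;   // what locals_index computes
  return rlocals + neg;          // scaled by sizeof(intptr_t), like times_ptr
}

static void sketch_check_locals() {
  intptr_t frame[3] = { 30, 20, 10 };
  intptr_t* rlocals = &frame[2];                 // local #0 at the highest address
  assert(*sketch_local_addr(rlocals, 0) == 10);
  assert(*sketch_local_addr(rlocals, 2) == 30);
}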
duke@0 564
duke@0 565 void TemplateTable::iload() {
minqi@8102 566 iload_internal();
minqi@8102 567 }
minqi@8102 568
minqi@8102 569 void TemplateTable::nofast_iload() {
minqi@8102 570 iload_internal(may_not_rewrite);
minqi@8102 571 }
minqi@8102 572
minqi@8102 573 void TemplateTable::iload_internal(RewriteControl rc) {
duke@0 574 transition(vtos, itos);
minqi@8102 575 if (RewriteFrequentPairs && rc == may_rewrite) {
duke@0 576 Label rewrite, done;
mockner@7970 577 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
mockner@7970 578 LP64_ONLY(assert(rbx != bc, "register damaged"));
duke@0 579
duke@0 580 // get next byte
duke@0 581 __ load_unsigned_byte(rbx,
duke@0 582 at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
duke@0 583 // if _iload, wait to rewrite to _fast_iload2. We only want to rewrite the
duke@0 584 // last two iloads in a pair. If the next bytecode is _fast_iload (an
duke@0 585 // iload that has already been rewritten), this iload and the next form
duke@0 586 // an iload pair, so rewrite this one to _fast_iload2.
duke@0 587 __ cmpl(rbx, Bytecodes::_iload);
duke@0 588 __ jcc(Assembler::equal, done);
duke@0 589
duke@0 590 __ cmpl(rbx, Bytecodes::_fast_iload);
duke@0 591 __ movl(bc, Bytecodes::_fast_iload2);
mockner@7970 592
duke@0 593 __ jccb(Assembler::equal, rewrite);
duke@0 594
duke@0 595 // if _caload, rewrite to fast_icaload
duke@0 596 __ cmpl(rbx, Bytecodes::_caload);
duke@0 597 __ movl(bc, Bytecodes::_fast_icaload);
duke@0 598 __ jccb(Assembler::equal, rewrite);
duke@0 599
duke@0 600 // rewrite so iload doesn't check again.
duke@0 601 __ movl(bc, Bytecodes::_fast_iload);
duke@0 602
duke@0 603 // rewrite
duke@0 604 // bc: fast bytecode
duke@0 605 __ bind(rewrite);
duke@0 606 patch_bytecode(Bytecodes::_iload, bc, rbx, false);
duke@0 607 __ bind(done);
duke@0 608 }
duke@0 609
duke@0 610 // Get the local value into tos
duke@0 611 locals_index(rbx);
duke@0 612 __ movl(rax, iaddress(rbx));
duke@0 613 }
duke@0 614
duke@0 615 void TemplateTable::fast_iload2() {
duke@0 616 transition(vtos, itos);
duke@0 617 locals_index(rbx);
duke@0 618 __ movl(rax, iaddress(rbx));
duke@0 619 __ push(itos);
duke@0 620 locals_index(rbx, 3);
duke@0 621 __ movl(rax, iaddress(rbx));
duke@0 622 }
duke@0 623
duke@0 624 void TemplateTable::fast_iload() {
duke@0 625 transition(vtos, itos);
duke@0 626 locals_index(rbx);
duke@0 627 __ movl(rax, iaddress(rbx));
duke@0 628 }
duke@0 629
duke@0 630 void TemplateTable::lload() {
duke@0 631 transition(vtos, ltos);
duke@0 632 locals_index(rbx);
mockner@7970 633 __ movptr(rax, laddress(rbx));
mockner@7970 634 NOT_LP64(__ movl(rdx, haddress(rbx)));
duke@0 635 }
duke@0 636
duke@0 637 void TemplateTable::fload() {
duke@0 638 transition(vtos, ftos);
duke@0 639 locals_index(rbx);
zmajo@8879 640 __ load_float(faddress(rbx));
duke@0 641 }
duke@0 642
duke@0 643 void TemplateTable::dload() {
duke@0 644 transition(vtos, dtos);
duke@0 645 locals_index(rbx);
zmajo@8879 646 __ load_double(daddress(rbx));
duke@0 647 }
duke@0 648
duke@0 649 void TemplateTable::aload() {
duke@0 650 transition(vtos, atos);
duke@0 651 locals_index(rbx);
never@304 652 __ movptr(rax, aaddress(rbx));
duke@0 653 }
duke@0 654
thartmann@13007 655 void TemplateTable::vload() {
thartmann@13007 656 transition(vtos, qtos);
thartmann@13007 657 locals_index(rbx);
thartmann@13007 658 __ movptr(rax, aaddress(rbx));
thartmann@13007 659 }
thartmann@13007 660
duke@0 661 void TemplateTable::locals_index_wide(Register reg) {
mgerdin@5624 662 __ load_unsigned_short(reg, at_bcp(2));
duke@0 663 __ bswapl(reg);
duke@0 664 __ shrl(reg, 16);
never@304 665 __ negptr(reg);
duke@0 666 }
duke@0 667
duke@0 668 void TemplateTable::wide_iload() {
duke@0 669 transition(vtos, itos);
duke@0 670 locals_index_wide(rbx);
duke@0 671 __ movl(rax, iaddress(rbx));
duke@0 672 }
duke@0 673
duke@0 674 void TemplateTable::wide_lload() {
duke@0 675 transition(vtos, ltos);
duke@0 676 locals_index_wide(rbx);
mockner@7970 677 __ movptr(rax, laddress(rbx));
mockner@7970 678 NOT_LP64(__ movl(rdx, haddress(rbx)));
duke@0 679 }
duke@0 680
duke@0 681 void TemplateTable::wide_fload() {
duke@0 682 transition(vtos, ftos);
duke@0 683 locals_index_wide(rbx);
zmajo@8879 684 __ load_float(faddress(rbx));
duke@0 685 }
duke@0 686
duke@0 687 void TemplateTable::wide_dload() {
duke@0 688 transition(vtos, dtos);
duke@0 689 locals_index_wide(rbx);
zmajo@8879 690 __ load_double(daddress(rbx));
duke@0 691 }
duke@0 692
duke@0 693 void TemplateTable::wide_aload() {
duke@0 694 transition(vtos, atos);
duke@0 695 locals_index_wide(rbx);
never@304 696 __ movptr(rax, aaddress(rbx));
duke@0 697 }
duke@0 698
thartmann@13007 699 void TemplateTable::wide_vload() {
thartmann@13007 700 transition(vtos, qtos);
thartmann@13007 701 locals_index_wide(rbx);
thartmann@13007 702 __ movptr(rax, aaddress(rbx));
thartmann@13007 703 }
thartmann@13007 704
duke@0 705 void TemplateTable::index_check(Register array, Register index) {
mockner@7970 706 // Pop ptr into array
mockner@7970 707 __ pop_ptr(array);
mockner@7970 708 index_check_without_pop(array, index);
mockner@7970 709 }
mockner@7970 710
mockner@7970 711 void TemplateTable::index_check_without_pop(Register array, Register index) {
duke@0 712 // destroys rbx
duke@0 713 // check array
duke@0 714 __ null_check(array, arrayOopDesc::length_offset_in_bytes());
duke@0 715 // sign extend index for use by indexed load
never@304 716 __ movl2ptr(index, index);
duke@0 717 // check index
duke@0 718 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
duke@0 719 if (index != rbx) {
mockner@7970 720 // ??? convention: move aberrant index into rbx for exception message
duke@0 721 assert(rbx != array, "different registers");
duke@0 722 __ movl(rbx, index);
duke@0 723 }
duke@0 724 __ jump_cc(Assembler::aboveEqual,
duke@0 725 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
duke@0 726 }
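
// Illustration: the single jcc(aboveEqual) above is an UNSIGNED compare, so
// it catches both error cases at once: a negative index reinterpreted as
// unsigned is larger than any valid array length. A standalone equivalent:

#include <cassert>

static bool sketch_index_out_of_bounds(int index, int length) {
  return (unsigned)index >= (unsigned)length; // one test for <0 and >=length
}

static void sketch_check_bounds() {
  assert( sketch_index_out_of_bounds(-1, 10)); // negative -> huge unsigned
  assert( sketch_index_out_of_bounds(10, 10));
  assert(!sketch_index_out_of_bounds( 9, 10));
}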
duke@0 727
mockner@7970 728
duke@0 729 void TemplateTable::iaload() {
duke@0 730 transition(itos, itos);
mockner@7970 731 // rax: index
duke@0 732 // rdx: array
duke@0 733 index_check(rdx, rax); // kills rbx
duke@0 734 __ movl(rax, Address(rdx, rax,
duke@0 735 Address::times_4,
duke@0 736 arrayOopDesc::base_offset_in_bytes(T_INT)));
duke@0 737 }
duke@0 738
duke@0 739 void TemplateTable::laload() {
duke@0 740 transition(itos, ltos);
mockner@7970 741 // rax: index
duke@0 742 // rdx: array
duke@0 743 index_check(rdx, rax); // kills rbx
mockner@7970 744 NOT_LP64(__ mov(rbx, rax));
mockner@7970 745 // rbx: index
mockner@7970 746 __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
mockner@7970 747 NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
duke@0 748 }
duke@0 749
mockner@7970 750
mockner@7970 751
duke@0 752 void TemplateTable::faload() {
duke@0 753 transition(itos, ftos);
mockner@7970 754 // rax: index
duke@0 755 // rdx: array
duke@0 756 index_check(rdx, rax); // kills rbx
zmajo@8879 757 __ load_float(Address(rdx, rax,
zmajo@8879 758 Address::times_4,
zmajo@8879 759 arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
duke@0 760 }
duke@0 761
duke@0 762 void TemplateTable::daload() {
duke@0 763 transition(itos, dtos);
mockner@7970 764 // rax: index
duke@0 765 // rdx: array
duke@0 766 index_check(rdx, rax); // kills rbx
zmajo@8879 767 __ load_double(Address(rdx, rax,
zmajo@8879 768 Address::times_8,
zmajo@8879 769 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
duke@0 770 }
duke@0 771
duke@0 772 void TemplateTable::aaload() {
duke@0 773 transition(itos, atos);
mockner@7970 774 // rax: index
duke@0 775 // rdx: array
duke@0 776 index_check(rdx, rax); // kills rbx
coleenp@113 777 __ load_heap_oop(rax, Address(rdx, rax,
mockner@7970 778 UseCompressedOops ? Address::times_4 : Address::times_ptr,
ysr@342 779 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
duke@0 780 }
duke@0 781
thartmann@13007 782 void TemplateTable::vaload() {
thartmann@13007 783 transition(itos, qtos);
thartmann@13007 784
thartmann@13007 785 Register array = rcx;
thartmann@13007 786 Register index = rax;
thartmann@13007 787
thartmann@13007 788 index_check(array, index); // kills rbx, pops array
thartmann@13007 789
thartmann@13007 790 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load) , array, index);
thartmann@13007 791 }
thartmann@13007 792
duke@0 793 void TemplateTable::baload() {
duke@0 794 transition(itos, itos);
mockner@7970 795 // rax: index
duke@0 796 // rdx: array
duke@0 797 index_check(rdx, rax); // kills rbx
mockner@7970 798 __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
duke@0 799 }
duke@0 800
duke@0 801 void TemplateTable::caload() {
duke@0 802 transition(itos, itos);
mockner@7970 803 // rax: index
mockner@7970 804 // rdx: array
mockner@7970 805 index_check(rdx, rax); // kills rbx
mockner@7970 806 __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
mockner@7970 807 }
mockner@7970 808
mockner@7970 809 // iload followed by caload frequent pair
mockner@7970 810 void TemplateTable::fast_icaload() {
mockner@7970 811 transition(vtos, itos);
mockner@7970 812 // load index out of locals
mockner@7970 813 locals_index(rbx);
mockner@7970 814 __ movl(rax, iaddress(rbx));
mockner@7970 815
mockner@7970 816 // rax: index
duke@0 817 // rdx: array
duke@0 818 index_check(rdx, rax); // kills rbx
jrose@622 819 __ load_unsigned_short(rax,
jrose@622 820 Address(rdx, rax,
jrose@622 821 Address::times_2,
jrose@622 822 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
duke@0 823 }
duke@0 824
duke@0 825
duke@0 826 void TemplateTable::saload() {
duke@0 827 transition(itos, itos);
mockner@7970 828 // rax: index
duke@0 829 // rdx: array
duke@0 830 index_check(rdx, rax); // kills rbx
mockner@7970 831 __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
duke@0 832 }
duke@0 833
duke@0 834 void TemplateTable::iload(int n) {
duke@0 835 transition(vtos, itos);
duke@0 836 __ movl(rax, iaddress(n));
duke@0 837 }
duke@0 838
duke@0 839 void TemplateTable::lload(int n) {
duke@0 840 transition(vtos, ltos);
mockner@7970 841 __ movptr(rax, laddress(n));
mockner@7970 842 NOT_LP64(__ movptr(rdx, haddress(n)));
duke@0 843 }
duke@0 844
duke@0 845 void TemplateTable::fload(int n) {
duke@0 846 transition(vtos, ftos);
zmajo@8879 847 __ load_float(faddress(n));
duke@0 848 }
duke@0 849
duke@0 850 void TemplateTable::dload(int n) {
duke@0 851 transition(vtos, dtos);
zmajo@8879 852 __ load_double(daddress(n));
duke@0 853 }
duke@0 854
duke@0 855 void TemplateTable::aload(int n) {
duke@0 856 transition(vtos, atos);
never@304 857 __ movptr(rax, aaddress(n));
duke@0 858 }
duke@0 859
duke@0 860 void TemplateTable::aload_0() {
minqi@8102 861 aload_0_internal();
minqi@8102 862 }
minqi@8102 863
minqi@8102 864 void TemplateTable::nofast_aload_0() {
minqi@8102 865 aload_0_internal(may_not_rewrite);
minqi@8102 866 }
minqi@8102 867
minqi@8102 868 void TemplateTable::aload_0_internal(RewriteControl rc) {
duke@0 869 transition(vtos, atos);
duke@0 870 // According to bytecode histograms, the pairs:
duke@0 871 //
duke@0 872 // _aload_0, _fast_igetfield
duke@0 873 // _aload_0, _fast_agetfield
duke@0 874 // _aload_0, _fast_fgetfield
duke@0 875 //
duke@0 876 // occur frequently. If RewriteFrequentPairs is set, the (slow)
duke@0 877 // _aload_0 bytecode checks if the next bytecode is either
duke@0 878 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
duke@0 879 // rewrites the current bytecode into a pair bytecode; otherwise it
duke@0 880 // rewrites the current bytecode into _fast_aload_0 that doesn't do
duke@0 881 // the pair check anymore.
duke@0 882 //
duke@0 883 // Note: If the next bytecode is _getfield, the rewrite must be
duke@0 884 // delayed, otherwise we may miss an opportunity for a pair.
duke@0 885 //
duke@0 886 // Also rewrite frequent pairs
duke@0 887 // aload_0, aload_1
duke@0 888 // aload_0, iload_1
duke@0 889 // These bytecodes are most profitable to rewrite because they
duke@0 890 // require only a small amount of code.
minqi@8102 891 if (RewriteFrequentPairs && rc == may_rewrite) {
duke@0 892 Label rewrite, done;
mockner@7970 893
mockner@7970 894 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
mockner@7970 895 LP64_ONLY(assert(rbx != bc, "register damaged"));
mockner@7970 896
duke@0 897 // get next byte
mockner@7970 898 __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
duke@0 899
duke@0 900 // if _getfield then wait with rewrite
duke@0 901 __ cmpl(rbx, Bytecodes::_getfield);
duke@0 902 __ jcc(Assembler::equal, done);
duke@0 903
coleenp@11829 904 // if _igetfield then rewrite to _fast_iaccess_0
mockner@7970 905 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 906 __ cmpl(rbx, Bytecodes::_fast_igetfield);
duke@0 907 __ movl(bc, Bytecodes::_fast_iaccess_0);
duke@0 908 __ jccb(Assembler::equal, rewrite);
duke@0 909
coleenp@11829 910 // if _agetfield then rewrite to _fast_aaccess_0
mockner@7970 911 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 912 __ cmpl(rbx, Bytecodes::_fast_agetfield);
duke@0 913 __ movl(bc, Bytecodes::_fast_aaccess_0);
duke@0 914 __ jccb(Assembler::equal, rewrite);
duke@0 915
coleenp@11829 916 // if _fgetfield then rewrite to _fast_faccess_0
mockner@7970 917 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 918 __ cmpl(rbx, Bytecodes::_fast_fgetfield);
duke@0 919 __ movl(bc, Bytecodes::_fast_faccess_0);
duke@0 920 __ jccb(Assembler::equal, rewrite);
duke@0 921
duke@0 922 // else rewrite to _fast_aload0
mockner@7970 923 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
duke@0 924 __ movl(bc, Bytecodes::_fast_aload_0);
duke@0 925
duke@0 926 // rewrite
duke@0 927 // bc: fast bytecode
duke@0 928 __ bind(rewrite);
duke@0 929 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
duke@0 930
duke@0 931 __ bind(done);
duke@0 932 }
coleenp@11829 933
coleenp@11829 934 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
coleenp@11829 935 aload(0);
duke@0 936 }
duke@0 937
duke@0 938 void TemplateTable::istore() {
duke@0 939 transition(itos, vtos);
duke@0 940 locals_index(rbx);
duke@0 941 __ movl(iaddress(rbx), rax);
duke@0 942 }
duke@0 943
mockner@7970 944
duke@0 945 void TemplateTable::lstore() {
duke@0 946 transition(ltos, vtos);
duke@0 947 locals_index(rbx);
mockner@7970 948 __ movptr(laddress(rbx), rax);
mockner@7970 949 NOT_LP64(__ movptr(haddress(rbx), rdx));
duke@0 950 }
duke@0 951
duke@0 952 void TemplateTable::fstore() {
duke@0 953 transition(ftos, vtos);
duke@0 954 locals_index(rbx);
zmajo@8879 955 __ store_float(faddress(rbx));
duke@0 956 }
duke@0 957
duke@0 958 void TemplateTable::dstore() {
duke@0 959 transition(dtos, vtos);
duke@0 960 locals_index(rbx);
zmajo@8879 961 __ store_double(daddress(rbx));
duke@0 962 }
duke@0 963
duke@0 964 void TemplateTable::astore() {
duke@0 965 transition(vtos, vtos);
twisti@1426 966 __ pop_ptr(rax);
duke@0 967 locals_index(rbx);
never@304 968 __ movptr(aaddress(rbx), rax);
duke@0 969 }
duke@0 970
thartmann@13007 971 void TemplateTable::vstore() {
thartmann@13007 972 transition(vtos, vtos);
thartmann@13007 973 __ pop_ptr(rax);
thartmann@13007 974 locals_index(rbx);
thartmann@13007 975 __ movptr(aaddress(rbx), rax);
thartmann@13007 976 }
thartmann@13007 977
duke@0 978 void TemplateTable::wide_istore() {
duke@0 979 transition(vtos, vtos);
duke@0 980 __ pop_i();
duke@0 981 locals_index_wide(rbx);
duke@0 982 __ movl(iaddress(rbx), rax);
duke@0 983 }
duke@0 984
duke@0 985 void TemplateTable::wide_lstore() {
duke@0 986 transition(vtos, vtos);
mockner@7970 987 NOT_LP64(__ pop_l(rax, rdx));
mockner@7970 988 LP64_ONLY(__ pop_l());
duke@0 989 locals_index_wide(rbx);
mockner@7970 990 __ movptr(laddress(rbx), rax);
mockner@7970 991 NOT_LP64(__ movl(haddress(rbx), rdx));
duke@0 992 }
duke@0 993
duke@0 994 void TemplateTable::wide_fstore() {
mockner@7970 995 #ifdef _LP64
duke@0 996 transition(vtos, vtos);
zmajo@8879 997 __ pop_f(xmm0);
duke@0 998 locals_index_wide(rbx);
duke@0 999 __ movflt(faddress(rbx), xmm0);
mockner@7970 1000 #else
mockner@7970 1001 wide_istore();
mockner@7970 1002 #endif
duke@0 1003 }
duke@0 1004
duke@0 1005 void TemplateTable::wide_dstore() {
mockner@7970 1006 #ifdef _LP64
duke@0 1007 transition(vtos, vtos);
zmajo@8879 1008 __ pop_d(xmm0);
duke@0 1009 locals_index_wide(rbx);
duke@0 1010 __ movdbl(daddress(rbx), xmm0);
mockner@7970 1011 #else
mockner@7970 1012 wide_lstore();
mockner@7970 1013 #endif
duke@0 1014 }
duke@0 1015
duke@0 1016 void TemplateTable::wide_astore() {
duke@0 1017 transition(vtos, vtos);
twisti@1426 1018 __ pop_ptr(rax);
duke@0 1019 locals_index_wide(rbx);
never@304 1020 __ movptr(aaddress(rbx), rax);
duke@0 1021 }
duke@0 1022
thartmann@13007 1023 void TemplateTable::wide_vstore() {
thartmann@13007 1024 transition(vtos, vtos);
thartmann@13007 1025 __ pop_ptr(rax);
thartmann@13007 1026 locals_index_wide(rbx);
thartmann@13007 1027 __ movptr(aaddress(rbx), rax);
thartmann@13007 1028 }
thartmann@13007 1029
duke@0 1030 void TemplateTable::iastore() {
duke@0 1031 transition(itos, vtos);
duke@0 1032 __ pop_i(rbx);
mockner@7970 1033 // rax: value
mockner@7970 1034 // rbx: index
duke@0 1035 // rdx: array
mockner@7970 1036 index_check(rdx, rbx); // prefer index in rbx
duke@0 1037 __ movl(Address(rdx, rbx,
duke@0 1038 Address::times_4,
duke@0 1039 arrayOopDesc::base_offset_in_bytes(T_INT)),
duke@0 1040 rax);
duke@0 1041 }
duke@0 1042
duke@0 1043 void TemplateTable::lastore() {
duke@0 1044 transition(ltos, vtos);
duke@0 1045 __ pop_i(rbx);
mockner@7970 1046 // rax: low(value)
mockner@7970 1047 // rcx: array
mockner@7970 1048 // rdx: high(value)
mockner@7970 1049 index_check(rcx, rbx); // prefer index in rbx
mockner@7970 1050 // rbx: index
mockner@7970 1051 __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
mockner@7970 1052 NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
duke@0 1053 }
duke@0 1054
mockner@7970 1055
duke@0 1056 void TemplateTable::fastore() {
duke@0 1057 transition(ftos, vtos);
duke@0 1058 __ pop_i(rbx);
zmajo@8879 1059 // value is in UseSSE >= 1 ? xmm0 : ST(0)
mockner@7970 1060 // rbx: index
duke@0 1061 // rdx: array
mockner@7970 1062 index_check(rdx, rbx); // prefer index in rbx
zmajo@8879 1063 __ store_float(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
duke@0 1064 }
duke@0 1065
duke@0 1066 void TemplateTable::dastore() {
duke@0 1067 transition(dtos, vtos);
duke@0 1068 __ pop_i(rbx);
zmajo@8879 1069 // value is in UseSSE >= 2 ? xmm0 : ST(0)
mockner@7970 1070 // rbx: index
duke@0 1071 // rdx: array
mockner@7970 1072 index_check(rdx, rbx); // prefer index in rbx
zmajo@8879 1073 __ store_double(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
duke@0 1074 }
duke@0 1075
duke@0 1076 void TemplateTable::aastore() {
duke@0 1077 Label is_null, ok_is_subtype, done;
duke@0 1078 transition(vtos, vtos);
duke@0 1079 // stack: ..., array, index, value
never@304 1080 __ movptr(rax, at_tos()); // value
duke@0 1081 __ movl(rcx, at_tos_p1()); // index
never@304 1082 __ movptr(rdx, at_tos_p2()); // array
ysr@342 1083
ysr@342 1084 Address element_address(rdx, rcx,
mockner@7970 1085 UseCompressedOops? Address::times_4 : Address::times_ptr,
ysr@342 1086 arrayOopDesc::base_offset_in_bytes(T_OBJECT));
ysr@342 1087
mockner@7970 1088 index_check_without_pop(rdx, rcx); // kills rbx
never@304 1089 __ testptr(rax, rax);
duke@0 1090 __ jcc(Assembler::zero, is_null);
duke@0 1091
duke@0 1092 // Move subklass into rbx
coleenp@113 1093 __ load_klass(rbx, rax);
duke@0 1094 // Move superklass into rax
coleenp@113 1095 __ load_klass(rax, rdx);
never@304 1096 __ movptr(rax, Address(rax,
coleenp@3707 1097 ObjArrayKlass::element_klass_offset()));
coleenp@113 1098 // Compress array + index*oopSize + 12 into a single register. Frees rcx.
apetrusenko@362 1099 __ lea(rdx, element_address);
duke@0 1100
duke@0 1101 // Generate subtype check. Blows rcx, rdi
duke@0 1102 // Superklass in rax. Subklass in rbx.
duke@0 1103 __ gen_subtype_check(rbx, ok_is_subtype);
duke@0 1104
duke@0 1105 // Come here on failure
duke@0 1106 // object is at TOS
duke@0 1107 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
duke@0 1108
duke@0 1109 // Come here on success
duke@0 1110 __ bind(ok_is_subtype);
ysr@342 1111
ysr@342 1112 // Get the value we will store
apetrusenko@362 1113 __ movptr(rax, at_tos());
ysr@342 1114 // Now store using the appropriate barrier
ysr@342 1115 do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
duke@0 1116 __ jmp(done);
duke@0 1117
duke@0 1118 // Have a NULL in rax; rdx=array, rcx=index. Store NULL at ary[idx]
duke@0 1119 __ bind(is_null);
duke@0 1120 __ profile_null_seen(rbx);
ysr@342 1121
ysr@342 1122 // Store a NULL
ysr@342 1123 do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
duke@0 1124
duke@0 1125 // Pop stack arguments
duke@0 1126 __ bind(done);
twisti@1426 1127 __ addptr(rsp, 3 * Interpreter::stackElementSize);
duke@0 1128 }
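
// Illustration: a hedged sketch of aastore's semantics (illustrative types
// and stubs, not VM code): null stores skip the dynamic type check;
// non-null stores require the value's class to be a subtype of the array's
// element class, and every store goes through the GC barrier
// (the do_oop_store helper above).

typedef void* sk_oop;
struct SkKlass;
struct SkObjArray { SkKlass* element_klass; sk_oop* data; };

static bool sk_is_subtype_of(SkKlass*, SkKlass*) { return true; }   // stub subtype check
static void sk_throw_array_store_exception() {}                     // stub throw path
static void sk_barrier_store(sk_oop* addr, sk_oop v) { *addr = v; } // stub barrier

static void sketch_aastore(SkObjArray* a, int i, sk_oop v, SkKlass* v_klass) {
  if (v != 0 && !sk_is_subtype_of(v_klass, a->element_klass)) {
    sk_throw_array_store_exception(); // gen_subtype_check failure path
    return;
  }
  sk_barrier_store(&a->data[i], v);   // a NULL store still needs the barrier
}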
duke@0 1129
thartmann@13007 1130 void TemplateTable::vastore() {
thartmann@13007 1131 transition(vtos, vtos);
thartmann@13007 1132
thartmann@13007 1133 Register value = rcx;
thartmann@13007 1134 Register index = rbx;
thartmann@13007 1135 Register array = rax;
thartmann@13007 1136
thartmann@13007 1137 // stack: ..., array, index, value
thartmann@13007 1138 __ pop_ptr(value);
thartmann@13007 1139 __ pop_i(index);
thartmann@13007 1140 __ pop_ptr(array);
thartmann@13007 1141
thartmann@13007 1142 index_check_without_pop(array, index);
thartmann@13007 1143
thartmann@13007 1144 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), array, index, value);
thartmann@13007 1145 }
thartmann@13007 1146
duke@0 1147 void TemplateTable::bastore() {
duke@0 1148 transition(itos, vtos);
duke@0 1149 __ pop_i(rbx);
mockner@7970 1150 // rax: value
mockner@7970 1151 // rbx: index
duke@0 1152 // rdx: array
mockner@7970 1153 index_check(rdx, rbx); // prefer index in rbx
coleenp@10885 1154 // Need to check whether array is boolean or byte
coleenp@10885 1155 // since both types share the bastore bytecode.
coleenp@10885 1156 __ load_klass(rcx, rdx);
coleenp@10885 1157 __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
coleenp@10885 1158 int diffbit = Klass::layout_helper_boolean_diffbit();
coleenp@10885 1159 __ testl(rcx, diffbit);
coleenp@10885 1160 Label L_skip;
coleenp@10885 1161 __ jccb(Assembler::zero, L_skip);
coleenp@10885 1162 __ andl(rax, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
coleenp@10885 1163 __ bind(L_skip);
duke@0 1164 __ movb(Address(rdx, rbx,
duke@0 1165 Address::times_1,
duke@0 1166 arrayOopDesc::base_offset_in_bytes(T_BYTE)),
duke@0 1167 rax);
duke@0 1168 }
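
// Illustration: a standalone sketch of the boolean/byte disambiguation
// above (the diffbit position is illustrative; the real one comes from
// Klass::layout_helper_boolean_diffbit()). T_BOOLEAN and T_BYTE arrays
// share the bastore bytecode, so boolean stores are normalized to 0/1.

#include <cassert>

static int sketch_bastore_value(int layout_helper, int diffbit, int value) {
  if (layout_helper & diffbit) value &= 1;  // T_BOOLEAN array: mask to 0/1
  return (signed char)value;                // T_BYTE array: plain truncation
}

static void sketch_check_bastore() {
  const int diffbit = 1 << 24;                                // assumed bit position
  assert(sketch_bastore_value(diffbit, diffbit, 0x102) == 0); // boolean array
  assert(sketch_bastore_value(0,       diffbit, 0x102) == 2); // byte array
}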
duke@0 1169
duke@0 1170 void TemplateTable::castore() {
duke@0 1171 transition(itos, vtos);
duke@0 1172 __ pop_i(rbx);
mockner@7970 1173 // rax: value
mockner@7970 1174 // rbx: index
duke@0 1175 // rdx: array
mockner@7970 1176 index_check(rdx, rbx); // prefer index in rbx
duke@0 1177 __ movw(Address(rdx, rbx,
duke@0 1178 Address::times_2,
duke@0 1179 arrayOopDesc::base_offset_in_bytes(T_CHAR)),
duke@0 1180 rax);
duke@0 1181 }
duke@0 1182
mockner@7970 1183
duke@0 1184 void TemplateTable::sastore() {
duke@0 1185 castore();
duke@0 1186 }
duke@0 1187
duke@0 1188 void TemplateTable::istore(int n) {
duke@0 1189 transition(itos, vtos);
duke@0 1190 __ movl(iaddress(n), rax);
duke@0 1191 }
duke@0 1192
duke@0 1193 void TemplateTable::lstore(int n) {
duke@0 1194 transition(ltos, vtos);
mockner@7970 1195 __ movptr(laddress(n), rax);
mockner@7970 1196 NOT_LP64(__ movptr(haddress(n), rdx));
duke@0 1197 }
duke@0 1198
duke@0 1199 void TemplateTable::fstore(int n) {
duke@0 1200 transition(ftos, vtos);
zmajo@8879 1201 __ store_float(faddress(n));
duke@0 1202 }
duke@0 1203
duke@0 1204 void TemplateTable::dstore(int n) {
duke@0 1205 transition(dtos, vtos);
zmajo@8879 1206 __ store_double(daddress(n));
duke@0 1207 }
duke@0 1208
mockner@7970 1209
duke@0 1210 void TemplateTable::astore(int n) {
duke@0 1211 transition(vtos, vtos);
twisti@1426 1212 __ pop_ptr(rax);
never@304 1213 __ movptr(aaddress(n), rax);
duke@0 1214 }
duke@0 1215
duke@0 1216 void TemplateTable::pop() {
duke@0 1217 transition(vtos, vtos);
twisti@1426 1218 __ addptr(rsp, Interpreter::stackElementSize);
duke@0 1219 }
duke@0 1220
duke@0 1221 void TemplateTable::pop2() {
duke@0 1222 transition(vtos, vtos);
twisti@1426 1223 __ addptr(rsp, 2 * Interpreter::stackElementSize);
duke@0 1224 }
duke@0 1225
mockner@7970 1226
duke@0 1227 void TemplateTable::dup() {
duke@0 1228 transition(vtos, vtos);
twisti@1426 1229 __ load_ptr(0, rax);
twisti@1426 1230 __ push_ptr(rax);
duke@0 1231 // stack: ..., a, a
duke@0 1232 }
duke@0 1233
duke@0 1234 void TemplateTable::dup_x1() {
duke@0 1235 transition(vtos, vtos);
duke@0 1236 // stack: ..., a, b
twisti@1426 1237 __ load_ptr( 0, rax); // load b
twisti@1426 1238 __ load_ptr( 1, rcx); // load a
twisti@1426 1239 __ store_ptr(1, rax); // store b
twisti@1426 1240 __ store_ptr(0, rcx); // store a
twisti@1426 1241 __ push_ptr(rax); // push b
duke@0 1242 // stack: ..., b, a, b
duke@0 1243 }
duke@0 1244
duke@0 1245 void TemplateTable::dup_x2() {
duke@0 1246 transition(vtos, vtos);
duke@0 1247 // stack: ..., a, b, c
twisti@1426 1248 __ load_ptr( 0, rax); // load c
twisti@1426 1249 __ load_ptr( 2, rcx); // load a
twisti@1426 1250 __ store_ptr(2, rax); // store c in a
twisti@1426 1251 __ push_ptr(rax); // push c
duke@0 1252 // stack: ..., c, b, c, c
twisti@1426 1253 __ load_ptr( 2, rax); // load b
twisti@1426 1254 __ store_ptr(2, rcx); // store a in b
duke@0 1255 // stack: ..., c, a, c, c
twisti@1426 1256 __ store_ptr(1, rax); // store b in c
duke@0 1257 // stack: ..., c, a, b, c
duke@0 1258 }
duke@0 1259
duke@0 1260 void TemplateTable::dup2() {
duke@0 1261 transition(vtos, vtos);
duke@0 1262 // stack: ..., a, b
twisti@1426 1263 __ load_ptr(1, rax); // load a
twisti@1426 1264 __ push_ptr(rax); // push a
twisti@1426 1265 __ load_ptr(1, rax); // load b
twisti@1426 1266 __ push_ptr(rax); // push b
duke@0 1267 // stack: ..., a, b, a, b
duke@0 1268 }
duke@0 1269
mockner@7970 1270
duke@0 1271 void TemplateTable::dup2_x1() {
duke@0 1272 transition(vtos, vtos);
duke@0 1273 // stack: ..., a, b, c
twisti@1426 1274 __ load_ptr( 0, rcx); // load c
twisti@1426 1275 __ load_ptr( 1, rax); // load b
twisti@1426 1276 __ push_ptr(rax); // push b
twisti@1426 1277 __ push_ptr(rcx); // push c
duke@0 1278 // stack: ..., a, b, c, b, c
twisti@1426 1279 __ store_ptr(3, rcx); // store c in b
duke@0 1280 // stack: ..., a, c, c, b, c
twisti@1426 1281 __ load_ptr( 4, rcx); // load a
twisti@1426 1282 __ store_ptr(2, rcx); // store a in 2nd c
duke@0 1283 // stack: ..., a, c, a, b, c
twisti@1426 1284 __ store_ptr(4, rax); // store b in a
duke@0 1285 // stack: ..., b, c, a, b, c
duke@0 1286 }
duke@0 1287
duke@0 1288 void TemplateTable::dup2_x2() {
duke@0 1289 transition(vtos, vtos);
duke@0 1290 // stack: ..., a, b, c, d
twisti@1426 1291 __ load_ptr( 0, rcx); // load d
twisti@1426 1292 __ load_ptr( 1, rax); // load c
twisti@1426 1293 __ push_ptr(rax); // push c
twisti@1426 1294 __ push_ptr(rcx); // push d
duke@0 1295 // stack: ..., a, b, c, d, c, d
twisti@1426 1296 __ load_ptr( 4, rax); // load b
twisti@1426 1297 __ store_ptr(2, rax); // store b in d
twisti@1426 1298 __ store_ptr(4, rcx); // store d in b
duke@0 1299 // stack: ..., a, d, c, b, c, d
twisti@1426 1300 __ load_ptr( 5, rcx); // load a
twisti@1426 1301 __ load_ptr( 3, rax); // load c
twisti@1426 1302 __ store_ptr(3, rcx); // store a in c
twisti@1426 1303 __ store_ptr(5, rax); // store c in a
duke@0 1304 // stack: ..., c, d, a, b, c, d
duke@0 1305 }
duke@0 1306
duke@0 1307 void TemplateTable::swap() {
duke@0 1308 transition(vtos, vtos);
duke@0 1309 // stack: ..., a, b
twisti@1426 1310 __ load_ptr( 1, rcx); // load a
twisti@1426 1311 __ load_ptr( 0, rax); // load b
twisti@1426 1312 __ store_ptr(0, rcx); // store a in b
twisti@1426 1313 __ store_ptr(1, rax); // store b in a
duke@0 1314 // stack: ..., b, a
duke@0 1315 }
duke@0 1316
duke@0 1317 void TemplateTable::iop2(Operation op) {
duke@0 1318 transition(itos, itos);
duke@0 1319 switch (op) {
duke@0 1320 case add : __ pop_i(rdx); __ addl (rax, rdx); break;
duke@0 1321 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
duke@0 1322 case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
duke@0 1323 case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
duke@0 1324 case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
duke@0 1325 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
duke@0 1326 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
duke@0 1327 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
duke@0 1328 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
duke@0 1329 default : ShouldNotReachHere();
duke@0 1330 }
duke@0 1331 }
duke@0 1332
duke@0 1333 void TemplateTable::lop2(Operation op) {
duke@0 1334 transition(ltos, ltos);
mockner@7970 1335 #ifdef _LP64
duke@0 1336 switch (op) {
twisti@1426 1337 case add : __ pop_l(rdx); __ addptr(rax, rdx); break;
twisti@1426 1338 case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr(rax, rdx); break;
twisti@1426 1339 case _and : __ pop_l(rdx); __ andptr(rax, rdx); break;
twisti@1426 1340 case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
twisti@1426 1341 case _xor : __ pop_l(rdx); __ xorptr(rax, rdx); break;
twisti@1426 1342 default : ShouldNotReachHere();
duke@0 1343 }
mockner@7970 1344 #else
mockner@7970 1345 __ pop_l(rbx, rcx);
mockner@7970 1346 switch (op) {
mockner@7970 1347 case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
mockner@7970 1348 case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
mockner@7970 1349 __ mov (rax, rbx); __ mov (rdx, rcx); break;
mockner@7970 1350 case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
mockner@7970 1351 case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
mockner@7970 1352 case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
mockner@7970 1353 default : ShouldNotReachHere();
mockner@7970 1354 }
mockner@7970 1355 #endif
duke@0 1356 }
duke@0 1357
duke@0 1358 void TemplateTable::idiv() {
duke@0 1359 transition(itos, itos);
duke@0 1360 __ movl(rcx, rax);
duke@0 1361 __ pop_i(rax);
mockner@7970 1362 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
duke@0 1363 // they are not equal, one could do a normal division (no correction
duke@0 1364 // needed), which may speed up this implementation for the common case.
duke@0 1365 // (see also JVM spec., p.243 & p.271)
duke@0 1366 __ corrected_idivl(rcx);
duke@0 1367 }
duke@0 1368
duke@0 1369 void TemplateTable::irem() {
duke@0 1370 transition(itos, itos);
duke@0 1371 __ movl(rcx, rax);
duke@0 1372 __ pop_i(rax);
mockner@7970 1373 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
duke@0 1374 // they are not equal, one could do a normal division (no correction
duke@0 1375 // needed), which may speed up this implementation for the common case.
duke@0 1376 // (see also JVM spec., p.243 & p.271)
duke@0 1377 __ corrected_idivl(rcx);
duke@0 1378 __ movl(rax, rdx);
duke@0 1379 }
duke@0 1380
duke@0 1381 void TemplateTable::lmul() {
duke@0 1382 transition(ltos, ltos);
mockner@7970 1383 #ifdef _LP64
duke@0 1384 __ pop_l(rdx);
duke@0 1385 __ imulq(rax, rdx);
mockner@7970 1386 #else
mockner@7970 1387 __ pop_l(rbx, rcx);
mockner@7970 1388 __ push(rcx); __ push(rbx);
mockner@7970 1389 __ push(rdx); __ push(rax);
mockner@7970 1390 __ lmul(2 * wordSize, 0);
mockner@7970 1391 __ addptr(rsp, 4 * wordSize); // take off temporaries
mockner@7970 1392 #endif
duke@0 1393 }
duke@0 1394
duke@0 1395 void TemplateTable::ldiv() {
duke@0 1396 transition(ltos, ltos);
mockner@7970 1397 #ifdef _LP64
never@304 1398 __ mov(rcx, rax);
duke@0 1399 __ pop_l(rax);
duke@0 1400 // generate explicit div0 check
duke@0 1401 __ testq(rcx, rcx);
duke@0 1402 __ jump_cc(Assembler::zero,
duke@0 1403 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
duke@0 1404 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
duke@0 1405 // they are not equal, one could do a normal division (no correction
duke@0 1406 // needed), which may speed up this implementation for the common case.
duke@0 1407 // (see also JVM spec., p.243 & p.271)
duke@0 1408 __ corrected_idivq(rcx); // kills rbx
mockner@7970 1409 #else
mockner@7970 1410 __ pop_l(rbx, rcx);
mockner@7970 1411 __ push(rcx); __ push(rbx);
mockner@7970 1412 __ push(rdx); __ push(rax);
mockner@7970 1413 // check if y = 0
mockner@7970 1414 __ orl(rax, rdx);
mockner@7970 1415 __ jump_cc(Assembler::zero,
mockner@7970 1416 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
mockner@7970 1417 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
mockner@7970 1418 __ addptr(rsp, 4 * wordSize); // take off temporaries
mockner@7970 1419 #endif
duke@0 1420 }
duke@0 1421
duke@0 1422 void TemplateTable::lrem() {
duke@0 1423 transition(ltos, ltos);
mockner@7970 1424 #ifdef _LP64
never@304 1425 __ mov(rcx, rax);
duke@0 1426 __ pop_l(rax);
duke@0 1427 __ testq(rcx, rcx);
duke@0 1428 __ jump_cc(Assembler::zero,
duke@0 1429 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
duke@0 1430 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
duke@0 1431 // they are not equal, one could do a normal division (no correction
duke@0 1432 // needed), which may speed up this implementation for the common case.
duke@0 1433 // (see also JVM spec., p.243 & p.271)
duke@0 1434 __ corrected_idivq(rcx); // kills rbx
never@304 1435 __ mov(rax, rdx);
mockner@7970 1436 #else
mockner@7970 1437 __ pop_l(rbx, rcx);
mockner@7970 1438 __ push(rcx); __ push(rbx);
mockner@7970 1439 __ push(rdx); __ push(rax);
mockner@7970 1440 // check if y = 0
mockner@7970 1441 __ orl(rax, rdx);
mockner@7970 1442 __ jump_cc(Assembler::zero,
mockner@7970 1443 ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
mockner@7970 1444 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
mockner@7970 1445 __ addptr(rsp, 4 * wordSize);
mockner@7970 1446 #endif
duke@0 1447 }
duke@0 1448
duke@0 1449 void TemplateTable::lshl() {
duke@0 1450 transition(itos, ltos);
duke@0 1451 __ movl(rcx, rax); // get shift count
mockner@7970 1452 #ifdef _LP64
duke@0 1453 __ pop_l(rax); // get shift value
duke@0 1454 __ shlq(rax);
mockner@7970 1455 #else
mockner@7970 1456 __ pop_l(rax, rdx); // get shift value
mockner@7970 1457 __ lshl(rdx, rax);
mockner@7970 1458 #endif
duke@0 1459 }
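
// Illustration: no explicit "count & 0x3f" is emitted above because 64-bit
// x86 shifts by %cl mask the count to six bits in hardware, which matches
// the JVM specification for long shifts. A standalone equivalent:

#include <cassert>
#include <cstdint>

static int64_t sketch_jvm_lshl(int64_t v, int32_t count) {
  return v << (count & 0x3f); // what "shlq %cl" does implicitly
}

static void sketch_check_lshl() {
  assert(sketch_jvm_lshl(1, 65) == 2); // 65 & 0x3f == 1
}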
duke@0 1460
duke@0 1461 void TemplateTable::lshr() {
mockner@7970 1462 #ifdef _LP64
duke@0 1463 transition(itos, ltos);
duke@0 1464 __ movl(rcx, rax); // get shift count
duke@0 1465 __ pop_l(rax); // get shift value
duke@0 1466 __ sarq(rax);
mockner@7970 1467 #else
mockner@7970 1468 transition(itos, ltos);
mockner@7970 1469 __ mov(rcx, rax); // get shift count
mockner@7970 1470 __ pop_l(rax, rdx); // get shift value
mockner@7970 1471 __ lshr(rdx, rax, true);
mockner@7970 1472 #endif
duke@0 1473 }
duke@0 1474
duke@0 1475 void TemplateTable::lushr() {
duke@0 1476 transition(itos, ltos);
mockner@7970 1477 #ifdef _LP64
duke@0 1478 __ movl(rcx, rax); // get shift count
duke@0 1479 __ pop_l(rax); // get shift value
duke@0 1480 __ shrq(rax);
mockner@7970 1481 #else
mockner@7970 1482 __ mov(rcx, rax); // get shift count
mockner@7970 1483 __ pop_l(rax, rdx); // get shift value
mockner@7970 1484 __ lshr(rdx, rax);
mockner@7970 1485 #endif
duke@0 1486 }
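// Note on shift-count masking for the three long-shift templates above (a
// reference remark, not generated code): shlq/sarq/shrq use only the low six
// bits of cl, which matches the JVM rule that long shifts take their
// distance modulo 64; the 32-bit lshl/lshr helpers provide the equivalent
// masking for the rdx:rax register pair.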
duke@0 1487
duke@0 1488 void TemplateTable::fop2(Operation op) {
duke@0 1489 transition(ftos, ftos);
zmajo@8879 1490
zmajo@8879 1491 if (UseSSE >= 1) {
zmajo@8879 1492 switch (op) {
zmajo@8879 1493 case add:
zmajo@8879 1494 __ addss(xmm0, at_rsp());
zmajo@8879 1495 __ addptr(rsp, Interpreter::stackElementSize);
zmajo@8879 1496 break;
zmajo@8879 1497 case sub:
zmajo@8879 1498 __ movflt(xmm1, xmm0);
zmajo@8879 1499 __ pop_f(xmm0);
zmajo@8879 1500 __ subss(xmm0, xmm1);
zmajo@8879 1501 break;
zmajo@8879 1502 case mul:
zmajo@8879 1503 __ mulss(xmm0, at_rsp());
zmajo@8879 1504 __ addptr(rsp, Interpreter::stackElementSize);
zmajo@8879 1505 break;
zmajo@8879 1506 case div:
zmajo@8879 1507 __ movflt(xmm1, xmm0);
zmajo@8879 1508 __ pop_f(xmm0);
zmajo@8879 1509 __ divss(xmm0, xmm1);
zmajo@8879 1510 break;
zmajo@8879 1511 case rem:
zmajo@8879 1512 // On x86_64 platforms the SharedRuntime::frem method is called to perform the
zmajo@8879 1513 // modulo operation. The frem method calls the function
zmajo@8879 1514 // double fmod(double x, double y) in math.h. The documentation of fmod states:
zmajo@8879 1515 // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
zmajo@8879 1516 // (signalling or quiet) is returned.
zmajo@8879 1517 //
zmajo@8879 1518 // On x86_32 platforms the FPU is used to perform the modulo operation. The
zmajo@8879 1519 // reason is that on 32-bit Windows the sign of modulo operations diverges from
zmajo@8879 1520 // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f and not -0.0f).
zmajo@8879 1521 // The fprem instruction used on x86_32 is functionally equivalent to
zmajo@8879 1522 // SharedRuntime::frem in that it returns a NaN.
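// For reference, the Java semantics both paths must produce (illustrative
// values, not exercised here): the sign of frem follows the dividend, so
//   -0.0f % -3.14f == -0.0f    // not +0.0f as 32-bit Windows fmod computes
//    5.0f %  3.0f  ==  2.0f
// and any NaN operand yields NaN.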
mockner@7970 1523 #ifdef _LP64
zmajo@8879 1524 __ movflt(xmm1, xmm0);
zmajo@8879 1525 __ pop_f(xmm0);
zmajo@8879 1526 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
zmajo@8879 1527 #else
zmajo@8879 1528 __ push_f(xmm0);
zmajo@8879 1529 __ pop_f();
zmajo@8879 1530 __ fld_s(at_rsp());
zmajo@8879 1531 __ fremr(rax);
zmajo@8879 1532 __ f2ieee();
zmajo@8879 1533 __ pop(rax); // pop second operand off the stack
zmajo@8879 1534 __ push_f();
zmajo@8879 1535 __ pop_f(xmm0);
zmajo@8879 1536 #endif
zmajo@8879 1537 break;
zmajo@8879 1538 default:
zmajo@8879 1539 ShouldNotReachHere();
zmajo@8879 1540 break;
zmajo@8879 1541 }
zmajo@8879 1542 } else {
zmajo@8879 1543 #ifdef _LP64
duke@0 1544 ShouldNotReachHere();
mockner@7970 1545 #else
zmajo@8879 1546 switch (op) {
mockner@7970 1547 case add: __ fadd_s (at_rsp()); break;
mockner@7970 1548 case sub: __ fsubr_s(at_rsp()); break;
mockner@7970 1549 case mul: __ fmul_s (at_rsp()); break;
mockner@7970 1550 case div: __ fdivr_s(at_rsp()); break;
mockner@7970 1551 case rem: __ fld_s (at_rsp()); __ fremr(rax); break;
mockner@7970 1552 default : ShouldNotReachHere();
zmajo@8879 1553 }
zmajo@8879 1554 __ f2ieee();
zmajo@8879 1555 __ pop(rax); // pop second operand off the stack
zmajo@8879 1556 #endif // _LP64
mockner@7970 1557 }
duke@0 1558 }
duke@0 1559
duke@0 1560 void TemplateTable::dop2(Operation op) {
duke@0 1561 transition(dtos, dtos);
zmajo@8879 1562 if (UseSSE >= 2) {
zmajo@8879 1563 switch (op) {
zmajo@8879 1564 case add:
zmajo@8879 1565 __ addsd(xmm0, at_rsp());
zmajo@8879 1566 __ addptr(rsp, 2 * Interpreter::stackElementSize);
zmajo@8879 1567 break;
zmajo@8879 1568 case sub:
zmajo@8879 1569 __ movdbl(xmm1, xmm0);
zmajo@8879 1570 __ pop_d(xmm0);
zmajo@8879 1571 __ subsd(xmm0, xmm1);
zmajo@8879 1572 break;
zmajo@8879 1573 case mul:
zmajo@8879 1574 __ mulsd(xmm0, at_rsp());
zmajo@8879 1575 __ addptr(rsp, 2 * Interpreter::stackElementSize);
zmajo@8879 1576 break;
zmajo@8879 1577 case div:
zmajo@8879 1578 __ movdbl(xmm1, xmm0);
zmajo@8879 1579 __ pop_d(xmm0);
zmajo@8879 1580 __ divsd(xmm0, xmm1);
zmajo@8879 1581 break;
zmajo@8879 1582 case rem:
zmajo@8879 1583 // Similar to fop2(), the modulo operation is performed using the
zmajo@8879 1584 // SharedRuntime::drem method (on x86_64 platforms) or using the
zmajo@8879 1585 // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
mockner@7970 1586 #ifdef _LP64
zmajo@8879 1587 __ movdbl(xmm1, xmm0);
zmajo@8879 1588 __ pop_d(xmm0);
zmajo@8879 1589 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
zmajo@8879 1590 #else
zmajo@8879 1591 __ push_d(xmm0);
zmajo@8879 1592 __ pop_d();
zmajo@8879 1593 __ fld_d(at_rsp());
zmajo@8879 1594 __ fremr(rax);
zmajo@8879 1595 __ d2ieee();
zmajo@8879 1596 __ pop(rax);
zmajo@8879 1597 __ pop(rdx);
zmajo@8879 1598 __ push_d();
zmajo@8879 1599 __ pop_d(xmm0);
zmajo@8879 1600 #endif
zmajo@8879 1601 break;
zmajo@8879 1602 default:
zmajo@8879 1603 ShouldNotReachHere();
zmajo@8879 1604 break;
zmajo@8879 1605 }
zmajo@8879 1606 } else {
zmajo@8879 1607 #ifdef _LP64
duke@0 1608 ShouldNotReachHere();
mockner@7970 1609 #else
zmajo@8879 1610 switch (op) {
mockner@7970 1611 case add: __ fadd_d (at_rsp()); break;
mockner@7970 1612 case sub: __ fsubr_d(at_rsp()); break;
mockner@7970 1613 case mul: {
mockner@7970 1614 Label L_strict;
mockner@7970 1615 Label L_join;
mockner@7970 1616 const Address access_flags (rcx, Method::access_flags_offset());
mockner@7970 1617 __ get_method(rcx);
mockner@7970 1618 __ movl(rcx, access_flags);
mockner@7970 1619 __ testl(rcx, JVM_ACC_STRICT);
mockner@7970 1620 __ jccb(Assembler::notZero, L_strict);
mockner@7970 1621 __ fmul_d (at_rsp());
mockner@7970 1622 __ jmpb(L_join);
mockner@7970 1623 __ bind(L_strict);
mockner@7970 1624 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
mockner@7970 1625 __ fmulp();
mockner@7970 1626 __ fmul_d (at_rsp());
mockner@7970 1627 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
mockner@7970 1628 __ fmulp();
mockner@7970 1629 __ bind(L_join);
mockner@7970 1630 break;
mockner@7970 1631 }
mockner@7970 1632 case div: {
mockner@7970 1633 Label L_strict;
mockner@7970 1634 Label L_join;
mockner@7970 1635 const Address access_flags (rcx, Method::access_flags_offset());
mockner@7970 1636 __ get_method(rcx);
mockner@7970 1637 __ movl(rcx, access_flags);
mockner@7970 1638 __ testl(rcx, JVM_ACC_STRICT);
mockner@7970 1639 __ jccb(Assembler::notZero, L_strict);
mockner@7970 1640 __ fdivr_d(at_rsp());
mockner@7970 1641 __ jmp(L_join);
mockner@7970 1642 __ bind(L_strict);
mockner@7970 1643 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
mockner@7970 1644 __ fmul_d (at_rsp());
mockner@7970 1645 __ fdivrp();
mockner@7970 1646 __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
mockner@7970 1647 __ fmulp();
mockner@7970 1648 __ bind(L_join);
mockner@7970 1649 break;
mockner@7970 1650 }
mockner@7970 1651 case rem: __ fld_d (at_rsp()); __ fremr(rax); break;
mockner@7970 1652 default : ShouldNotReachHere();
zmajo@8879 1653 }
zmajo@8879 1654 __ d2ieee();
zmajo@8879 1655 // Pop double precision number from rsp.
zmajo@8879 1656 __ pop(rax);
zmajo@8879 1657 __ pop(rdx);
zmajo@8879 1658 #endif
mockner@7970 1659 }
duke@0 1660 }
duke@0 1661
duke@0 1662 void TemplateTable::ineg() {
duke@0 1663 transition(itos, itos);
duke@0 1664 __ negl(rax);
duke@0 1665 }
duke@0 1666
duke@0 1667 void TemplateTable::lneg() {
duke@0 1668 transition(ltos, ltos);
mockner@7970 1669 LP64_ONLY(__ negq(rax));
mockner@7970 1670 NOT_LP64(__ lneg(rdx, rax));
duke@0 1671 }
duke@0 1672
duke@0 1673 // Note: 'double' and 'long long' have 32-bit alignment on x86.
duke@0 1674 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
duke@0 1675 // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
duke@0 1676 // for the 128-bit operands of SSE instructions.
duke@0 1677 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
duke@0 1678 // Store the value into a 128-bit operand.
duke@0 1679 operand[0] = lo;
duke@0 1680 operand[1] = hi;
duke@0 1681 return operand;
duke@0 1682 }
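// Worked example, illustrative only: with adr == 0x7ffc1238, masking with
// ~0xF yields operand == 0x7ffc1230, a 16-byte aligned slot that still lies
// inside the deliberately oversized 2*2 jlong pools below; the lo/hi words
// stored there form a 128-bit constant usable directly as an SSE memory
// operand (see xorps/xorpd in fneg/dneg).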
duke@0 1683
duke@0 1684 // Buffer for 128-bits masks used by SSE instructions.
duke@0 1685 static jlong float_signflip_pool[2*2];
duke@0 1686 static jlong double_signflip_pool[2*2];
duke@0 1687
duke@0 1688 void TemplateTable::fneg() {
duke@0 1689 transition(ftos, ftos);
zmajo@8879 1690 if (UseSSE >= 1) {
goetz@9364 1691 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
zmajo@8879 1692 __ xorps(xmm0, ExternalAddress((address) float_signflip));
zmajo@8879 1693 } else {
zmajo@8879 1694 LP64_ONLY(ShouldNotReachHere());
zmajo@8879 1695 NOT_LP64(__ fchs());
zmajo@8879 1696 }
duke@0 1697 }
duke@0 1698
duke@0 1699 void TemplateTable::dneg() {
duke@0 1700 transition(dtos, dtos);
zmajo@8879 1701 if (UseSSE >= 2) {
goetz@9364 1702 static jlong *double_signflip =
goetz@9364 1703 double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
zmajo@8879 1704 __ xorpd(xmm0, ExternalAddress((address) double_signflip));
zmajo@8879 1705 } else {
mockner@7970 1706 #ifdef _LP64
zmajo@8879 1707 ShouldNotReachHere();
mockner@7970 1708 #else
zmajo@8879 1709 __ fchs();
mockner@7970 1710 #endif
zmajo@8879 1711 }
duke@0 1712 }
duke@0 1713
duke@0 1714 void TemplateTable::iinc() {
duke@0 1715 transition(vtos, vtos);
duke@0 1716 __ load_signed_byte(rdx, at_bcp(2)); // get constant
duke@0 1717 locals_index(rbx);
duke@0 1718 __ addl(iaddress(rbx), rdx);
duke@0 1719 }
duke@0 1720
duke@0 1721 void TemplateTable::wide_iinc() {
duke@0 1722 transition(vtos, vtos);
duke@0 1723 __ movl(rdx, at_bcp(4)); // get constant
duke@0 1724 locals_index_wide(rbx);
duke@0 1725 __ bswapl(rdx); // swap bytes & sign-extend constant
duke@0 1726 __ sarl(rdx, 16);
duke@0 1727 __ addl(iaddress(rbx), rdx);
duke@0 1728 // Note: should probably use only one movl to get both
duke@0 1729 // the index and the constant -> fix this
duke@0 1730 }
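// Decoding sketch for the movl/bswapl/sarl sequence above, assuming the
// standard wide-iinc encoding (wide, iinc, index16, const16): at_bcp(4)
// loads const_hi, const_lo plus two trailing bytes little-endian; bswapl
// moves const_hi:const_lo into the upper half of rdx; sarl(rdx, 16) then
// discards the trailing bytes and sign-extends the 16-bit increment.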
duke@0 1731
duke@0 1732 void TemplateTable::convert() {
mockner@7970 1733 #ifdef _LP64
duke@0 1734 // Checking
duke@0 1735 #ifdef ASSERT
duke@0 1736 {
duke@0 1737 TosState tos_in = ilgl;
duke@0 1738 TosState tos_out = ilgl;
duke@0 1739 switch (bytecode()) {
duke@0 1740 case Bytecodes::_i2l: // fall through
duke@0 1741 case Bytecodes::_i2f: // fall through
duke@0 1742 case Bytecodes::_i2d: // fall through
duke@0 1743 case Bytecodes::_i2b: // fall through
duke@0 1744 case Bytecodes::_i2c: // fall through
duke@0 1745 case Bytecodes::_i2s: tos_in = itos; break;
duke@0 1746 case Bytecodes::_l2i: // fall through
duke@0 1747 case Bytecodes::_l2f: // fall through
duke@0 1748 case Bytecodes::_l2d: tos_in = ltos; break;
duke@0 1749 case Bytecodes::_f2i: // fall through
duke@0 1750 case Bytecodes::_f2l: // fall through
duke@0 1751 case Bytecodes::_f2d: tos_in = ftos; break;
duke@0 1752 case Bytecodes::_d2i: // fall through
duke@0 1753 case Bytecodes::_d2l: // fall through
duke@0 1754 case Bytecodes::_d2f: tos_in = dtos; break;
duke@0 1755 default : ShouldNotReachHere();
duke@0 1756 }
duke@0 1757 switch (bytecode()) {
duke@0 1758 case Bytecodes::_l2i: // fall through
duke@0 1759 case Bytecodes::_f2i: // fall through
duke@0 1760 case Bytecodes::_d2i: // fall through
duke@0 1761 case Bytecodes::_i2b: // fall through
duke@0 1762 case Bytecodes::_i2c: // fall through
duke@0 1763 case Bytecodes::_i2s: tos_out = itos; break;
duke@0 1764 case Bytecodes::_i2l: // fall through
duke@0 1765 case Bytecodes::_f2l: // fall through
duke@0 1766 case Bytecodes::_d2l: tos_out = ltos; break;
duke@0 1767 case Bytecodes::_i2f: // fall through
duke@0 1768 case Bytecodes::_l2f: // fall through
duke@0 1769 case Bytecodes::_d2f: tos_out = ftos; break;
duke@0 1770 case Bytecodes::_i2d: // fall through
duke@0 1771 case Bytecodes::_l2d: // fall through
duke@0 1772 case Bytecodes::_f2d: tos_out = dtos; break;
duke@0 1773 default : ShouldNotReachHere();
duke@0 1774 }
duke@0 1775 transition(tos_in, tos_out);
duke@0 1776 }
duke@0 1777 #endif // ASSERT
duke@0 1778
duke@0 1779 static const int64_t is_nan = 0x8000000000000000L;
duke@0 1780
duke@0 1781 // Conversion
duke@0 1782 switch (bytecode()) {
duke@0 1783 case Bytecodes::_i2l:
duke@0 1784 __ movslq(rax, rax);
duke@0 1785 break;
duke@0 1786 case Bytecodes::_i2f:
duke@0 1787 __ cvtsi2ssl(xmm0, rax);
duke@0 1788 break;
duke@0 1789 case Bytecodes::_i2d:
duke@0 1790 __ cvtsi2sdl(xmm0, rax);
duke@0 1791 break;
duke@0 1792 case Bytecodes::_i2b:
duke@0 1793 __ movsbl(rax, rax);
duke@0 1794 break;
duke@0 1795 case Bytecodes::_i2c:
duke@0 1796 __ movzwl(rax, rax);
duke@0 1797 break;
duke@0 1798 case Bytecodes::_i2s:
duke@0 1799 __ movswl(rax, rax);
duke@0 1800 break;
duke@0 1801 case Bytecodes::_l2i:
duke@0 1802 __ movl(rax, rax);
duke@0 1803 break;
duke@0 1804 case Bytecodes::_l2f:
duke@0 1805 __ cvtsi2ssq(xmm0, rax);
duke@0 1806 break;
duke@0 1807 case Bytecodes::_l2d:
duke@0 1808 __ cvtsi2sdq(xmm0, rax);
duke@0 1809 break;
duke@0 1810 case Bytecodes::_f2i:
duke@0 1811 {
duke@0 1812 Label L;
duke@0 1813 __ cvttss2sil(rax, xmm0);
duke@0 1814 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
duke@0 1815 __ jcc(Assembler::notEqual, L);
duke@0 1816 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
duke@0 1817 __ bind(L);
duke@0 1818 }
duke@0 1819 break;
duke@0 1820 case Bytecodes::_f2l:
duke@0 1821 {
duke@0 1822 Label L;
duke@0 1823 __ cvttss2siq(rax, xmm0);
duke@0 1824 // NaN or overflow/underflow?
duke@0 1825 __ cmp64(rax, ExternalAddress((address) &is_nan));
duke@0 1826 __ jcc(Assembler::notEqual, L);
duke@0 1827 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
duke@0 1828 __ bind(L);
duke@0 1829 }
duke@0 1830 break;
duke@0 1831 case Bytecodes::_f2d:
duke@0 1832 __ cvtss2sd(xmm0, xmm0);
duke@0 1833 break;
duke@0 1834 case Bytecodes::_d2i:
duke@0 1835 {
duke@0 1836 Label L;
duke@0 1837 __ cvttsd2sil(rax, xmm0);
duke@0 1838 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
duke@0 1839 __ jcc(Assembler::notEqual, L);
duke@0 1840 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
duke@0 1841 __ bind(L);
duke@0 1842 }
duke@0 1843 break;
duke@0 1844 case Bytecodes::_d2l:
duke@0 1845 {
duke@0 1846 Label L;
duke@0 1847 __ cvttsd2siq(rax, xmm0);
duke@0 1848 // NaN or overflow/underflow?
duke@0 1849 __ cmp64(rax, ExternalAddress((address) &is_nan));
duke@0 1850 __ jcc(Assembler::notEqual, L);
duke@0 1851 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
duke@0 1852 __ bind(L);
duke@0 1853 }
duke@0 1854 break;
duke@0 1855 case Bytecodes::_d2f:
duke@0 1856 __ cvtsd2ss(xmm0, xmm0);
duke@0 1857 break;
duke@0 1858 default:
duke@0 1859 ShouldNotReachHere();
duke@0 1860 }
mockner@7970 1861 #else
mockner@7970 1862 // Checking
mockner@7970 1863 #ifdef ASSERT
mockner@7970 1864 { TosState tos_in = ilgl;
mockner@7970 1865 TosState tos_out = ilgl;
mockner@7970 1866 switch (bytecode()) {
mockner@7970 1867 case Bytecodes::_i2l: // fall through
mockner@7970 1868 case Bytecodes::_i2f: // fall through
mockner@7970 1869 case Bytecodes::_i2d: // fall through
mockner@7970 1870 case Bytecodes::_i2b: // fall through
mockner@7970 1871 case Bytecodes::_i2c: // fall through
mockner@7970 1872 case Bytecodes::_i2s: tos_in = itos; break;
mockner@7970 1873 case Bytecodes::_l2i: // fall through
mockner@7970 1874 case Bytecodes::_l2f: // fall through
mockner@7970 1875 case Bytecodes::_l2d: tos_in = ltos; break;
mockner@7970 1876 case Bytecodes::_f2i: // fall through
mockner@7970 1877 case Bytecodes::_f2l: // fall through
mockner@7970 1878 case Bytecodes::_f2d: tos_in = ftos; break;
mockner@7970 1879 case Bytecodes::_d2i: // fall through
mockner@7970 1880 case Bytecodes::_d2l: // fall through
mockner@7970 1881 case Bytecodes::_d2f: tos_in = dtos; break;
mockner@7970 1882 default : ShouldNotReachHere();
mockner@7970 1883 }
mockner@7970 1884 switch (bytecode()) {
mockner@7970 1885 case Bytecodes::_l2i: // fall through
mockner@7970 1886 case Bytecodes::_f2i: // fall through
mockner@7970 1887 case Bytecodes::_d2i: // fall through
mockner@7970 1888 case Bytecodes::_i2b: // fall through
mockner@7970 1889 case Bytecodes::_i2c: // fall through
mockner@7970 1890 case Bytecodes::_i2s: tos_out = itos; break;
mockner@7970 1891 case Bytecodes::_i2l: // fall through
mockner@7970 1892 case Bytecodes::_f2l: // fall through
mockner@7970 1893 case Bytecodes::_d2l: tos_out = ltos; break;
mockner@7970 1894 case Bytecodes::_i2f: // fall through
mockner@7970 1895 case Bytecodes::_l2f: // fall through
mockner@7970 1896 case Bytecodes::_d2f: tos_out = ftos; break;
mockner@7970 1897 case Bytecodes::_i2d: // fall through
mockner@7970 1898 case Bytecodes::_l2d: // fall through
mockner@7970 1899 case Bytecodes::_f2d: tos_out = dtos; break;
mockner@7970 1900 default : ShouldNotReachHere();
mockner@7970 1901 }
mockner@7970 1902 transition(tos_in, tos_out);
mockner@7970 1903 }
mockner@7970 1904 #endif // ASSERT
mockner@7970 1905
mockner@7970 1906 // Conversion
mockner@7970 1907 // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
mockner@7970 1908 switch (bytecode()) {
mockner@7970 1909 case Bytecodes::_i2l:
mockner@7970 1910 __ extend_sign(rdx, rax);
mockner@7970 1911 break;
mockner@7970 1912 case Bytecodes::_i2f:
zmajo@8879 1913 if (UseSSE >= 1) {
zmajo@8879 1914 __ cvtsi2ssl(xmm0, rax);
zmajo@8879 1915 } else {
zmajo@8879 1916 __ push(rax); // store int on tos
zmajo@8879 1917 __ fild_s(at_rsp()); // load int to ST0
zmajo@8879 1918 __ f2ieee(); // truncate to float size
zmajo@8879 1919 __ pop(rcx); // adjust rsp
zmajo@8879 1920 }
mockner@7970 1921 break;
mockner@7970 1922 case Bytecodes::_i2d:
zmajo@8879 1923 if (UseSSE >= 2) {
zmajo@8879 1924 __ cvtsi2sdl(xmm0, rax);
zmajo@8879 1925 } else {
mockner@7970 1926 __ push(rax); // add one slot for d2ieee()
mockner@7970 1927 __ push(rax); // store int on tos
mockner@7970 1928 __ fild_s(at_rsp()); // load int to ST0
mockner@7970 1929 __ d2ieee(); // truncate to double size
mockner@7970 1930 __ pop(rcx); // adjust rsp
mockner@7970 1931 __ pop(rcx);
zmajo@8879 1932 }
mockner@7970 1933 break;
mockner@7970 1934 case Bytecodes::_i2b:
mockner@7970 1935 __ shll(rax, 24); // truncate upper 24 bits
mockner@7970 1936 __ sarl(rax, 24); // and sign-extend byte
mockner@7970 1937 LP64_ONLY(__ movsbl(rax, rax));
mockner@7970 1938 break;
mockner@7970 1939 case Bytecodes::_i2c:
mockner@7970 1940 __ andl(rax, 0xFFFF); // truncate upper 16 bits
mockner@7970 1941 LP64_ONLY(__ movzwl(rax, rax));
mockner@7970 1942 break;
mockner@7970 1943 case Bytecodes::_i2s:
mockner@7970 1944 __ shll(rax, 16); // truncate upper 16 bits
mockner@7970 1945 __ sarl(rax, 16); // and sign-extend short
mockner@7970 1946 LP64_ONLY(__ movswl(rax, rax));
mockner@7970 1947 break;
mockner@7970 1948 case Bytecodes::_l2i:
mockner@7970 1949 /* nothing to do */
mockner@7970 1950 break;
mockner@7970 1951 case Bytecodes::_l2f:
zmajo@8879 1952 // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
zmajo@8879 1953 // 64-bit long values to floats. On 32-bit platforms it is not possible
zmajo@8879 1954 // to use that instruction with 64-bit operands, therefore the FPU is
zmajo@8879 1955 // used to perform the conversion.
mockner@7970 1956 __ push(rdx); // store long on tos
mockner@7970 1957 __ push(rax);
mockner@7970 1958 __ fild_d(at_rsp()); // load long to ST0
mockner@7970 1959 __ f2ieee(); // truncate to float size
mockner@7970 1960 __ pop(rcx); // adjust rsp
mockner@7970 1961 __ pop(rcx);
zmajo@8879 1962 if (UseSSE >= 1) {
zmajo@8879 1963 __ push_f();
zmajo@8879 1964 __ pop_f(xmm0);
zmajo@8879 1965 }
mockner@7970 1966 break;
mockner@7970 1967 case Bytecodes::_l2d:
zmajo@8879 1968 // On 32-bit platforms the FPU is used for the conversion because
zmajo@8879 1969 // it is not possible to use the cvtsi2sdq instruction with
zmajo@8879 1970 // 64-bit operands on those platforms.
mockner@7970 1971 __ push(rdx); // store long on tos
mockner@7970 1972 __ push(rax);
mockner@7970 1973 __ fild_d(at_rsp()); // load long to ST0
mockner@7970 1974 __ d2ieee(); // truncate to double size
mockner@7970 1975 __ pop(rcx); // adjust rsp
mockner@7970 1976 __ pop(rcx);
zmajo@8879 1977 if (UseSSE >= 2) {
zmajo@8879 1978 __ push_d();
zmajo@8879 1979 __ pop_d(xmm0);
zmajo@8879 1980 }
mockner@7970 1981 break;
mockner@7970 1982 case Bytecodes::_f2i:
zmajo@8879 1983 // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
zmajo@8879 1984 // as it returns 0 for any NaN.
zmajo@8879 1985 if (UseSSE >= 1) {
zmajo@8879 1986 __ push_f(xmm0);
zmajo@8879 1987 } else {
zmajo@8879 1988 __ push(rcx); // reserve space for argument
zmajo@8879 1989 __ fstp_s(at_rsp()); // pass float argument on stack
zmajo@8879 1990 }
mockner@7970 1991 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
mockner@7970 1992 break;
mockner@7970 1993 case Bytecodes::_f2l:
zmajo@8879 1994 // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
zmajo@8879 1995 // as it returns 0 for any NaN.
zmajo@8879 1996 if (UseSSE >= 1) {
zmajo@8879 1997 __ push_f(xmm0);
zmajo@8879 1998 } else {
zmajo@8879 1999 __ push(rcx); // reserve space for argument
zmajo@8879 2000 __ fstp_s(at_rsp()); // pass float argument on stack
zmajo@8879 2001 }
mockner@7970 2002 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
mockner@7970 2003 break;
mockner@7970 2004 case Bytecodes::_f2d:
zmajo@8879 2005 if (UseSSE < 1) {
zmajo@8879 2006 /* nothing to do */
zmajo@8879 2007 } else if (UseSSE == 1) {
zmajo@8879 2008 __ push_f(xmm0);
zmajo@8879 2009 __ pop_f();
zmajo@8879 2010 } else { // UseSSE >= 2
zmajo@8879 2011 __ cvtss2sd(xmm0, xmm0);
zmajo@8879 2012 }
mockner@7970 2013 break;
mockner@7970 2014 case Bytecodes::_d2i:
zmajo@8879 2015 if (UseSSE >= 2) {
zmajo@8879 2016 __ push_d(xmm0);
zmajo@8879 2017 } else {
zmajo@8879 2018 __ push(rcx); // reserve space for argument
zmajo@8879 2019 __ push(rcx);
zmajo@8879 2020 __ fstp_d(at_rsp()); // pass double argument on stack
zmajo@8879 2021 }
mockner@7970 2022 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
mockner@7970 2023 break;
mockner@7970 2024 case Bytecodes::_d2l:
zmajo@8879 2025 if (UseSSE >= 2) {
zmajo@8879 2026 __ push_d(xmm0);
zmajo@8879 2027 } else {
zmajo@8879 2028 __ push(rcx); // reserve space for argument
zmajo@8879 2029 __ push(rcx);
zmajo@8879 2030 __ fstp_d(at_rsp()); // pass double argument on stack
zmajo@8879 2031 }
mockner@7970 2032 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
mockner@7970 2033 break;
mockner@7970 2034 case Bytecodes::_d2f:
zmajo@8879 2035 if (UseSSE <= 1) {
zmajo@8879 2036 __ push(rcx); // reserve space for f2ieee()
zmajo@8879 2037 __ f2ieee(); // truncate to float size
zmajo@8879 2038 __ pop(rcx); // adjust rsp
zmajo@8879 2039 if (UseSSE == 1) {
zmajo@8879 2040 // The cvtsd2ss instruction is not available if UseSSE==1, therefore
zmajo@8879 2041 // the conversion is performed using the FPU in this case.
zmajo@8879 2042 __ push_f();
zmajo@8879 2043 __ pop_f(xmm0);
zmajo@8879 2044 }
zmajo@8879 2045 } else { // UseSSE >= 2
zmajo@8879 2046 __ cvtsd2ss(xmm0, xmm0);
zmajo@8879 2047 }
mockner@7970 2048 break;
mockner@7970 2049 default :
mockner@7970 2050 ShouldNotReachHere();
mockner@7970 2051 }
mockner@7970 2052 #endif
duke@0 2053 }
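// Note on the f2i/f2l/d2i/d2l fast paths above (reference remark): the
// cvttss2si/cvttsd2si instructions return the "integer indefinite" value
// (0x80000000, or 0x8000000000000000 in quadword form) for NaN and
// out-of-range inputs, so only those inputs take the SharedRuntime call,
// which applies the exact JLS rules (NaN -> 0, overflow saturates to
// min/max).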
duke@0 2054
duke@0 2055 void TemplateTable::lcmp() {
duke@0 2056 transition(ltos, itos);
mockner@7970 2057 #ifdef _LP64
duke@0 2058 Label done;
duke@0 2059 __ pop_l(rdx);
duke@0 2060 __ cmpq(rdx, rax);
duke@0 2061 __ movl(rax, -1);
duke@0 2062 __ jccb(Assembler::less, done);
duke@0 2063 __ setb(Assembler::notEqual, rax);
duke@0 2064 __ movzbl(rax, rax);
duke@0 2065 __ bind(done);
mockner@7970 2066 #else
mockner@7970 2067
mockner@7970 2068 // y = rdx:rax
mockner@7970 2069 __ pop_l(rbx, rcx); // get x = rcx:rbx
mockner@7970 2070 __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
mockner@7970 2071 __ mov(rax, rcx);
mockner@7970 2072 #endif
duke@0 2073 }
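// Result encoding, for reference: with x the next-to-top long and y the
// top-of-stack long, lcmp leaves -1 in rax if x < y, 0 if x == y, and 1 if
// x > y, as the lcmp bytecode requires.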
duke@0 2074
duke@0 2075 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
zmajo@8879 2076 if ((is_float && UseSSE >= 1) ||
zmajo@8879 2077 (!is_float && UseSSE >= 2)) {
zmajo@8879 2078 Label done;
zmajo@8879 2079 if (is_float) {
zmajo@8879 2080 // XXX get rid of pop here, use ... reg, mem32
zmajo@8879 2081 __ pop_f(xmm1);
zmajo@8879 2082 __ ucomiss(xmm1, xmm0);
zmajo@8879 2083 } else {
zmajo@8879 2084 // XXX get rid of pop here, use ... reg, mem64
zmajo@8879 2085 __ pop_d(xmm1);
zmajo@8879 2086 __ ucomisd(xmm1, xmm0);
zmajo@8879 2087 }
zmajo@8879 2088 if (unordered_result < 0) {
zmajo@8879 2089 __ movl(rax, -1);
zmajo@8879 2090 __ jccb(Assembler::parity, done);
zmajo@8879 2091 __ jccb(Assembler::below, done);
zmajo@8879 2092 __ setb(Assembler::notEqual, rdx);
zmajo@8879 2093 __ movzbl(rax, rdx);
zmajo@8879 2094 } else {
zmajo@8879 2095 __ movl(rax, 1);
zmajo@8879 2096 __ jccb(Assembler::parity, done);
zmajo@8879 2097 __ jccb(Assembler::above, done);
zmajo@8879 2098 __ movl(rax, 0);
zmajo@8879 2099 __ jccb(Assembler::equal, done);
zmajo@8879 2100 __ decrementl(rax);
zmajo@8879 2101 }
zmajo@8879 2102 __ bind(done);
zmajo@8879 2103 } else {
mockner@7970 2104 #ifdef _LP64
zmajo@8879 2105 ShouldNotReachHere();
zmajo@8879 2106 #else
zmajo@8879 2107 if (is_float) {
zmajo@8879 2108 __ fld_s(at_rsp());
zmajo@8879 2109 } else {
zmajo@8879 2110 __ fld_d(at_rsp());
zmajo@8879 2111 __ pop(rdx);
zmajo@8879 2112 }
zmajo@8879 2113 __ pop(rcx);
zmajo@8879 2114 __ fcmp2int(rax, unordered_result < 0);
zmajo@8879 2115 #endif // _LP64
duke@0 2116 }
duke@0 2117 }
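// NaN policy sketch, for reference: unordered_result < 0 implements
// fcmpl/dcmpl (an unordered compare produces -1) and unordered_result > 0
// implements fcmpg/dcmpg (an unordered compare produces 1); the parity
// jumps above take exactly that unordered path, because ucomiss/ucomisd
// set PF when either operand is NaN.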
duke@0 2118
duke@0 2119 void TemplateTable::branch(bool is_jsr, bool is_wide) {
fparain@13041 2120 if (ValueTypesThreadLocalRecycling) {
fparain@13041 2121 Label no_vt_recycling, no_fixing_required;
fparain@13041 2122 const Register thread1 = NOT_LP64(rbx) LP64_ONLY(r15_thread);
fparain@13041 2123 NOT_LP64(__ get_thread(thread1));
fparain@13041 2124 __ movptr(rbx, Address(thread1, in_bytes(JavaThread::vt_alloc_ptr_offset())));
fparain@13041 2125 __ testptr(rbx, rbx);
fparain@13041 2126 __ jcc(Assembler::zero, no_vt_recycling);
fparain@13041 2127 __ movptr(rcx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
fparain@13041 2128 __ testptr(rcx, rcx);
fparain@13041 2129 __ jcc(Assembler::notZero, no_fixing_required);
fparain@13041 2130 // vt_alloc_ptr in JavaThread is non-null but frame vt_alloc_ptr is null
fparain@13041 2131 // which means frame vt_alloc_ptr needs to be initialized
fparain@13041 2132 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::fix_frame_vt_alloc_ptr));
fparain@13041 2133 __ movptr(rcx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
fparain@13041 2134 __ bind(no_fixing_required);
fparain@13041 2135 __ testptr(rcx, rbx);
fparain@13041 2136 __ jcc(Assembler::equal, no_vt_recycling);
fparain@13041 2137 __ andptr(rcx, VTBufferChunk::chunk_mask());
fparain@13041 2138 __ movl(rcx, Address(rcx, VTBufferChunk::index_offset()));
fparain@13041 2139 __ andptr(rbx, VTBufferChunk::chunk_mask());
fparain@13041 2140 __ movl(rbx, Address(rbx, VTBufferChunk::index_offset()));
fparain@13041 2141 __ subl(rbx, rcx);
fparain@13041 2142 __ get_method(rcx);
fparain@13041 2143 __ movl(rcx, Address(rcx, Method::max_vt_buffer_offset()));
fparain@13041 2144 __ cmpl(rbx, rcx);
fparain@13041 2145 __ jcc(Assembler::lessEqual, no_vt_recycling);
fparain@13041 2146 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::recycle_buffered_values));
fparain@13041 2147 __ bind(no_vt_recycling);
fparain@13041 2148 }
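// Sketch of the recycling test above, assuming VTBufferChunk uses
// power-of-two sized chunks carrying their index in the chunk header: both
// allocation pointers are reduced to a chunk index, and only when the frame
// has consumed more than Method::max_vt_buffer chunks since entry does the
// interpreter call InterpreterRuntime::recycle_buffered_values.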
fparain@13041 2149
duke@0 2150 __ get_method(rcx); // rcx holds method
duke@0 2151 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
duke@0 2152 // holds bumped taken count
duke@0 2153
jiangli@4501 2154 const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
duke@0 2155 InvocationCounter::counter_offset();
jiangli@4501 2156 const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
duke@0 2157 InvocationCounter::counter_offset();
duke@0 2158
duke@0 2159 // Load up edx with the branch displacement
mgerdin@5624 2160 if (is_wide) {
mgerdin@5624 2161 __ movl(rdx, at_bcp(1));
mgerdin@5624 2162 } else {
mgerdin@5624 2163 __ load_signed_short(rdx, at_bcp(1));
mgerdin@5624 2164 }
duke@0 2165 __ bswapl(rdx);
duke@0 2166
duke@0 2167 if (!is_wide) {
duke@0 2168 __ sarl(rdx, 16);
duke@0 2169 }
mockner@7970 2170 LP64_ONLY(__ movl2ptr(rdx, rdx));
duke@0 2171
duke@0 2172 // Handle all the JSR stuff here, then exit.
duke@0 2173 // It's much shorter and cleaner than intermingling with the non-JSR
twisti@605 2174 // normal-branch stuff occurring below.
duke@0 2175 if (is_jsr) {
duke@0 2176 // Pre-load the next target bytecode into rbx
mockner@7970 2177 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
duke@0 2178
duke@0 2179 // compute return address as bci in rax
never@304 2180 __ lea(rax, at_bcp((is_wide ? 5 : 3) -
coleenp@3602 2181 in_bytes(ConstMethod::codes_offset())));
coleenp@3602 2182 __ subptr(rax, Address(rcx, Method::const_offset()));
duke@0 2183 // Adjust the bcp in rbcp by the displacement in rdx
mockner@7970 2184 __ addptr(rbcp, rdx);
duke@0 2185 // jsr returns atos that is not an oop
duke@0 2186 __ push_i(rax);
duke@0 2187 __ dispatch_only(vtos);
duke@0 2188 return;
duke@0 2189 }
duke@0 2190
duke@0 2191 // Normal (non-jsr) branch handling
duke@0 2192
duke@0 2193 // Adjust the bcp in rbcp by the displacement in rdx
mockner@7970 2194 __ addptr(rbcp, rdx);
duke@0 2195
duke@0 2196 assert(UseLoopCounter || !UseOnStackReplacement,
duke@0 2197 "on-stack-replacement requires loop counters");
duke@0 2198 Label backedge_counter_overflow;
duke@0 2199 Label profile_method;
duke@0 2200 Label dispatch;
duke@0 2201 if (UseLoopCounter) {
duke@0 2202 // increment backedge counter for backward branches
duke@0 2203 // rax: MDO
mockner@7970 2204 // rbx: MDO bumped taken-count
duke@0 2205 // rcx: method
duke@0 2206 // rdx: target offset
duke@0 2207 // rbcp: target bcp
duke@0 2208 // rlocals: locals pointer
duke@0 2209 __ testl(rdx, rdx); // check if forward or backward branch
duke@0 2210 __ jcc(Assembler::positive, dispatch); // count only if backward branch
jiangli@4501 2211
jiangli@4501 2212 // check if MethodCounters exists
jiangli@4501 2213 Label has_counters;
jiangli@4501 2214 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
jiangli@4501 2215 __ testptr(rax, rax);
jiangli@4501 2216 __ jcc(Assembler::notZero, has_counters);
jiangli@4501 2217 __ push(rdx);
jiangli@4501 2218 __ push(rcx);
jiangli@4501 2219 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
jiangli@4501 2220 rcx);
jiangli@4501 2221 __ pop(rcx);
jiangli@4501 2222 __ pop(rdx);
jiangli@4501 2223 __ movptr(rax, Address(rcx, Method::method_counters_offset()));
coleenp@8843 2224 __ testptr(rax, rax);
jiangli@4501 2225 __ jcc(Assembler::zero, dispatch);
jiangli@4501 2226 __ bind(has_counters);
jiangli@4501 2227
iveresov@1703 2228 if (TieredCompilation) {
iveresov@1703 2229 Label no_mdo;
iveresov@1703 2230 int increment = InvocationCounter::count_increment;
iveresov@1703 2231 if (ProfileInterpreter) {
iveresov@1703 2232 // Are we profiling?
coleenp@3602 2233 __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
iveresov@1703 2234 __ testptr(rbx, rbx);
iveresov@1703 2235 __ jccb(Assembler::zero, no_mdo);
iveresov@1703 2236 // Increment the MDO backedge counter
coleenp@3602 2237 const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
iveresov@1703 2238 in_bytes(InvocationCounter::counter_offset()));
zmajo@7740 2239 const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
iveresov@1703 2240 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
iveresov@1703 2241 rax, false, Assembler::zero, &backedge_counter_overflow);
iveresov@1703 2242 __ jmp(dispatch);
duke@0 2243 }
iveresov@1703 2244 __ bind(no_mdo);
jiangli@4501 2245 // Increment backedge counter in MethodCounters*
jiangli@4501 2246 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
mockner@7970 2247 const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
iveresov@1703 2248 __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
iveresov@1703 2249 rax, false, Assembler::zero, &backedge_counter_overflow);
zmajo@7740 2250 } else { // not TieredCompilation
iveresov@1703 2251 // increment counter
jiangli@4501 2252 __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
iveresov@1703 2253 __ movl(rax, Address(rcx, be_offset)); // load backedge counter
iveresov@1703 2254 __ incrementl(rax, InvocationCounter::count_increment); // increment counter
iveresov@1703 2255 __ movl(Address(rcx, be_offset), rax); // store counter
iveresov@1703 2256
iveresov@1703 2257 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter
jiangli@4501 2258
iveresov@1703 2259 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
iveresov@1703 2260 __ addl(rax, Address(rcx, be_offset)); // add both counters
iveresov@1703 2261
iveresov@1703 2262 if (ProfileInterpreter) {
iveresov@1703 2263 // Test to see if we should create a method data oop
zmajo@7740 2264 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
iveresov@1703 2265 __ jcc(Assembler::less, dispatch);
iveresov@1703 2266
iveresov@1703 2267 // if no method data exists, go to profile method
iveresov@1703 2268 __ test_method_data_pointer(rax, profile_method);
iveresov@1703 2269
iveresov@1703 2270 if (UseOnStackReplacement) {
mockner@7970 2271 // check for overflow against rbx which is the MDO taken count
zmajo@7740 2272 __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
iveresov@1703 2273 __ jcc(Assembler::below, dispatch);
iveresov@1703 2274
iveresov@1703 2275 // When ProfileInterpreter is on, the backedge_count comes
coleenp@3602 2276 // from the MethodData*, which value does not get reset on
iveresov@1703 2277 // the call to frequency_counter_overflow(). To avoid
iveresov@1703 2278 // excessive calls to the overflow routine while the method is
iveresov@1703 2279 // being compiled, add a second test to make sure the overflow
iveresov@1703 2280 // function is called only once every overflow_frequency.
iveresov@1703 2281 const int overflow_frequency = 1024;
iveresov@1703 2282 __ andl(rbx, overflow_frequency - 1);
iveresov@1703 2283 __ jcc(Assembler::zero, backedge_counter_overflow);
iveresov@1703 2284
iveresov@1703 2285 }
iveresov@1703 2286 } else {
iveresov@1703 2287 if (UseOnStackReplacement) {
mockner@7970 2288 // check for overflow against rax, which is the sum of the
iveresov@1703 2289 // counters
zmajo@7740 2290 __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
iveresov@1703 2291 __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
iveresov@1703 2292
iveresov@1703 2293 }
duke@0 2294 }
duke@0 2295 }
duke@0 2296 __ bind(dispatch);
duke@0 2297 }
duke@0 2298
duke@0 2299 // Pre-load the next target bytecode into rbx
mockner@7970 2300 __ load_unsigned_byte(rbx, Address(rbcp, 0));
duke@0 2301
duke@0 2302 // continue with the bytecode @ target
mockner@7970 2303 // rax: return bci for jsr's, unused otherwise
mockner@7970 2304 // rbx: target bytecode
duke@0 2305 // rbcp: target bcp
duke@0 2306 __ dispatch_only(vtos);
duke@0 2307
duke@0 2308 if (UseLoopCounter) {
duke@0 2309 if (ProfileInterpreter) {
duke@0 2310 // Out-of-line code to allocate method data oop.
duke@0 2311 __ bind(profile_method);
iveresov@2003 2312 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
iveresov@2003 2313 __ set_method_data_pointer_for_bcp();
duke@0 2314 __ jmp(dispatch);
duke@0 2315 }
duke@0 2316
duke@0 2317 if (UseOnStackReplacement) {
duke@0 2318 // invocation counter overflow
duke@0 2319 __ bind(backedge_counter_overflow);
never@304 2320 __ negptr(rdx);
mockner@7970 2321 __ addptr(rdx, rbcp); // branch bcp
duke@0 2322 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
duke@0 2323 __ call_VM(noreg,
duke@0 2324 CAST_FROM_FN_PTR(address,
duke@0 2325 InterpreterRuntime::frequency_counter_overflow),
duke@0 2326 rdx);
duke@0 2327
duke@0 2328 // rax: osr nmethod (osr ok) or NULL (osr not possible)
duke@0 2329 // rdx: scratch
duke@0 2330 // rlocals: locals pointer
duke@0 2331 // rbcp: bcp
never@304 2332 __ testptr(rax, rax); // test result
duke@0 2333 __ jcc(Assembler::zero, dispatch); // no osr if null
duke@0 2334 // nmethod may have been invalidated (VM may block upon call_VM return)
thartmann@7072 2335 __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
thartmann@7072 2336 __ jcc(Assembler::notEqual, dispatch);
duke@0 2337
mgronlun@12735 2338 // We have the address of an on stack replacement routine in rax.
mgronlun@12735 2339 // In preparation of invoking it, first we must migrate the locals
mgronlun@12735 2340 // and monitors from off the interpreter frame on the stack.
mgronlun@12735 2341 // Ensure to save the osr nmethod over the migration call,
mgronlun@12735 2342 // it will be preserved in rbx.
mgronlun@12735 2343 __ mov(rbx, rax);
mgronlun@12735 2344
mockner@7970 2345 NOT_LP64(__ get_thread(rcx));
duke@0 2346
duke@0 2347 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
duke@0 2348
mockner@7970 2349 // rax is OSR buffer, move it to expected parameter location
mockner@7970 2350 LP64_ONLY(__ mov(j_rarg0, rax));
mockner@7970 2351 NOT_LP64(__ mov(rcx, rax));
duke@0 2352 // We use j_rarg definitions here because parameter registers differ
duke@0 2353 // across platforms and we are in the midst of a calling sequence to
duke@0 2354 // the OSR nmethod, so we don't want collisions. These are NOT parameters.
duke@0 2355
mockner@7970 2356 const Register retaddr = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
mockner@7970 2357 const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
mockner@7970 2358
duke@0 2359 // pop the interpreter frame
never@304 2360 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
duke@0 2361 __ leave(); // remove frame anchor
never@304 2362 __ pop(retaddr); // get return address
never@304 2363 __ mov(rsp, sender_sp); // set sp to sender sp
duke@0 2364 // Ensure compiled code always sees stack at proper alignment
never@304 2365 __ andptr(rsp, -(StackAlignmentInBytes));
duke@0 2366
duke@0 2367 // unlike x86 we need no specialized return from compiled code
duke@0 2368 // to the interpreter or the call stub.
duke@0 2369
duke@0 2370 // push the return address
never@304 2371 __ push(retaddr);
duke@0 2372
duke@0 2373 // and begin the OSR nmethod
mgronlun@12735 2374 __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
duke@0 2375 }
duke@0 2376 }
duke@0 2377 }
duke@0 2378
duke@0 2379 void TemplateTable::if_0cmp(Condition cc) {
duke@0 2380 transition(itos, vtos);
duke@0 2381 // assume branch is more often taken than not (loops use backward branches)
duke@0 2382 Label not_taken;
duke@0 2383 __ testl(rax, rax);
duke@0 2384 __ jcc(j_not(cc), not_taken);
duke@0 2385 branch(false, false);
duke@0 2386 __ bind(not_taken);
duke@0 2387 __ profile_not_taken_branch(rax);
duke@0 2388 }
duke@0 2389
duke@0 2390 void TemplateTable::if_icmp(Condition cc) {
duke@0 2391 transition(itos, vtos);
duke@0 2392 // assume branch is more often taken than not (loops use backward branches)
duke@0 2393 Label not_taken;
duke@0 2394 __ pop_i(rdx);
duke@0 2395 __ cmpl(rdx, rax);
duke@0 2396 __ jcc(j_not(cc), not_taken);
duke@0 2397 branch(false, false);
duke@0 2398 __ bind(not_taken);
duke@0 2399 __ profile_not_taken_branch(rax);
duke@0 2400 }
duke@0 2401
duke@0 2402 void TemplateTable::if_nullcmp(Condition cc) {
duke@0 2403 transition(atos, vtos);
duke@0 2404 // assume branch is more often taken than not (loops use backward branches)
duke@0 2405 Label not_taken;
never@304 2406 __ testptr(rax, rax);
duke@0 2407 __ jcc(j_not(cc), not_taken);
duke@0 2408 branch(false, false);
duke@0 2409 __ bind(not_taken);
duke@0 2410 __ profile_not_taken_branch(rax);
duke@0 2411 }
duke@0 2412
duke@0 2413 void TemplateTable::if_acmp(Condition cc) {
duke@0 2414 transition(atos, vtos);
duke@0 2415 // assume branch is more often taken than not (loops use backward branches)
duke@0 2416 Label not_taken;
duke@0 2417 __ pop_ptr(rdx);
never@304 2418 __ cmpptr(rdx, rax);
duke@0 2419 __ jcc(j_not(cc), not_taken);
duke@0 2420 branch(false, false);
duke@0 2421 __ bind(not_taken);
duke@0 2422 __ profile_not_taken_branch(rax);
duke@0 2423 }
duke@0 2424
duke@0 2425 void TemplateTable::ret() {
duke@0 2426 transition(vtos, vtos);
duke@0 2427 locals_index(rbx);
mockner@7970 2428 LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
mockner@7970 2429 NOT_LP64(__ movptr(rbx, iaddress(rbx)));
duke@0 2430 __ profile_ret(rbx, rcx);
duke@0 2431 __ get_method(rax);
mockner@7970 2432 __ movptr(rbcp, Address(rax, Method::const_offset()));
mockner@7970 2433 __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
coleenp@3602 2434 ConstMethod::codes_offset()));
duke@0 2435 __ dispatch_next(vtos);
duke@0 2436 }
duke@0 2437
duke@0 2438 void TemplateTable::wide_ret() {
duke@0 2439 transition(vtos, vtos);
duke@0 2440 locals_index_wide(rbx);
never@304 2441 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
duke@0 2442 __ profile_ret(rbx, rcx);
duke@0 2443 __ get_method(rax);
mockner@7970 2444 __ movptr(rbcp, Address(rax, Method::const_offset()));
mockner@7970 2445 __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
duke@0 2446 __ dispatch_next(vtos);
duke@0 2447 }
duke@0 2448
duke@0 2449 void TemplateTable::tableswitch() {
duke@0 2450 Label default_case, continue_execution;
duke@0 2451 transition(itos, vtos);
mockner@7970 2452
mockner@7970 2453 // align r13/rsi
never@304 2454 __ lea(rbx, at_bcp(BytesPerInt));
never@304 2455 __ andptr(rbx, -BytesPerInt);
duke@0 2456 // load lo & hi
duke@0 2457 __ movl(rcx, Address(rbx, BytesPerInt));
duke@0 2458 __ movl(rdx, Address(rbx, 2 * BytesPerInt));
duke@0 2459 __ bswapl(rcx);
duke@0 2460 __ bswapl(rdx);
duke@0 2461 // check against lo & hi
duke@0 2462 __ cmpl(rax, rcx);
duke@0 2463 __ jcc(Assembler::less, default_case);
duke@0 2464 __ cmpl(rax, rdx);
duke@0 2465 __ jcc(Assembler::greater, default_case);
duke@0 2466 // lookup dispatch offset
duke@0 2467 __ subl(rax, rcx);
duke@0 2468 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
duke@0 2469 __ profile_switch_case(rax, rbx, rcx);
duke@0 2470 // continue execution
duke@0 2471 __ bind(continue_execution);
duke@0 2472 __ bswapl(rdx);
mockner@7970 2473 LP64_ONLY(__ movl2ptr(rdx, rdx));
mockner@7970 2474 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
mockner@7970 2475 __ addptr(rbcp, rdx);
duke@0 2476 __ dispatch_only(vtos);
duke@0 2477 // handle default
duke@0 2478 __ bind(default_case);
duke@0 2479 __ profile_switch_default(rax);
duke@0 2480 __ movl(rdx, Address(rbx, 0));
duke@0 2481 __ jmp(continue_execution);
duke@0 2482 }
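// Operand layout decoded above, per the JVM spec (reference remark): after
// the tableswitch opcode and 0-3 padding bytes up to a 4-byte boundary come
// big-endian 32-bit default, low and high words, then (high - low + 1)
// 32-bit jump offsets; hence the 3 * BytesPerInt base of the indexed load
// and the bswapl calls.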
duke@0 2483
duke@0 2484 void TemplateTable::lookupswitch() {
duke@0 2485 transition(itos, itos);
duke@0 2486 __ stop("lookupswitch bytecode should have been rewritten");
duke@0 2487 }
duke@0 2488
duke@0 2489 void TemplateTable::fast_linearswitch() {
duke@0 2490 transition(itos, vtos);
duke@0 2491 Label loop_entry, loop, found, continue_execution;
duke@0 2492 // bswap rax so we can avoid bswapping the table entries
duke@0 2493 __ bswapl(rax);
duke@0 2494 // align r13
never@304 2495 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
never@304 2496 // this instruction (change offsets
never@304 2497 // below)
never@304 2498 __ andptr(rbx, -BytesPerInt);
duke@0 2499 // set counter
duke@0 2500 __ movl(rcx, Address(rbx, BytesPerInt));
duke@0 2501 __ bswapl(rcx);
duke@0 2502 __ jmpb(loop_entry);
duke@0 2503 // table search
duke@0 2504 __ bind(loop);
duke@0 2505 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
duke@0 2506 __ jcc(Assembler::equal, found);
duke@0 2507 __ bind(loop_entry);
duke@0 2508 __ decrementl(rcx);
duke@0 2509 __ jcc(Assembler::greaterEqual, loop);
duke@0 2510 // default case
duke@0 2511 __ profile_switch_default(rax);
duke@0 2512 __ movl(rdx, Address(rbx, 0));
duke@0 2513 __ jmp(continue_execution);
duke@0 2514 // entry found -> get offset
duke@0 2515 __ bind(found);
duke@0 2516 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
duke@0 2517 __ profile_switch_case(rcx, rax, rbx);
duke@0 2518 // continue execution
duke@0 2519 __ bind(continue_execution);
duke@0 2520 __ bswapl(rdx);
never@304 2521 __ movl2ptr(rdx, rdx);
mockner@7970 2522 __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
mockner@7970 2523 __ addptr(rbcp, rdx);
duke@0 2524 __ dispatch_only(vtos);
duke@0 2525 }
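// Operand layout assumed above (reference remark): the aligned lookupswitch
// body is a big-endian 32-bit default offset, an npairs count, then npairs
// (match, offset) pairs; that is why matches are scanned with stride
// times_8 from base 2 * BytesPerInt and the matching offset sits one word
// later.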
duke@0 2526
duke@0 2527 void TemplateTable::fast_binaryswitch() {
duke@0 2528 transition(itos, vtos);
duke@0 2529 // Implementation using the following core algorithm:
duke@0 2530 //
duke@0 2531 // int binary_search(int key, LookupswitchPair* array, int n) {
duke@0 2532 // // Binary search according to "Methodik des Programmierens" by
duke@0 2533 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
duke@0 2534 // int i = 0;
duke@0 2535 // int j = n;
duke@0 2536 // while (i+1 < j) {
duke@0 2537 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
duke@0 2538 // // with Q: for all i: 0 <= i < n: key < a[i]
duke@0 2539 // // where a stands for the array, assuming that the (nonexistent)
duke@0 2540 // // element a[n] is infinitely big.
duke@0 2541 // int h = (i + j) >> 1;
duke@0 2542 // // i < h < j
duke@0 2543 // if (key < array[h].fast_match()) {
duke@0 2544 // j = h;
duke@0 2545 // } else {
duke@0 2546 // i = h;
duke@0 2547 // }
duke@0 2548 // }
duke@0 2549 // // R: a[i] <= key < a[i+1] or Q
duke@0 2550 // // (i.e., if key is within array, i is the correct index)
duke@0 2551 // return i;
duke@0 2552 // }
duke@0 2553
duke@0 2554 // Register allocation
duke@0 2555 const Register key = rax; // already set (tosca)
duke@0 2556 const Register array = rbx;
duke@0 2557 const Register i = rcx;
duke@0 2558 const Register j = rdx;
duke@0 2559 const Register h = rdi;
duke@0 2560 const Register temp = rsi;
duke@0 2561
duke@0 2562 // Find array start
mockner@7970 2563 NOT_LP64(__ save_bcp());
mockner@7970 2564
never@304 2565 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
never@304 2566 // get rid of this
never@304 2567 // instruction (change
never@304 2568 // offsets below)
never@304 2569 __ andptr(array, -BytesPerInt);
duke@0 2570
duke@0 2571 // Initialize i & j
duke@0 2572 __ xorl(i, i); // i = 0;
duke@0 2573 __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
duke@0 2574
duke@0 2575 // Convert j into native byteordering
duke@0 2576 __ bswapl(j);
duke@0 2577
duke@0 2578 // And start
duke@0 2579 Label entry;
duke@0 2580 __ jmp(entry);
duke@0 2581
duke@0 2582 // binary search loop
duke@0 2583 {
duke@0 2584 Label loop;
duke@0 2585 __ bind(loop);
duke@0 2586 // int h = (i + j) >> 1;
duke@0 2587 __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
duke@0 2588 __ sarl(h, 1); // h = (i + j) >> 1;
duke@0 2589 // if (key < array[h].fast_match()) {
duke@0 2590 // j = h;
duke@0 2591 // } else {
duke@0 2592 // i = h;
duke@0 2593 // }
duke@0 2594 // Convert array[h].match to native byte-ordering before compare
duke@0 2595 __ movl(temp, Address(array, h, Address::times_8));
duke@0 2596 __ bswapl(temp);
duke@0 2597 __ cmpl(key, temp);
duke@0 2598 // j = h if (key < array[h].fast_match())
mockner@7970 2599 __ cmov32(Assembler::less, j, h);
duke@0 2600 // i = h if (key >= array[h].fast_match())
mockner@7970 2601 __ cmov32(Assembler::greaterEqual, i, h);
duke@0 2602 // while (i+1 < j)
duke@0 2603 __ bind(entry);
duke@0 2604 __ leal(h, Address(i, 1)); // i+1
duke@0 2605 __ cmpl(h, j); // i+1 < j
duke@0 2606 __ jcc(Assembler::less, loop);
duke@0 2607 }
duke@0 2608
duke@0 2609 // end of binary search, result index is i (must check again!)
duke@0 2610 Label default_case;
duke@0 2611 // Convert array[i].match to native byte-ordering before compare
duke@0 2612 __ movl(temp, Address(array, i, Address::times_8));
duke@0 2613 __ bswapl(temp);
duke@0 2614 __ cmpl(key, temp);
duke@0 2615 __ jcc(Assembler::notEqual, default_case);
duke@0 2616
duke@0 2617 // entry found -> j = offset
duke@0 2618 __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
duke@0 2619 __ profile_switch_case(i, key, array);
duke@0 2620 __ bswapl(j);
mockner@7970 2621 LP64_ONLY(__ movslq(j, j));
mockner@7970 2622
mockner@7970 2623 NOT_LP64(__ restore_bcp());
mockner@7970 2624 NOT_LP64(__ restore_locals()); // restore rdi
mockner@7970 2625
mockner@7970 2626 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
mockner@7970 2627 __ addptr(rbcp, j);
duke@0 2628 __ dispatch_only(vtos);
duke@0 2629
duke@0 2630 // default case -> j = default offset
duke@0 2631 __ bind(default_case);
duke@0 2632 __ profile_switch_default(i);
duke@0 2633 __ movl(j, Address(array, -2 * BytesPerInt));
duke@0 2634 __ bswapl(j);
mockner@7970 2635 LP64_ONLY(__ movslq(j, j));
mockner@7970 2636
mockner@7970 2637 NOT_LP64(__ restore_bcp());
mockner@7970 2638 NOT_LP64(__ restore_locals());
mockner@7970 2639
mockner@7970 2640 __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
mockner@7970 2641 __ addptr(rbcp, j);
duke@0 2642 __ dispatch_only(vtos);
duke@0 2643 }
duke@0 2644
duke@0 2645 void TemplateTable::_return(TosState state) {
duke@0 2646 transition(state, state);
mockner@7970 2647
duke@0 2648 assert(_desc->calls_vm(),
duke@0 2649 "inconsistent calls_vm information"); // call in remove_activation
duke@0 2650
duke@0 2651 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
duke@0 2652 assert(state == vtos, "only valid state");
coleenp@10885 2653 Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
mockner@7970 2654 __ movptr(robj, aaddress(0));
mockner@7970 2655 __ load_klass(rdi, robj);
stefank@2956 2656 __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
duke@0 2657 __ testl(rdi, JVM_ACC_HAS_FINALIZER);
duke@0 2658 Label skip_register_finalizer;
duke@0 2659 __ jcc(Assembler::zero, skip_register_finalizer);
duke@0 2660
mockner@7970 2661 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
duke@0 2662
duke@0 2663 __ bind(skip_register_finalizer);
duke@0 2664 }
duke@0 2665
fparain@13041 2666 if (state == qtos) {
fparain@13041 2667 const Register thread1 = NOT_LP64(rcx) LP64_ONLY(r15_thread);
fparain@13041 2668 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::return_value), rax);
fparain@13041 2669 NOT_LP64(__ get_thread(thread1));
fparain@13041 2670 __ get_vm_result(rax, thread1);
fparain@13041 2671 }
coleenp@10885 2672 // Narrow result if state is itos but result type is smaller.
coleenp@10885 2673 // Need to narrow in the return bytecode rather than in generate_return_entry
coleenp@10885 2674 // since compiled code callers expect the result to already be narrowed.
coleenp@10885 2675 if (state == itos) {
coleenp@10885 2676 __ narrow(rax);
coleenp@10885 2677 }
fparain@13041 2678
fparain@13041 2679 #ifdef ASSERT
fparain@13041 2680 if (EnableMVT || EnableValhalla) {
fparain@13041 2681 if (state == atos) {
fparain@13041 2682 const Register thread1 = NOT_LP64(rcx) LP64_ONLY(r15_thread);
fparain@13041 2683 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::check_areturn), rax);
fparain@13041 2684 NOT_LP64(__ get_thread(thread1));
fparain@13041 2685 __ get_vm_result(rax, thread1);
fparain@13041 2686 }
fparain@13041 2687 }
fparain@13041 2688 #endif // ASSERT
fparain@13041 2689
roland@13020 2690 __ remove_activation(state, rbcp, true, true, true, state == qtos && ValueTypeReturnedAsFields);
coleenp@10885 2691
mockner@7970 2692 __ jmp(rbcp);
duke@0 2693 }
duke@0 2694
duke@0 2695 // ----------------------------------------------------------------------------
duke@0 2696 // Volatile variables demand their effects be made known to all CPU's
duke@0 2697 // in order. Store buffers on most chips allow reads & writes to
duke@0 2698 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
duke@0 2699 // without some kind of memory barrier (i.e., it's not sufficient that
duke@0 2700 // the interpreter does not reorder volatile references, the hardware
duke@0 2701 // also must not reorder them).
duke@0 2702 //
duke@0 2703 // According to the new Java Memory Model (JMM):
duke@0 2704 // (1) All volatiles are serialized wrt each other. ALSO reads &
duke@0 2705 // writes act as acquire & release, so:
duke@0 2706 // (2) A read cannot let unrelated NON-volatile memory refs that
duke@0 2707 // happen after the read float up to before the read. It's OK for
duke@0 2708 // non-volatile memory refs that happen before the volatile read to
duke@0 2709 // float down below it.
duke@0 2710 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
duke@0 2711 // memory refs that happen BEFORE the write float down to after the
duke@0 2712 // write. It's OK for non-volatile memory refs that happen after the
duke@0 2713 // volatile write to float up before it.
duke@0 2714 //
duke@0 2715 // We only put in barriers around volatile refs (they are expensive),
duke@0 2716 // not _between_ memory refs (that would require us to track the
duke@0 2717 // flavor of the previous memory refs). Requirements (2) and (3)
duke@0 2718 // require some barriers before volatile stores and after volatile
duke@0 2719 // loads. These nearly cover requirement (1) but miss the
duke@0 2720 // volatile-store-volatile-load case. This final case is placed after
duke@0 2721 // volatile-stores although it could just as well go before
duke@0 2722 // volatile-loads.
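//
// On x86, which is TSO, only that volatile-store-volatile-load case needs a
// real fence: in the current MacroAssembler::membar implementation only a
// mask including StoreLoad emits an instruction (a locked add or mfence),
// the remaining orderings being free on this hardware model. This is an
// observation about the present implementation, not a portable guarantee.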
mockner@7970 2723
mockner@7970 2724 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
duke@0 2725 // Helper function to insert an is-volatile test and memory barrier
mockner@7970 2726 if (!os::is_MP()) return; // Not needed on single CPU
mockner@7970 2727 __ membar(order_constraint);
duke@0 2728 }
duke@0 2729
jrose@1485 2730 void TemplateTable::resolve_cache_and_index(int byte_no,
jrose@1485 2731 Register Rcache,
jrose@1485 2732 Register index,
jrose@1485 2733 size_t index_size) {
duke@0 2734 const Register temp = rbx;
coleenp@3602 2735 assert_different_registers(Rcache, index, temp);
jrose@1485 2736
duke@0 2737 Label resolved;
minqi@8102 2738
minqi@8102 2739 Bytecodes::Code code = bytecode();
minqi@8102 2740 switch (code) {
minqi@8102 2741 case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
minqi@8102 2742 case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
jwilhelm@13274 2743 default: break;
minqi@8102 2744 }
minqi@8102 2745
minqi@8102 2746 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
minqi@8102 2747 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
minqi@8102 2748 __ cmpl(temp, code); // have we resolved this bytecode?
minqi@8102 2749 __ jcc(Assembler::equal, resolved);
duke@0 2750
duke@0 2751 // resolve first time through
coleenp@8117 2752 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
minqi@8102 2753 __ movl(temp, code);
duke@0 2754 __ call_VM(noreg, entry, temp);
duke@0 2755 // Update registers with resolved info
jrose@1485 2756 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
duke@0 2757 __ bind(resolved);
duke@0 2758 }
duke@0 2759
twisti@3534 2760 // The cache and index registers must be set before the call
duke@0 2761 void TemplateTable::load_field_cp_cache_entry(Register obj,
duke@0 2762 Register cache,
duke@0 2763 Register index,
duke@0 2764 Register off,
duke@0 2765 Register flags,
duke@0 2766 bool is_static = false) {
duke@0 2767 assert_different_registers(cache, index, flags, off);
duke@0 2768
coleenp@3602 2769 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
duke@0 2770 // Field offset
twisti@3534 2771 __ movptr(off, Address(cache, index, Address::times_ptr,
never@304 2772 in_bytes(cp_base_offset +
never@304 2773 ConstantPoolCacheEntry::f2_offset())));
duke@0 2774 // Flags
twisti@3534 2775 __ movl(flags, Address(cache, index, Address::times_ptr,
duke@0 2776 in_bytes(cp_base_offset +
duke@0 2777 ConstantPoolCacheEntry::flags_offset())));
duke@0 2778
duke@0 2779 // For statics, overwrite the obj register with the holder's java mirror
duke@0 2780 if (is_static) {
twisti@3534 2781 __ movptr(obj, Address(cache, index, Address::times_ptr,
never@304 2782 in_bytes(cp_base_offset +
never@304 2783 ConstantPoolCacheEntry::f1_offset())));
coleenp@3602 2784 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
coleenp@3602 2785 __ movptr(obj, Address(obj, mirror_offset));
duke@0 2786 }
duke@0 2787 }
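The three loads above as plain pointer arithmetic, in a standalone sketch. The offset parameters stand in for the in_bytes(...) constants, and Address::times_ptr scales the index by sizeof(void*); all names here are assumptions.

#include <stdint.h>

struct FieldEntrySketch { intptr_t off; uint32_t flags; void* mirror; };

static FieldEntrySketch load_field_entry(char* cache, intptr_t index,
                                         intptr_t base, intptr_t f1, intptr_t f2,
                                         intptr_t fl, intptr_t mirror_off,
                                         bool is_static) {
  char* entry = cache + index * sizeof(void*);   // Address::times_ptr scaling
  FieldEntrySketch r;
  r.off    = *(intptr_t*)(entry + base + f2);    // field offset lives in f2
  r.flags  = *(uint32_t*)(entry + base + fl);    // access flags and tos state
  r.mirror = 0;
  if (is_static) {
    char* klass = *(char**)(entry + base + f1);  // holder Klass* lives in f1
    r.mirror = *(void**)(klass + mirror_off);    // its java.lang.Class mirror
  }
  return r;
}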
duke@0 2788
duke@0 2789 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
duke@0 2790 Register method,
duke@0 2791 Register itable_index,
duke@0 2792 Register flags,
duke@0 2793 bool is_invokevirtual,
jrose@1485 2794 bool is_invokevfinal, /*unused*/
jrose@1485 2795 bool is_invokedynamic) {
duke@0 2796 // setup registers
duke@0 2797 const Register cache = rcx;
duke@0 2798 const Register index = rdx;
duke@0 2799 assert_different_registers(method, flags);
duke@0 2800 assert_different_registers(method, cache, index);
duke@0 2801 assert_different_registers(itable_index, flags);
duke@0 2802 assert_different_registers(itable_index, cache, index);
duke@0 2803 // determine constant pool cache field offsets
twisti@3534 2804 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
duke@0 2805 const int method_offset = in_bytes(
coleenp@3602 2806 ConstantPoolCache::base_offset() +
twisti@3534 2807 ((byte_no == f2_byte)
duke@0 2808 ? ConstantPoolCacheEntry::f2_offset()
duke@0 2809 : ConstantPoolCacheEntry::f1_offset()));
coleenp@3602 2810 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
duke@0 2811 ConstantPoolCacheEntry::flags_offset());
duke@0 2812 // access constant pool cache fields
coleenp@3602 2813 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
duke@0 2814 ConstantPoolCacheEntry::f2_offset());
duke@0 2815
twisti@3698 2816 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
coleenp@3602 2817 resolve_cache_and_index(byte_no, cache, index, index_size);
jrose@1485 2818 __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
coleenp@3602 2819
duke@0 2820 if (itable_index != noreg) {
coleenp@3602 2821 // pick up itable or appendix index from f2 also:
jrose@1485 2822 __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
duke@0 2823 }
jrose@1485 2824 __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
duke@0 2825 }
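The slot selection above, reduced to a sketch (the entry layout is as in the hypothetical view after resolve_cache_and_index): the method comes from f2 exactly for invokevirtual, from f1 otherwise, and f2 doubles as the itable or appendix index when one is requested.

static long select_method_slot(long f1, long f2, bool is_invokevirtual) {
  return is_invokevirtual ? f2 : f1;  // byte_no == f2_byte exactly for invokevirtual
}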
duke@0 2826
mockner@7970 2827 // The cache and index registers are expected to be set before the call.
duke@0 2828 // Correct values of the cache and index registers are preserved.
mockner@7970 2829 void TemplateTable::jvmti_post_field_access(Register cache,
mockner@7970 2830 Register index,
mockner@7970 2831 bool is_static,
mockner@7970 2832 bool has_tos) {
duke@0 2833 if (JvmtiExport::can_post_field_access()) {
mockner@7970 2834 // Check to see if a field access watch has been set before we take
mockner@7970 2835 // the time to call into the VM.
duke@0 2836 Label L1;
duke@0 2837 assert_different_registers(cache, index, rax);
duke@0 2838 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
mockner@7970 2839 __ testl(rax, rax);
duke@0 2840 __ jcc(Assembler::zero, L1);
duke@0 2841
duke@0 2842 // cache entry pointer
mockner@7970 2843 __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
mockner@7970 2844 __ shll(index, LogBytesPerWord);
mockner@7970 2845 __ addptr(cache, index);
duke@0 2846 if (is_static) {
mockner@7970 2847 __ xorptr(rax, rax); // NULL object reference
duke@0 2848 } else {
mockner@7970 2849 __ pop(atos); // Get the object
mockner@7970 2850 __ verify_oop(rax);
mockner@7970 2851 __ push(atos); // Restore stack state
duke@0 2852 }
mockner@7970 2853 // rax: object pointer or NULL
mockner@7970 2854 // cache: cache entry pointer
mockner@7970 2855 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
mockner@7970 2856 rax, cache);
duke@0 2857 __ get_cache_and_index_at_bcp(cache, index, 1);
duke@0 2858 __ bind(L1);
duke@0 2859 }
duke@0 2860 }
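The shape of the JVMTI fast path above, as a standalone sketch (the counter and VM-entry names are assumptions): a single global counter load decides whether the expensive VM call happens at all.

extern int  jvmti_field_access_count;                         // assumed counter location
extern void post_field_access(void* obj, void* cache_entry);  // assumed VM entry point

static void maybe_post_field_access(void* obj_or_null, void* cache_entry) {
  if (jvmti_field_access_count == 0) return;    // fast path: no watch set
  post_field_access(obj_or_null, cache_entry);  // slow path into the VM
}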
duke@0 2861
duke@0 2862 void TemplateTable::pop_and_check_object(Register r) {
duke@0 2863 __ pop_ptr(r);
duke@0 2864 __ null_check(r); // for field access must check obj.
duke@0 2865 __ verify_oop(r);
duke@0 2866 }
duke@0 2867
minqi@8102 2868 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
duke@0 2869 transition(vtos, vtos);
duke@0 2870
duke@0 2871 const Register cache = rcx;
duke@0 2872 const Register index = rdx;
mockner@7970 2873 const Register obj = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
duke@0 2874 const Register off = rbx;
duke@0 2875 const Register flags = rax;
mockner@7970 2876 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
fparain@13581 2877 const Register flags2 = rdx;
duke@0 2878
coleenp@3602 2879 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
duke@0 2880 jvmti_post_field_access(cache, index, is_static, false);
duke@0 2881 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
duke@0 2882
mockner@7970 2883 const Address field(obj, off, Address::times_1, 0*wordSize);
mockner@7970 2884 NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize));
mockner@7970 2885
thartmann@13007 2886 Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notValueType, notDouble;
duke@0 2887
fparain@13581 2888 __ movl(flags2, flags);
fparain@13581 2889
twisti@3534 2890 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
twisti@3534 2891 // Make sure we don't need to mask flags after the above shift
duke@0 2892 assert(btos == 0, "change code, btos != 0");
duke@0 2893
twisti@3534 2894 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
mockner@7970 2895
duke@0 2896 __ jcc(Assembler::notZero, notByte);
duke@0 2897 // btos
thartmann@13007 2898 if (!is_static) pop_and_check_object(obj);
duke@0 2899 __ load_signed_byte(rax, field);
duke@0 2900 __ push(btos);
duke@0 2901 // Rewrite bytecode to be faster
minqi@8102 2902 if (!is_static && rc == may_rewrite) {
duke@0 2903 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
duke@0 2904 }
duke@0 2905 __ jmp(Done);
duke@0 2906
duke@0 2907 __ bind(notByte);
thartmann@13007 2908
thartmann@13007 2909 __ cmpl(flags, qtos);
thartmann@13007 2910 __ jcc(Assembler::notEqual, notValueType);
thartmann@13007 2911 // qtos
thartmann@13007 2912 if (is_static) {
fparain@13041 2913 Label initialized;
fparain@13041 2914 // Call into the VM below if the static field has not been initialized yet
thartmann@13007 2915 __ load_heap_oop(rax, field);
fparain@13041 2916 __ testptr(rax, rax);
fparain@13041 2917 __ jcc(Assembler::notZero, initialized);
fparain@13581 2918 __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
fparain@13041 2919 __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::initialize_static_value_field),
fparain@13581 2920 obj, flags2);
fparain@13041 2921 __ verify_oop(rax);
fparain@13041 2922 __ bind(initialized);
thartmann@13007 2923 __ push(qtos);
thartmann@13007 2924 } else {
thartmann@13007 2925 pop_and_check_object(obj);
fparain@13581 2926 __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
thartmann@13007 2927 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::qgetfield),
fparain@13581 2928 obj, flags2);
thartmann@13007 2929 __ verify_oop(rax);
thartmann@13007 2930 __ push(qtos);
thartmann@13007 2931 // Bytecode rewrite?
thartmann@13007 2932 }
thartmann@13007 2933 __ jmp(Done);
thartmann@13007 2934
thartmann@13007 2935 __ bind(notValueType);
thartmann@13007 2936
thartmann@13007 2937 if (!is_static) pop_and_check_object(obj);
thartmann@13007 2938
coleenp@10885 2939 __ cmpl(flags, ztos);
coleenp@10885 2940 __ jcc(Assembler::notEqual, notBool);
coleenp@10885 2941
coleenp@10885 2942 // ztos (same code as btos)
coleenp@10885 2943 __ load_signed_byte(rax, field);
coleenp@10885 2944 __ push(ztos);
coleenp@10885 2945 // Rewrite bytecode to be faster
coleenp@10885 2946 if (!is_static && rc == may_rewrite) {
coleenp@10885 2947 // Use btos rewriting; no truncation to the t/f bit is needed for getfield.
coleenp@10885 2948 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
coleenp@10885 2949 }
coleenp@10885 2950 __ jmp(Done);
coleenp@10885 2951
coleenp@10885 2952 __ bind(notBool);
duke@0 2953 __ cmpl(flags, atos);
duke@0 2954 __ jcc(Assembler::notEqual, notObj);
duke@0 2955 // atos
coleenp@113 2956 __ load_heap_oop(rax, field);
duke@0 2957 __ push(atos);
minqi@8102 2958 if (!is_static && rc == may_rewrite) {
duke@0 2959 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
duke@0 2960 }
duke@0 2961 __ jmp(Done);
duke@0 2962
duke@0 2963 __ bind(notObj);
duke@0 2964 __ cmpl(flags, itos);
duke@0 2965 __ jcc(Assembler::notEqual, notInt);
duke@0 2966 // itos
duke@0 2967 __ movl(rax, field);
duke@0 2968 __ push(itos);
duke@0 2969 // Rewrite bytecode to be faster
minqi@8102 2970 if (!is_static && rc == may_rewrite) {
duke@0 2971 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
duke@0 2972 }
duke@0 2973 __ jmp(Done);
duke@0 2974
duke@0 2975 __ bind(notInt);
duke@0 2976 __ cmpl(flags, ctos);
duke@0 2977 __ jcc(Assembler::notEqual, notChar);
duke@0 2978 // ctos
jrose@622 2979 __ load_unsigned_short(rax, field);
duke@0 2980 __ push(ctos);
duke@0 2981 // Rewrite bytecode to be faster
minqi@8102 2982 if (!is_static && rc == may_rewrite) {
duke@0 2983 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
duke@0 2984 }
duke@0 2985 __ jmp(Done);
duke@0 2986
duke@0 2987 __ bind(notChar);
duke@0 2988 __ cmpl(flags, stos);
duke@0 2989 __ jcc(Assembler::notEqual, notShort);
duke@0 2990 // stos
jrose@622 2991 __ load_signed_short(rax, field);
duke@0 2992 __ push(stos);
duke@0 2993 // Rewrite bytecode to be faster
minqi@8102 2994 if (!is_static && rc == may_rewrite) {
duke@0 2995 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
duke@0 2996 }
duke@0 2997 __ jmp(Done);
duke@0 2998
duke@0 2999 __ bind(notShort);
duke@0 3000 __ cmpl(flags, ltos);
duke@0 3001 __ jcc(Assembler::notEqual, notLong);
duke@0 3002 // ltos
mockner@7970 3003
mockner@7970 3004 #ifndef _LP64
mockner@7970 3005 // Generate code as if the field were volatile. There just aren't enough
mockner@7970 3006 // registers to track that information, and this code is faster than the test.
mockner@7970 3007 __ fild_d(field); // Must load atomically
mockner@7970 3008 __ subptr(rsp,2*wordSize); // Make space for store
mockner@7970 3009 __ fistp_d(Address(rsp,0));
mockner@7970 3010 __ pop(rax);
mockner@7970 3011 __ pop(rdx);
mockner@7970 3012 #else
duke@0 3013 __ movq(rax, field);
mockner@7970 3014 #endif
mockner@7970 3015
duke@0 3016 __ push(ltos);
duke@0 3017 // Rewrite bytecode to be faster
minqi@8102 3018 LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
duke@0 3019 __ jmp(Done);
duke@0 3020
duke@0 3021 __ bind(notLong);
duke@0 3022 __ cmpl(flags, ftos);
duke@0 3023 __ jcc(Assembler::notEqual, notFloat);
duke@0 3024 // ftos
mockner@7970 3025
zmajo@8879 3026 __ load_float(field);
duke@0 3027 __ push(ftos);
duke@0 3028 // Rewrite bytecode to be faster
minqi@8102 3029 if (!is_static && rc == may_rewrite) {
duke@0 3030 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
duke@0 3031 }
duke@0 3032 __ jmp(Done);
duke@0 3033
duke@0 3034 __ bind(notFloat);
duke@0 3035 #ifdef ASSERT
duke@0 3036 __ cmpl(flags, dtos);
duke@0 3037 __ jcc(Assembler::notEqual, notDouble);
duke@0 3038 #endif
duke@0 3039 // dtos
zmajo@8879 3040 __ load_double(field);
duke@0 3041 __ push(dtos);
duke@0 3042 // Rewrite bytecode to be faster
minqi@8102 3043 if (!is_static && rc == may_rewrite) {
duke@0 3044 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
duke@0 3045 }
duke@0 3046 #ifdef ASSERT
duke@0 3047 __ jmp(Done);
duke@0 3048
mockner@7970 3049
duke@0 3050 __ bind(notDouble);
duke@0 3051 __ stop("Bad state");
duke@0 3052 #endif
duke@0 3053
duke@0 3054 __ bind(Done);
duke@0 3055 // [jk] not needed currently
duke@0 3056 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
duke@0 3057 // Assembler::LoadStore));
duke@0 3058 }
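The branch chain above is effectively a switch on the tos state extracted from the flags word. A standalone sketch follows; the enum order, shift, and mask are assumptions chosen to mirror the TosState values (with btos == 0, as the assert requires).

static void dispatch_getfield_sketch(unsigned flags) {
  enum Tos { btos = 0, ztos, ctos, stos, itos, ltos, ftos, dtos, qtos, atos }; // order assumed
  const int tos_state_shift = 28;   // assumed: tos state in the top 4 bits
  const int tos_state_mask  = 0xF;
  switch ((flags >> tos_state_shift) & tos_state_mask) {
    case btos: /* load_signed_byte    */ break;
    case ztos: /* load_signed_byte    */ break;
    case qtos: /* value type: VM call */ break;
    case atos: /* load_heap_oop       */ break;
    case itos: /* movl                */ break;
    case ctos: /* load_unsigned_short */ break;
    case stos: /* load_signed_short   */ break;
    case ltos: /* movq (atomic load)  */ break;
    case ftos: /* load_float          */ break;
    case dtos: /* load_double         */ break;
  }
}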
duke@0 3059
duke@0 3060 void TemplateTable::getfield(int byte_no) {
duke@0 3061 getfield_or_static(byte_no, false);
duke@0 3062 }
duke@0 3063
minqi@8102 3064 void TemplateTable::nofast_getfield(int byte_no) {
minqi@8102 3065 getfield_or_static(byte_no, false, may_not_rewrite);
minqi@8102 3066 }
minqi@8102 3067
duke@0 3068 void TemplateTable::getstatic(int byte_no) {
duke@0 3069 getfield_or_static(byte_no, true);
duke@0 3070 }
duke@0 3071
thartmann@13007 3072 void TemplateTable::vwithfield() {
thartmann@13007 3073 transition(vtos, qtos);
thartmann@13007 3074
thartmann@13007 3075 Register cache = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
thartmann@13007 3076 Register index = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
thartmann@13007 3077
thartmann@13007 3078 resolve_cache_and_index(f2_byte, cache, index, sizeof(u2));
thartmann@13007 3079
thartmann@13007 3080 call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::vwithfield), cache);
thartmann@13007 3081 // the new value type is returned in rbx
thartmann@13007 3082 // the stack adjustment is returned in rax
thartmann@13007 3083 __ verify_oop(rbx);
thartmann@13007 3084 __ addptr(rsp, rax);
thartmann@13007 3085 __ movptr(rax, rbx);
thartmann@13007 3086 }
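The calling convention used above, restated as a sketch (the struct and function names are assumptions): the VM returns both the new value instance and the number of bytes of operand stack to discard.

#include <stdint.h>

struct VwithfieldResult {      // models the rbx/rax pair documented above
  void*    new_value;          // new value-type instance (returned in rbx)
  intptr_t stack_adjustment;   // bytes of operand stack to pop (returned in rax)
};

static char* apply_vwithfield(char* sp, const VwithfieldResult* r, void** tos) {
  sp  += r->stack_adjustment;  // corresponds to: __ addptr(rsp, rax)
  *tos = r->new_value;         // corresponds to: __ movptr(rax, rbx)
  return sp;
}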
mockner@7970 3087
duke@0 3088 // The cache and index registers are expected to be set before the call.
duke@0 3089 // The function may destroy various registers, just not the cache and index registers.
duke@0 3090 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
mockner@7970 3091
mockner@7970 3092 const Register robj = LP64_ONLY(c_rarg2) NOT_LP64(rax);
mockner@7970 3093 const Register RBX = LP64_ONLY(c_rarg1) NOT_LP64(rbx);
mockner@7970 3094 const Register RCX = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
mockner@7970 3095 const Register RDX = LP64_ONLY(rscratch1) NOT_LP64(rdx);
duke@0 3096
coleenp@3602 3097 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
duke@0 3098
duke@0 3099 if (JvmtiExport::can_post_field_modification()) {
duke@0 3100 // Check to see if a field modification watch has been set before
duke@0 3101 // we take the time to call into the VM.
duke@0 3102 Label L1;
duke@0 3103 assert_different_registers(cache, index, rax);
duke@0 3104 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
duke@0 3105 __ testl(rax, rax);
duke@0 3106 __ jcc(Assembler::zero, L1);
duke@0 3107
mockner@7970 3108 __ get_cache_and_index_at_bcp(robj, RDX, 1);
mockner@7970 3109
duke@0 3110
duke@0 3111 if (is_static) {
duke@0 3112 // Life is simple. Null out the object pointer.
mockner@7970 3113 __ xorl(RBX, RBX);
mockner@7970 3114
duke@0 3115 } else {
duke@0 3116 // Life is harder. The stack holds the value on top, followed by
duke@0 3117 // the object. We don't know the size of the value, though; it
duke@0 3118 // could be one or two words depending on its type. As a result,
duke@0 3119 // we must find the type to determine where the object is.
mockner@7970 3120 #ifndef _LP64
mockner@7970 3121 Label two_word, valsize_known;
mockner@7970 3122 #endif
mockner@7970 3123 __ movl(RCX, Address(robj, RDX,
mockner@7970 3124 Address::times_ptr,
duke@0 3125 in_bytes(cp_base_offset +
duke@0 3126 ConstantPoolCacheEntry::flags_offset())));
mockner@7970 3127 NOT_LP64(__ mov(rbx, rsp));
mockner@7970 3128 __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
mockner@7970 3129
twisti@3534 3130 // Make sure we don't need to mask RCX after the above shift
twisti@3534 3131 ConstantPoolCacheEntry::verify_tos_state_shift();
mockner@7970 3132 #ifdef _LP64
never@304 3133 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
duke@0 3134 __ cmpl(c_rarg3, ltos);
never@304 3135 __ cmovptr(Assembler::equal,
never@304 3136 c_rarg1, at_tos_p2()); // ltos (two word jvalue)
duke@0 3137 __ cmpl(c_rarg3, dtos);
never@304 3138 __ cmovptr(Assembler::equal,
never@304 3139 c_rarg1, at_tos_p2()); // dtos (two word jvalue)
mockner@7970 3140 #else
mockner@7970 3141 __ cmpl(rcx, ltos);
mockner@7970 3142 __ jccb(Assembler::equal, two_word);
mockner@7970 3143 __ cmpl(rcx, dtos);
mockner@7970 3144 __ jccb(Assembler::equal, two_word);
mockner@7970 3145 __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one-word jvalue (not ltos, dtos)
mockner@7970 3146 __ jmpb(valsize_known);
mockner@7970 3147
mockner@7970 3148 __ bind(two_word);
mockner@7970 3149 __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two-word jvalue
mockner@7970 3150
mockner@7970 3151 __ bind(valsize_known);
mockner@7970 3152 // set up the object pointer
mockner@7970 3153 __ movptr(rbx, Address(rbx, 0));
mockner@7970 3154 #endif
duke@0 3155 }
duke@0 3156 // cache entry pointer
mockner@7970 3157 __ addptr(robj, in_bytes(cp_base_offset));
mockner@7970 3158 __ shll(RDX, LogBytesPerWord);
mockner@7970 3159 __ addptr(robj, RDX);
duke@0 3160 // object (tos)
mockner@7970 3161 __ mov(RCX, rsp);
duke@0 3162 // c_rarg1: object pointer set up above (NULL if static)
duke@0 3163 // c_rarg2: cache entry pointer
duke@0 3164 // c_rarg3: jvalue object on the stack
duke@0 3165 __ call_VM(noreg,
duke@0 3166 CAST_FROM_FN_PTR(address,
duke@0 3167 InterpreterRuntime::post_field_modification),
mockner@7970 3168 RBX, robj, RCX);
duke@0 3169 __ get_cache_and_index_at_bcp(cache, index, 1);
duke@0 3170 __ bind(L1);
duke@0 3171 }
duke@0 3172 }
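The stack walk described in the "Life is harder" comment above, as a sketch (names assumed): the object reference sits one or two words below the top of the expression stack, depending on whether the value on top is a long or double.

// The value is one word, or two for ltos/dtos; the object reference sits
// directly below it on the expression stack.
static void** object_slot(void** sp, bool value_is_two_words) {
  return sp + (value_is_two_words ? 2 : 1);  // stack grows toward lower addresses
}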
duke@0 3173
minqi@8102 3174 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
duke@0 3175 transition(vtos, vtos);
duke@0 3176
duke@0 3177 const Register cache = rcx;
duke@0 3178 const Register index = rdx;
duke@0 3179 const Register obj = rcx;
duke@0 3180 const Register off = rbx;
duke@0 3181 const Register flags = rax;
mockner@7970 3182 const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
fparain@13581 3183 const Register flags2 = rdx;
duke@0 3184
coleenp@3602 3185 resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
duke@0 3186 jvmti_post_field_mod(cache, index, is_static);
duke@0 3187 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
duke@0 3188
duke@0 3189 // [jk] not needed currently
duke@0 3190 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
duke@0 3191 // Assembler::StoreStore));
duke@0 3192
duke@0 3193 Label notVolatile, Done;
fparain@13581 3194
fparain@13581 3195 __ movl(flags2, flags);
duke@0 3196
mockner@7970 3197 // field addresses
mockner@7970 3198 const Address field(obj, off, Address::times_1, 0*wordSize);
mockner@7970 3199 NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
duke@0 3200
coleenp@10885 3201 Label notByte, notBool, notInt, notShort, notChar,
thartmann@13007 3202 notLong, notFloat, notObj, notValueType, notDouble;
duke@0 3203
twisti@3534 3204 __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
duke@0 3205
duke@0 3206 assert(btos == 0, "change code, btos != 0");
twisti@3534 3207 __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
duke@0 3208 __ jcc(Assembler::notZero, notByte);
twisti@2615 3209
duke@0 3210 // btos
twisti@2615 3211 {
twisti@2615 3212 __ pop(btos);
twisti@2615 3213 if (!is_static) pop_and_check_object(obj);
twisti@2615 3214 __ movb(field, rax);
minqi@8102 3215 if (!is_static && rc == may_rewrite) {
twisti@2615 3216 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
twisti@2615 3217 }
twisti@2615 3218 __ jmp(Done);
duke@0 3219 }
duke@0 3220
duke@0 3221 __ bind(notByte);
coleenp@10885 3222 __ cmpl(flags, ztos);
coleenp@10885 3223 __ jcc(Assembler::notEqual, notBool);
coleenp@10885 3224
coleenp@10885 3225 // ztos
coleenp@10885 3226 {
coleenp@10885 3227 __ pop(ztos);
coleenp@10885 3228 if (!is_static) pop_and_check_object(obj);
coleenp@10885 3229 __ andl(rax, 0x1);
coleenp@10885 3230 __ movb(field, rax);
coleenp@10885 3231 if (!is_static && rc == may_rewrite) {
coleenp@10885 3232 patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
coleenp@10885 3233 }
coleenp@10885 3234 __ jmp(Done);
coleenp@10885 3235 }
coleenp@10885 3236
coleenp@10885 3237 __ bind(notBool);
duke@0 3238 __ cmpl(flags, atos);
duke@0 3239 __ jcc(Assembler::notEqual, notObj);
twisti@2615 3240
duke@0 3241 // atos
twisti@2615 3242 {
twisti@2615 3243 __ pop(atos);
twisti@2615 3244 if (!is_static) pop_and_check_object(obj);
twisti@2615 3245 // Store into the field
twisti@2615 3246 do_oop_store(_masm, field, rax, _bs->kind(), false);
minqi@8102 3247 if (!is_static && rc == may_rewrite) {
twisti@2615 3248 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
twisti@2615 3249 }
twisti@2615 3250 __ jmp(Done);
duke@0 3251 }
duke@0 3252
duke@0 3253 __ bind(notObj);
thartmann@13007 3254 __ cmpl(flags, qtos);
thartmann@13007 3255 __ jcc(Assembler::notEqual, notValueType);
thartmann@13007 3256
thartmann@13007 3257 // qtos
thartmann@13007 3258 {
thartmann@13007 3259 __ pop(qtos); // => rax == value
thartmann@13007 3260 if (!is_static) {
thartmann@13007 3261 // value types in non-static fields are embedded
thartmann@13007 3262 pop_and_check_object(rbx);
thartmann@13007 3263 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::qputfield),
fparain@13581 3264 rbx, rax, flags2);
thartmann@13007 3265 __ jmp(notVolatile); // value types are never volatile
thartmann@13007 3266 } else {
thartmann@13007 3267 // Store into the static field
thartmann@13007 3268 // Value types in static fields are currently handled with indirection,
fparain@13041 3269 // but a copy to the Java heap might be required if the value is currently
fparain@13041 3270 // stored in a thread-local buffer
fparain@13581 3271 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::qputstatic), rax, off, obj);
thartmann@13007 3272 }
thartmann@13007 3273 __ jmp(Done);
thartmann@13007 3274 }
thartmann@13007 3275
thartmann@13007 3276 __ bind(notValueType);
duke@0 3277 __ cmpl(flags, itos);
duke@0 3278 __ jcc(Assembler::notEqual, notInt);
twisti@2615 3279
duke@0 3280 // itos
twisti@2615 3281 {
twisti@2615 3282 __ pop(itos);
twisti@2615 3283 if (!is_static) pop_and_check_object(obj);
twisti@2615 3284 __ movl(field, rax);
minqi@8102 3285 if (!is_static && rc == may_rewrite) {
twisti@2615 3286 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
twisti@2615 3287 }
twisti@2615 3288 __ jmp(Done);
duke@0 3289 }
duke@0 3290
duke@0 3291 __ bind(notInt);
duke@0 3292 __ cmpl(flags, ctos);
duke@0 3293 __ jcc(Assembler::notEqual, notChar);
twisti@2615 3294
duke@0 3295 // ctos
twisti@2615 3296 {
twisti@2615 3297 __ pop(ctos);
twisti@2615 3298 if (!is_static) pop_and_check_object(obj);
twisti@2615 3299 __ movw(field, rax);
minqi@8102 3300 if (!is_static && rc == may_rewrite) {
twisti@2615 3301 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
twisti@2615 3302 }
twisti@2615