annotate src/cpu/sparc/vm/templateTable_sparc.cpp @ 2346:e1162778c1c8

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error. Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer. Reviewed-by: kvn, iveresov, never, tonyp, dholmes
author johnc
date Thu, 07 Apr 2011 09:53:20 -0700
parents 8033953d67ff
children 92add02409c9
rev   line source
duke@0 1 /*
phh@1988 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #include "precompiled.hpp"
stefank@1879 26 #include "interpreter/interpreter.hpp"
stefank@1879 27 #include "interpreter/interpreterRuntime.hpp"
stefank@1879 28 #include "interpreter/templateTable.hpp"
stefank@1879 29 #include "memory/universe.inline.hpp"
stefank@1879 30 #include "oops/methodDataOop.hpp"
stefank@1879 31 #include "oops/objArrayKlass.hpp"
stefank@1879 32 #include "oops/oop.inline.hpp"
stefank@1879 33 #include "prims/methodHandles.hpp"
stefank@1879 34 #include "runtime/sharedRuntime.hpp"
stefank@1879 35 #include "runtime/stubRoutines.hpp"
stefank@1879 36 #include "runtime/synchronizer.hpp"
duke@0 37
duke@0 38 #ifndef CC_INTERP
duke@0 39 #define __ _masm->
duke@0 40
ysr@342 41 // Misc helpers
ysr@342 42
ysr@342 43 // Do an oop store like *(base + index + offset) = val
ysr@342 44 // index can be noreg,
// Emit code for an oop store of the form *(base + index + offset) = val,
// wrapped in whatever GC write barriers the active BarrierSet requires.
// Addressing: exactly one of index/offset is used — index may be noreg,
// and when it is not, offset must be 0 (asserted below).
//   val     - register holding the oop to store; G0 means storing NULL,
//             in which case the post-barrier is skipped.
//   tmp     - scratch register; must not alias base, index, or val.
//   precise - if true, the post-barrier is given the exact field address
//             (base is advanced by index/offset) rather than just the
//             object base.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // G1 SATB pre-barrier: load and record the previous value
        // before it is overwritten.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // The store itself (compressed-oop aware).
        if (index == noreg ) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            // Form the exact field address for the post-barrier.
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, val, tmp);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        // Plain card-table GCs: store, then dirty the card.
        if (index == noreg ) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // Not expected in the interpreter.
      ShouldNotReachHere();
      break;
    default :
      ShouldNotReachHere();

  }
}
ysr@342 117
duke@0 118
duke@0 119 //----------------------------------------------------------------------------------------------------
duke@0 120 // Platform-dependent initialization
duke@0 121
// Platform-dependent initialization hook; nothing to do on SPARC.
void TemplateTable::pd_initialize() {
  // (none)
}
duke@0 125
duke@0 126
duke@0 127 //----------------------------------------------------------------------------------------------------
duke@0 128 // Condition conversion
duke@0 129 Assembler::Condition ccNot(TemplateTable::Condition cc) {
duke@0 130 switch (cc) {
duke@0 131 case TemplateTable::equal : return Assembler::notEqual;
duke@0 132 case TemplateTable::not_equal : return Assembler::equal;
duke@0 133 case TemplateTable::less : return Assembler::greaterEqual;
duke@0 134 case TemplateTable::less_equal : return Assembler::greater;
duke@0 135 case TemplateTable::greater : return Assembler::lessEqual;
duke@0 136 case TemplateTable::greater_equal: return Assembler::less;
duke@0 137 }
duke@0 138 ShouldNotReachHere();
duke@0 139 return Assembler::zero;
duke@0 140 }
duke@0 141
duke@0 142 //----------------------------------------------------------------------------------------------------
duke@0 143 // Miscelaneous helper routines
duke@0 144
duke@0 145
duke@0 146 Address TemplateTable::at_bcp(int offset) {
duke@0 147 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
twisti@727 148 return Address(Lbcp, offset);
duke@0 149 }
duke@0 150
duke@0 151
// Rewrite the current bytecode (at Lbcp) to the faster variant held in
// Rbyte_code. If load_bc_into_scratch, `bc` is first materialized into
// Rbyte_code. When JVMTI breakpoints are possible, a _breakpoint opcode
// at this bcp is patched through the breakpoint table in the VM instead
// of being overwritten directly.
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
                                   Register Rscratch,
                                   bool load_bc_into_scratch /*=true*/) {
  // With sharing on, may need to test methodOop flag.
  if (!RewriteBytecodes) return;
  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    __ ldub(at_bcp(0), Rscratch);
    __ cmp(Rscratch, Bytecodes::_breakpoint);
    __ br(Assembler::notEqual, false, Assembler::pt, fast_patch);
    __ delayed()->nop();  // don't bother to hoist the stb here
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
    __ ba(false, patch_done);
    __ delayed()->nop();
    __ bind(fast_patch);
  }
#ifdef ASSERT
  // Sanity check: the byte being replaced must be either the original
  // bytecode or (because patching can race) the fast variant itself.
  Bytecodes::Code orig_bytecode =  Bytecodes::java_code(bc);
  Label okay;
  __ ldub(at_bcp(0), Rscratch);
  __ cmp(Rscratch, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed() ->cmp(Rscratch, Rbyte_code);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->nop();
  __ stop("Rewriting wrong bytecode location");
  __ bind(okay);
#endif
  // Store the fast bytecode over the slow one.
  __ stb(Rbyte_code, at_bcp(0));
  __ bind(patch_done);
}
duke@0 186
duke@0 187 //----------------------------------------------------------------------------------------------------
duke@0 188 // Individual instructions
duke@0 189
// nop bytecode: emits no code.
void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}
duke@0 194
// Template used for slots that must never be executed; stops the VM.
void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}
duke@0 199
// aconst_null: push null by clearing the oop tos register.
void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}
duke@0 204
duke@0 205
// iconst_<n>: push the given int constant into the int tos register.
void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}
duke@0 210
duke@0 211
// lconst_<n>: push the given (non-negative) long constant.
// On 32-bit, the value fits the low word and the high word is cleared.
void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}
duke@0 222
duke@0 223
// fconst_<n> (n in {0,1,2}): load the float constant from a static
// slot in the VM image via sethi + low-10-bit displacement.
void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}
duke@0 238
duke@0 239
// dconst_<n> (n in {0,1}): load the double constant from a static
// slot in the VM image via sethi + low-10-bit displacement.
void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}
duke@0 253
duke@0 254
duke@0 255 // %%%%% Should factore most snippet templates across platforms
duke@0 256
// bipush: push the sign-extended byte operand following the opcode.
void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}
duke@0 261
// sipush: push the signed 2-byte operand following the opcode.
void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}
duke@0 266
// ldc / ldc_w: push a constant-pool entry (class, String, int, or float).
// Dispatches on the constant-pool tag: unresolved class/string entries go
// to the VM (InterpreterRuntime::ldc); resolved entries are loaded inline.
// `wide` selects the 2-byte (ldc_w) vs 1-byte (ldc) index form.
void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  // O0 = constant pool, O2 = tags array.
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);
  __ cmp(O2, JVM_CONSTANT_UnresolvedString);    // unresolved string? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClass);     // unresolved class? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClassInError); // unresolved class in error state
  __ brx(Assembler::equal, true, Assembler::pn, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_Class);    // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  // Slow path: resolve in the VM, which returns the oop in Otos_i.
  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notClass);
 // __ add(O0, base_offset, O0);     // already done in the delay slot above
  __ sll(O1, LogBytesPerWord, O1);   // scale index to a pool-entry offset
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notInt);
 // __ cmp(O2, JVM_CONSTANT_String); // done in the delay slot above
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);  // speculative float load
  __ ld_ptr(O0, O1, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notString);
  // Remaining case: float — already loaded into Ftos_f in the delay slot.
 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}
duke@0 334
jrose@1522 335 // Fast path for caching oop constants.
jrose@1522 336 // %%% We should use this to handle Class and String constants also.
jrose@1522 337 // %%% It will simplify the ldc/primitive path considerably.
// fast_aldc / fast_aldc_w: push a cached oop constant resolved through
// the constant-pool cache (method-handle support). If the cached value
// is a system object array, it wraps either a pending exception (length
// != 0) or null (length == 0); both cases are unwrapped here.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableMethodHandles) {
    // We should not encounter this bytecode if !EnableMethodHandles.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;

  // Resolves the constant and leaves the oop in Otos_i.
  resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));

  __ verify_oop(Otos_i);

  // Check whether the value is a systemObjArray wrapper.
  Label L_done;
  const Register Rcon_klass = G3_scratch;  // same as Rcache
  const Register Rarray_klass = G4_scratch;  // same as Rscratch
  __ load_klass(Otos_i, Rcon_klass);
  AddressLiteral array_klass_addr((address)Universe::systemObjArrayKlassObj_addr());
  __ load_contents(array_klass_addr, Rarray_klass);
  __ cmp(Rarray_klass, Rcon_klass);
  __ brx(Assembler::notEqual, false, Assembler::pt, L_done);
  __ delayed()->nop();
  __ ld(Address(Otos_i, arrayOopDesc::length_offset_in_bytes()), Rcon_klass);
  __ tst(Rcon_klass);
  __ brx(Assembler::zero, true, Assembler::pt, L_done);
  __ delayed()->clr(Otos_i);  // executed only if branch is taken

  // Load the exception from the system-array which wraps it:
  __ load_heap_oop(Otos_i, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);

  __ bind(L_done);
}
jrose@1522 379
// ldc2_w: push a 2-word constant (long or double) from the constant pool.
// Dispatches on the constant-pool tag; no resolution is needed for these.
void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label retry, resolved, Long, exit;

  __ bind(retry);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  // O0 = constant pool, O2 = tags array.
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // G3_scratch = address of the pool entry (minus base_offset).
  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp(O2, JVM_CONSTANT_Double);
  __ brx(Assembler::notEqual, false, Assembler::pt, Long);
  __ delayed()->nop();
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  // 32-bit: load the two halves separately to tolerate 4-byte alignment.
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}
duke@0 427
duke@0 428
// Load the 1-byte local-variable index operand at `offset` from the bcp.
void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}
duke@0 432
duke@0 433
// Load the 2-byte local-variable index of a wide-prefixed bytecode.
void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}
duke@0 438
// iload: push an int local. With RewriteFrequentPairs, peeks at the next
// bytecode and rewrites iload,iload -> fast_iload2, iload,caload ->
// fast_icaload, or standalone iload -> fast_iload.
void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload or a caload, and therefore
    // an iload pair.
    __ cmp(G3_scratch, (int)Bytecodes::_iload);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}
duke@0 477
// fast_iload2: fused form of two consecutive iloads — pushes the first
// local, then leaves the second in tos.
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}
duke@0 486
// fast_iload: iload without the pair-rewrite check.
void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}
duke@0 492
// lload: push a long local.
void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}
duke@0 498
duke@0 499
// fload: push a float local.
void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}
duke@0 505
duke@0 506
// dload: push a double local.
void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}
duke@0 512
duke@0 513
// aload: push a reference local.
void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}
duke@0 519
duke@0 520
// wide iload: push an int local addressed by a 2-byte index.
void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}
duke@0 526
duke@0 527
// wide lload: push a long local addressed by a 2-byte index.
void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}
duke@0 533
duke@0 534
// wide fload: push a float local addressed by a 2-byte index.
void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}
duke@0 540
duke@0 541
// wide dload: push a double local addressed by a 2-byte index.
void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}
duke@0 547
duke@0 548
// wide aload: push a reference local addressed by a 2-byte index.
void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}
duke@0 555
duke@0 556
// iaload: load int from array; index_check does the null/range checks
// and leaves the scaled element address in O3.
void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}
duke@0 564
duke@0 565
// laload: load long from array (index/null checked by index_check).
void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}
duke@0 573
duke@0 574
// faload: load float from array (index/null checked by index_check).
void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}
duke@0 582
duke@0 583
// daload: load double from array (index/null checked by index_check).
void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}
duke@0 591
duke@0 592
duke@0 593 void TemplateTable::aaload() {
duke@0 594 transition(itos, atos);
duke@0 595 // Otos_i: index
duke@0 596 // tos: array
coleenp@113 597 __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
coleenp@113 598 __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
duke@0 599 __ verify_oop(Otos_i);
duke@0 600 }
duke@0 601
duke@0 602
// baload: load byte/boolean from array, sign-extended.
void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}
duke@0 610
duke@0 611
// caload: load char from array, zero-extended.
void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}
duke@0 619
// fast_icaload: fused iload + caload — loads the int local as the
// index, then loads the char element.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}
duke@0 629
duke@0 630
// saload: load short from array, sign-extended.
void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}
duke@0 638
duke@0 639
// iload_<n>: push int local n.
void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}
duke@0 644
duke@0 645
// lload_<n>: push long local n (occupies slots n and n+1).
void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}
duke@0 651
duke@0 652
// fload_<n>: push float local n.
void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals,  Interpreter::local_offset_in_bytes(n),     Ftos_f );
}
duke@0 658
duke@0 659
// dload_<n>: push double local n (occupies slots n and n+1).
void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}
duke@0 665
duke@0 666
// aload_<n>: push reference local n.
void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}
duke@0 671
duke@0 672
// aload_0: push reference local 0. With RewriteFrequentPairs, peeks at
// the next bytecode and rewrites aload_0,<fast getfield> pairs into the
// fused _fast_Xaccess_0 bytecodes (or _fast_aload_0 otherwise).
void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    // (a plain _getfield may still be rewritten to a fast form later)
    __ cmp(G3_scratch, (int)Bytecodes::_getfield);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}
duke@0 732
duke@0 733
// istore: pop int tos into a local.
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}
duke@0 739
duke@0 740
// lstore: pop long tos into a local.
void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}
duke@0 746
duke@0 747
// fstore: pop float tos into a local.
void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}
duke@0 753
duke@0 754
// dstore: pop double tos into a local.
void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}
duke@0 760
duke@0 761
// astore: pop tos into a reference local. Pops explicitly (vtos->vtos)
// because the value may be either an oop or a return address (jsr/ret),
// hence verify_oop_or_return_address rather than verify_oop.
void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);  // pop the expression-stack slot
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}
duke@0 770
duke@0 771
// wide istore: pop int into a local addressed by a 2-byte index.
void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}
duke@0 778
duke@0 779
// wide lstore: pop long into a local addressed by a 2-byte index.
void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}
duke@0 786
duke@0 787
// wide fstore: pop float into a local addressed by a 2-byte index.
void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}
duke@0 794
duke@0 795
// wide dstore: pop double into a local addressed by a 2-byte index.
void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}
duke@0 802
duke@0 803
// wide astore: pop tos into a reference local addressed by a 2-byte
// index. Like astore, the value may be an oop or a return address.
void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);  // pop the expression-stack slot
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}
duke@0 812
duke@0 813
// iastore bytecode: array[index] = int value.  index_check is expected to
// null/bounds-check the array and leave the scaled element address in O2
// (see InterpreterMacroAssembler::index_check) -- confirm against that helper.
duke@0 814 void TemplateTable::iastore() {
duke@0 815 transition(itos, vtos);
duke@0 816 __ pop_i(O2); // index
duke@0 817 // Otos_i: val
duke@0 818 // O3: array
duke@0 819 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
duke@0 820 __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
duke@0 821 }
duke@0 822
duke@0 823
// lastore bytecode: array[index] = long value (8-byte element scaling).
duke@0 824 void TemplateTable::lastore() {
duke@0 825 transition(ltos, vtos);
duke@0 826 __ pop_i(O2); // index
duke@0 827 // Otos_l: val
duke@0 828 // O3: array
duke@0 829 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
duke@0 830 __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
duke@0 831 }
duke@0 832
duke@0 833
// fastore bytecode: array[index] = float value (4-byte element scaling).
duke@0 834 void TemplateTable::fastore() {
duke@0 835 transition(ftos, vtos);
duke@0 836 __ pop_i(O2); // index
duke@0 837 // Ftos_f: val
duke@0 838 // O3: array
duke@0 839 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
duke@0 840 __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
duke@0 841 }
duke@0 842
duke@0 843
// dastore bytecode: array[index] = double value (8-byte element scaling).
duke@0 844 void TemplateTable::dastore() {
duke@0 845 transition(dtos, vtos);
duke@0 846 __ pop_i(O2); // index
duke@0 847 // Fos_d: val
duke@0 848 // O3: array
duke@0 849 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
duke@0 850 __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
duke@0 851 }
duke@0 852
duke@0 853
// aastore bytecode: array[index] = reference value, with the dynamic array
// store check (ArrayStoreException on type mismatch) and the GC write barrier
// applied via do_oop_store (keyed by the active barrier set, _bs->kind()).
// The three operands are peeked (not popped) so that if a GC happens inside
// the check/barrier machinery the stack still describes them; Lesp is bumped
// by 3 slots only on the way out.
duke@0 854 void TemplateTable::aastore() {
duke@0 855 Label store_ok, is_null, done;
duke@0 856 transition(vtos, vtos);
duke@0 857 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
duke@0 858 __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index
duke@0 859 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3); // get array
duke@0 860 // Otos_i: val
duke@0 861 // O2: index
duke@0 862 // O3: array
duke@0 863 __ verify_oop(Otos_i);
// Element scale is 4 bytes when oops are compressed, else pointer-sized.
coleenp@113 864 __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
duke@0 865
duke@0 866 // do array store check - check for NULL value first
duke@0 867 __ br_null( Otos_i, false, Assembler::pn, is_null );
coleenp@113 868 __ delayed()->nop();
coleenp@113 869
coleenp@113 870 __ load_klass(O3, O4); // get array klass
coleenp@113 871 __ load_klass(Otos_i, O5); // get value klass
duke@0 872
duke@0 873 // do fast instanceof cache test
duke@0 874
duke@0 875 __ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);
duke@0 876
duke@0 877 assert(Otos_i == O0, "just checking");
duke@0 878
duke@0 879 // Otos_i: value
duke@0 880 // O1: addr - offset
duke@0 881 // O2: index
duke@0 882 // O3: array
duke@0 883 // O4: array element klass
duke@0 884 // O5: value klass
duke@0 885
ysr@342 886 // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
ysr@342 887
duke@0 888 // Generate a fast subtype check. Branch to store_ok if no
duke@0 889 // failure. Throw if failure.
duke@0 890 __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );
duke@0 891
duke@0 892 // Not a subtype; so must throw exception
duke@0 893 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );
duke@0 894
duke@0 895 // Store is OK.
duke@0 896 __ bind(store_ok);
ysr@342 897 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
ysr@342 898
duke@0 899 __ ba(false,done);
duke@0 900 __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
duke@0 901
// Null stores skip the subtype check; G0 (hardwired zero) is the value.
duke@0 902 __ bind(is_null);
ysr@342 903 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
ysr@342 904
duke@0 905 __ profile_null_seen(G3_scratch);
twisti@1426 906 __ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
duke@0 907 __ bind(done);
duke@0 908 }
duke@0 909
duke@0 910
// bastore bytecode: array[index] = byte value (scale log2 = 0).
duke@0 911 void TemplateTable::bastore() {
duke@0 912 transition(itos, vtos);
duke@0 913 __ pop_i(O2); // index
duke@0 914 // Otos_i: val
duke@0 915 // O3: array
duke@0 916 __ index_check(O3, O2, 0, G3_scratch, O2);
duke@0 917 __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
duke@0 918 }
duke@0 919
duke@0 920
// castore bytecode: array[index] = char value (2-byte element scaling).
duke@0 921 void TemplateTable::castore() {
duke@0 922 transition(itos, vtos);
duke@0 923 __ pop_i(O2); // index
duke@0 924 // Otos_i: val
duke@0 925 // O3: array
duke@0 926 __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
duke@0 927 __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
duke@0 928 }
duke@0 929
duke@0 930
// sastore bytecode: identical code shape to castore (both store a 16-bit
// halfword), so it simply reuses it.
duke@0 931 void TemplateTable::sastore() {
duke@0 932 // %%%%% Factor across platform
duke@0 933 castore();
duke@0 934 }
duke@0 935
duke@0 936
// istore_<n> bytecodes: store int TOS directly into local slot n (index is
// fixed at template-generation time, no operand fetch needed).
duke@0 937 void TemplateTable::istore(int n) {
duke@0 938 transition(itos, vtos);
duke@0 939 __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
duke@0 940 }
duke@0 941
duke@0 942
// lstore_<n> bytecodes: a long occupies slots n and n+1; the store is
// addressed at n+1 and may be unaligned (store_unaligned_long).
duke@0 943 void TemplateTable::lstore(int n) {
duke@0 944 transition(ltos, vtos);
duke@0 945 assert(n+1 < Argument::n_register_parameters, "only handle register cases");
duke@0 946 __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
duke@0 947
duke@0 948 }
duke@0 949
duke@0 950
// fstore_<n> bytecodes: store float TOS directly into local slot n.
duke@0 951 void TemplateTable::fstore(int n) {
duke@0 952 transition(ftos, vtos);
duke@0 953 assert(n < Argument::n_register_parameters, "only handle register cases");
duke@0 954 __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
duke@0 955 }
duke@0 956
duke@0 957
// dstore_<n> bytecodes: a double occupies slots n and n+1; addressed at n+1
// and possibly unaligned (store_unaligned_double).
duke@0 958 void TemplateTable::dstore(int n) {
duke@0 959 transition(dtos, vtos);
duke@0 960 FloatRegister src = Ftos_d;
duke@0 961 __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
duke@0 962 }
duke@0 963
duke@0 964
// astore_<n> bytecodes: same reference/returnAddress handling as astore()
// (see there), but with a compile-time slot number n.
duke@0 965 void TemplateTable::astore(int n) {
duke@0 966 transition(vtos, vtos);
twisti@1426 967 __ load_ptr(0, Otos_i); // top of expression stack -> Otos_i
twisti@1426 968 __ inc(Lesp, Interpreter::stackElementSize); // pop it
duke@0 969 __ verify_oop_or_return_address(Otos_i, G3_scratch);
twisti@1426 970 __ store_local_ptr(n, Otos_i);
duke@0 971 }
duke@0 972
duke@0 973
// pop bytecode: discard one expression-stack slot by bumping Lesp.
duke@0 974 void TemplateTable::pop() {
duke@0 975 transition(vtos, vtos);
twisti@1426 976 __ inc(Lesp, Interpreter::stackElementSize);
duke@0 977 }
duke@0 978
duke@0 979
// pop2 bytecode: discard two expression-stack slots.
duke@0 980 void TemplateTable::pop2() {
duke@0 981 transition(vtos, vtos);
twisti@1426 982 __ inc(Lesp, 2 * Interpreter::stackElementSize);
duke@0 983 }
duke@0 984
duke@0 985
// dup bytecode: duplicate the top stack slot (..., a -> ..., a, a).
duke@0 986 void TemplateTable::dup() {
duke@0 987 transition(vtos, vtos);
duke@0 988 // stack: ..., a
duke@0 989 // load a and tag
twisti@1426 990 __ load_ptr(0, Otos_i);
twisti@1426 991 __ push_ptr(Otos_i);
duke@0 992 // stack: ..., a, a
duke@0 993 }
duke@0 994
duke@0 995
// dup_x1 bytecode: duplicate top slot and insert it below the second slot
// (..., a, b -> ..., b, a, b).  Done as an in-place swap plus a push.
duke@0 996 void TemplateTable::dup_x1() {
duke@0 997 transition(vtos, vtos);
duke@0 998 // stack: ..., a, b
twisti@1426 999 __ load_ptr( 1, G3_scratch); // get a
twisti@1426 1000 __ load_ptr( 0, Otos_l1); // get b
twisti@1426 1001 __ store_ptr(1, Otos_l1); // put b
twisti@1426 1002 __ store_ptr(0, G3_scratch); // put a - like swap
twisti@1426 1003 __ push_ptr(Otos_l1); // push b
duke@0 1004 // stack: ..., b, a, b
duke@0 1005 }
duke@0 1006
duke@0 1007
// dup_x2 bytecode: duplicate top slot and insert it three slots down
// (..., a, b, c -> ..., c, a, b, c).  c is pushed first, then a and b are
// rotated into place; the inline stack diagrams track each step.
duke@0 1008 void TemplateTable::dup_x2() {
duke@0 1009 transition(vtos, vtos);
duke@0 1010 // stack: ..., a, b, c
duke@0 1011 // get c and push on stack, reuse registers
twisti@1426 1012 __ load_ptr( 0, G3_scratch); // get c
twisti@1426 1013 __ push_ptr(G3_scratch); // push c with tag
duke@0 1014 // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
duke@0 1015 // (stack offsets n+1 now)
twisti@1426 1016 __ load_ptr( 3, Otos_l1); // get a
twisti@1426 1017 __ store_ptr(3, G3_scratch); // put c at 3
duke@0 1018 // stack: ..., c, b, c, c (a in reg)
twisti@1426 1019 __ load_ptr( 2, G3_scratch); // get b
twisti@1426 1020 __ store_ptr(2, Otos_l1); // put a at 2
duke@0 1021 // stack: ..., c, a, c, c (b in reg)
twisti@1426 1022 __ store_ptr(1, G3_scratch); // put b at 1
duke@0 1023 // stack: ..., c, a, b, c
duke@0 1024 }
duke@0 1025
duke@0 1026
// dup2 bytecode: duplicate the top two slots (..., a, b -> ..., a, b, a, b).
// Also covers duplicating one category-2 value (long/double = two slots).
duke@0 1027 void TemplateTable::dup2() {
duke@0 1028 transition(vtos, vtos);
twisti@1426 1029 __ load_ptr(1, G3_scratch); // get a
twisti@1426 1030 __ load_ptr(0, Otos_l1); // get b
twisti@1426 1031 __ push_ptr(G3_scratch); // push a
twisti@1426 1032 __ push_ptr(Otos_l1); // push b
duke@0 1033 // stack: ..., a, b, a, b
duke@0 1034 }
duke@0 1035
duke@0 1036
// dup2_x1 bytecode: duplicate the top two slots and insert them below the
// third (..., a, b, c -> ..., b, c, a, b, c).  In-place rotation then two
// pushes; the stack diagrams track each intermediate state.
duke@0 1037 void TemplateTable::dup2_x1() {
duke@0 1038 transition(vtos, vtos);
duke@0 1039 // stack: ..., a, b, c
twisti@1426 1040 __ load_ptr( 1, Lscratch); // get b
twisti@1426 1041 __ load_ptr( 2, Otos_l1); // get a
twisti@1426 1042 __ store_ptr(2, Lscratch); // put b at a
duke@0 1043 // stack: ..., b, b, c
twisti@1426 1044 __ load_ptr( 0, G3_scratch); // get c
twisti@1426 1045 __ store_ptr(1, G3_scratch); // put c at b
duke@0 1046 // stack: ..., b, c, c
twisti@1426 1047 __ store_ptr(0, Otos_l1); // put a at c
duke@0 1048 // stack: ..., b, c, a
twisti@1426 1049 __ push_ptr(Lscratch); // push b
twisti@1426 1050 __ push_ptr(G3_scratch); // push c
duke@0 1051 // stack: ..., b, c, a, b, c
duke@0 1052 }
duke@0 1053
duke@0 1054
duke@0 1055 // The spec says that these types can be a mixture of category 1 (1 word)
duke@0 1056 // types and/or category 2 types (long and doubles)
// dup2_x2 bytecode: duplicate the top two slots and insert them four slots
// down (..., a, b, c, d -> ..., c, d, a, b, c, d).  Implemented as two
// pairwise slot swaps followed by pushing c and d.
duke@0 1057 void TemplateTable::dup2_x2() {
duke@0 1058 transition(vtos, vtos);
duke@0 1059 // stack: ..., a, b, c, d
twisti@1426 1060 __ load_ptr( 1, Lscratch); // get c
twisti@1426 1061 __ load_ptr( 3, Otos_l1); // get a
twisti@1426 1062 __ store_ptr(3, Lscratch); // put c at 3
twisti@1426 1063 __ store_ptr(1, Otos_l1); // put a at 1
duke@0 1064 // stack: ..., c, b, a, d
twisti@1426 1065 __ load_ptr( 2, G3_scratch); // get b
twisti@1426 1066 __ load_ptr( 0, Otos_l1); // get d
twisti@1426 1067 __ store_ptr(0, G3_scratch); // put b at 0
twisti@1426 1068 __ store_ptr(2, Otos_l1); // put d at 2
duke@0 1069 // stack: ..., c, d, a, b
twisti@1426 1070 __ push_ptr(Lscratch); // push c
twisti@1426 1071 __ push_ptr(Otos_l1); // push d
duke@0 1072 // stack: ..., c, d, a, b, c, d
duke@0 1073 }
duke@0 1074
duke@0 1075
// swap bytecode: exchange the top two (category-1) stack slots.
duke@0 1076 void TemplateTable::swap() {
duke@0 1077 transition(vtos, vtos);
duke@0 1078 // stack: ..., a, b
twisti@1426 1079 __ load_ptr( 1, G3_scratch); // get a
twisti@1426 1080 __ load_ptr( 0, Otos_l1); // get b
twisti@1426 1081 __ store_ptr(0, G3_scratch); // put b
twisti@1426 1082 __ store_ptr(1, Otos_l1); // put a
duke@0 1083 // stack: ..., b, a
duke@0 1084 }
duke@0 1085
duke@0 1086
// Shared template for the two-operand int bytecodes (iadd, isub, imul,
// iand/ior/ixor, ishl/ishr/iushr): Otos_i = O1 <op> Otos_i with the first
// operand popped into O1.  The _and/_or/_xor enumerators carry a leading
// underscore because and/or/xor are C++ alternative operator tokens.
duke@0 1087 void TemplateTable::iop2(Operation op) {
duke@0 1088 transition(itos, itos);
duke@0 1089 __ pop_i(O1);
duke@0 1090 switch (op) {
duke@0 1091 case add: __ add(O1, Otos_i, Otos_i); break;
duke@0 1092 case sub: __ sub(O1, Otos_i, Otos_i); break;
duke@0 1093 // %%%%% Mul may not exist: better to call .mul?
duke@0 1094 case mul: __ smul(O1, Otos_i, Otos_i); break;
twisti@1426 1095 case _and: __ and3(O1, Otos_i, Otos_i); break;
twisti@1426 1096 case _or: __ or3(O1, Otos_i, Otos_i); break;
twisti@1426 1097 case _xor: __ xor3(O1, Otos_i, Otos_i); break;
duke@0 1098 case shl: __ sll(O1, Otos_i, Otos_i); break;
duke@0 1099 case shr: __ sra(O1, Otos_i, Otos_i); break;
duke@0 1100 case ushr: __ srl(O1, Otos_i, Otos_i); break;
duke@0 1101 default: ShouldNotReachHere();
duke@0 1102 }
duke@0 1103 }
duke@0 1104
duke@0 1105
// Shared template for the two-operand long bytecodes (ladd, lsub,
// land/lor/lxor).  On _LP64 longs live in one 64-bit register (Otos_l);
// on 32-bit the value is split across Otos_l1/Otos_l2 and add/sub must
// propagate the carry (addcc/addc, subcc/subc).
duke@0 1106 void TemplateTable::lop2(Operation op) {
duke@0 1107 transition(ltos, ltos);
duke@0 1108 __ pop_l(O2);
duke@0 1109 switch (op) {
duke@0 1110 #ifdef _LP64
twisti@1426 1111 case add: __ add(O2, Otos_l, Otos_l); break;
twisti@1426 1112 case sub: __ sub(O2, Otos_l, Otos_l); break;
twisti@1426 1113 case _and: __ and3(O2, Otos_l, Otos_l); break;
twisti@1426 1114 case _or: __ or3(O2, Otos_l, Otos_l); break;
twisti@1426 1115 case _xor: __ xor3(O2, Otos_l, Otos_l); break;
duke@0 1116 #else
duke@0 1117 case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
duke@0 1118 case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
twisti@1426 1119 case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
twisti@1426 1120 case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
twisti@1426 1121 case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
duke@0 1122 #endif
duke@0 1123 default: ShouldNotReachHere();
duke@0 1124 }
duke@0 1125 }
duke@0 1126
duke@0 1127
// idiv bytecode: Otos_i = O1 / Otos_i (32-bit signed divide using sdiv).
// The dividend is popped into O1; the divisor is the cached TOS.  Steps:
//   1. Seed the Y register (holds the upper 32 bits for sdiv) with the sign
//      extension of the dividend: 0 for non-negative, all ones for negative.
//   2. Throw ArithmeticException if the divisor is zero.
//   3. Special-case INT_MIN / -1, whose true quotient overflows: the result
//      is defined to be INT_MIN (the dividend), bypassing sdiv.
// Note: irem() relies on the dividend remaining in O1 after this template.
// (An unused 'Label ok;' declaration was removed; nothing bound or branched
// to it.)
duke@0 1128 void TemplateTable::idiv() {
duke@0 1129 // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
duke@0 1130 // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
duke@0 1131
duke@0 1132 transition(itos, itos);
duke@0 1133 __ pop_i(O1); // get 1st op
duke@0 1134
duke@0 1135 // Y contains upper 32 bits of result, set it to 0 or all ones
duke@0 1136 __ wry(G0);
duke@0 1137 __ mov(~0, G3_scratch);
duke@0 1138
duke@0 1139 __ tst(O1);
duke@0 1140 Label neg;
// Annulled branch: the delay-slot wry(~0) executes only when O1 is negative.
duke@0 1141 __ br(Assembler::negative, true, Assembler::pn, neg);
duke@0 1142 __ delayed()->wry(G3_scratch);
duke@0 1143 __ bind(neg);
duke@0 1144
duke@0 1146 __ tst(Otos_i);
duke@0 1147 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
duke@0 1148
duke@0 1149 const int min_int = 0x80000000; // INT_MIN bit pattern
duke@0 1150 Label regular;
duke@0 1151 __ cmp(Otos_i, -1);
duke@0 1152 __ br(Assembler::notEqual, false, Assembler::pt, regular);
duke@0 1153 #ifdef _LP64
duke@0 1154 // Don't put set in delay slot
duke@0 1155 // Set will turn into multiple instructions in 64 bit mode
duke@0 1156 __ delayed()->nop();
duke@0 1157 __ set(min_int, G4_scratch);
duke@0 1158 #else
duke@0 1159 __ delayed()->set(min_int, G4_scratch);
duke@0 1160 #endif
duke@0 1161 Label done;
duke@0 1162 __ cmp(O1, G4_scratch);
// Annulled branch: the mov in the delay slot runs only for the overflow case,
// making INT_MIN / -1 yield INT_MIN.
duke@0 1163 __ br(Assembler::equal, true, Assembler::pt, done);
duke@0 1164 __ delayed()->mov(O1, Otos_i); // (mov only executed if branch taken)
duke@0 1165
duke@0 1166 __ bind(regular);
duke@0 1167 __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
duke@0 1168 __ bind(done);
duke@0 1169 }
duke@0 1170
duke@0 1171
// irem bytecode: remainder computed as O1 - (O1 / Otos_i) * Otos_i.
// Depends on idiv() leaving the dividend in O1 (see note in idiv).
duke@0 1172 void TemplateTable::irem() {
duke@0 1173 transition(itos, itos);
duke@0 1174 __ mov(Otos_i, O2); // save divisor
duke@0 1175 idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1
duke@0 1176 __ smul(Otos_i, O2, Otos_i);
duke@0 1177 __ sub(O1, Otos_i, Otos_i);
duke@0 1178 }
duke@0 1179
duke@0 1180
// lmul bytecode: 64-bit multiply.  Single mulx on _LP64; on 32-bit it calls
// out to the SharedRuntime::lmul helper.
duke@0 1181 void TemplateTable::lmul() {
duke@0 1182 transition(ltos, ltos);
duke@0 1183 __ pop_l(O2);
duke@0 1184 #ifdef _LP64
duke@0 1185 __ mulx(Otos_l, O2, Otos_l);
duke@0 1186 #else
duke@0 1187 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
duke@0 1188 #endif
duke@0 1189
duke@0 1190 }
duke@0 1191
duke@0 1192
// ldiv bytecode: 64-bit signed divide with divide-by-zero check
// (ArithmeticException).  sdivx on _LP64; 32-bit tests the two halves with
// orcc and calls SharedRuntime::ldiv.
duke@0 1193 void TemplateTable::ldiv() {
duke@0 1194 transition(ltos, ltos);
duke@0 1195
duke@0 1196 // check for zero
duke@0 1197 __ pop_l(O2);
duke@0 1198 #ifdef _LP64
duke@0 1199 __ tst(Otos_l);
duke@0 1200 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
duke@0 1201 __ sdivx(O2, Otos_l, Otos_l);
duke@0 1202 #else
duke@0 1203 __ orcc(Otos_l1, Otos_l2, G0);
duke@0 1204 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
duke@0 1205 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
duke@0 1206 #endif
duke@0 1207 }
duke@0 1208
duke@0 1209
// lrem bytecode: 64-bit remainder with divide-by-zero check.  On _LP64 it is
// computed as O2 - (O2 / Otos_l) * Otos_l; 32-bit calls SharedRuntime::lrem.
duke@0 1210 void TemplateTable::lrem() {
duke@0 1211 transition(ltos, ltos);
duke@0 1212
duke@0 1213 // check for zero
duke@0 1214 __ pop_l(O2);
duke@0 1215 #ifdef _LP64
duke@0 1216 __ tst(Otos_l);
duke@0 1217 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
duke@0 1218 __ sdivx(O2, Otos_l, Otos_l2);
duke@0 1219 __ mulx (Otos_l2, Otos_l, Otos_l2);
duke@0 1220 __ sub (O2, Otos_l2, Otos_l);
duke@0 1221 #else
duke@0 1222 __ orcc(Otos_l1, Otos_l2, G0);
duke@0 1223 __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
duke@0 1224 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
duke@0 1225 #endif
duke@0 1226 }
duke@0 1227
duke@0 1228
// lshl bytecode: long << int.  The shift count is the int TOS (Otos_i), the
// long value is popped; sllx on _LP64, macro lshl on 32-bit register pairs.
duke@0 1229 void TemplateTable::lshl() {
duke@0 1230 transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
duke@0 1231
duke@0 1232 __ pop_l(O2); // shift value in O2, O3
duke@0 1233 #ifdef _LP64
duke@0 1234 __ sllx(O2, Otos_i, Otos_l);
duke@0 1235 #else
duke@0 1236 __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
duke@0 1237 #endif
duke@0 1238 }
duke@0 1239
duke@0 1240
// lshr bytecode: arithmetic (sign-propagating) long >> int; srax on _LP64.
duke@0 1241 void TemplateTable::lshr() {
duke@0 1242 transition(itos, ltos); // %%%% see lshl comment
duke@0 1243
duke@0 1244 __ pop_l(O2); // shift value in O2, O3
duke@0 1245 #ifdef _LP64
duke@0 1246 __ srax(O2, Otos_i, Otos_l);
duke@0 1247 #else
duke@0 1248 __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
duke@0 1249 #endif
duke@0 1250 }
duke@0 1251
duke@0 1252
duke@0 1253
// lushr bytecode: logical (zero-filling) long >>> int; srlx on _LP64.
duke@0 1254 void TemplateTable::lushr() {
duke@0 1255 transition(itos, ltos); // %%%% see lshl comment
duke@0 1256
duke@0 1257 __ pop_l(O2); // shift value in O2, O3
duke@0 1258 #ifdef _LP64
duke@0 1259 __ srlx(O2, Otos_i, Otos_l);
duke@0 1260 #else
duke@0 1261 __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
duke@0 1262 #endif
duke@0 1263 }
duke@0 1264
duke@0 1265
// Shared template for the two-operand float bytecodes (fadd, fsub, fmul,
// fdiv, frem).  add/sub/mul/div are single FP instructions with the popped
// operand in F4; frem calls out to SharedRuntime::frem, marshalling the two
// floats per the ABI (F1/F3 on _LP64, O0/O1 via a memory round-trip on
// 32-bit).
duke@0 1266 void TemplateTable::fop2(Operation op) {
duke@0 1267 transition(ftos, ftos);
duke@0 1268 switch (op) {
duke@0 1269 case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
duke@0 1270 case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
duke@0 1271 case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
duke@0 1272 case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
duke@0 1273 case rem:
duke@0 1274 assert(Ftos_f == F0, "just checking");
duke@0 1275 #ifdef _LP64
duke@0 1276 // LP64 calling conventions use F1, F3 for passing 2 floats
duke@0 1277 __ pop_f(F1);
duke@0 1278 __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
duke@0 1279 #else
duke@0 1280 __ pop_i(O0);
duke@0 1281 __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
duke@0 1282 __ ld( __ d_tmp, O1 );
duke@0 1283 #endif
duke@0 1284 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
duke@0 1285 assert( Ftos_f == F0, "fix this code" );
duke@0 1286 break;
duke@0 1287
duke@0 1288 default: ShouldNotReachHere();
duke@0 1289 }
duke@0 1290 }
duke@0 1291
duke@0 1292
// Shared template for the two-operand double bytecodes (dadd, dsub, dmul,
// ddiv, drem).  drem calls SharedRuntime::drem with the doubles marshalled
// per the ABI (D0/D2 on _LP64; O0O1/O2O3 via memory round-trips on 32-bit).
duke@0 1293 void TemplateTable::dop2(Operation op) {
duke@0 1294 transition(dtos, dtos);
duke@0 1295 switch (op) {
duke@0 1296 case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
duke@0 1297 case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
duke@0 1298 case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
duke@0 1299 case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
duke@0 1300 case rem:
duke@0 1301 #ifdef _LP64
duke@0 1302 // Pass arguments in D0, D2
duke@0 1303 __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
duke@0 1304 __ pop_d( F0 );
duke@0 1305 #else
duke@0 1306 // Pass arguments in O0O1, O2O3
duke@0 1307 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
duke@0 1308 __ ldd( __ d_tmp, O2 );
duke@0 1309 __ pop_d(Ftos_f);
duke@0 1310 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
duke@0 1311 __ ldd( __ d_tmp, O0 );
duke@0 1312 #endif
duke@0 1313 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
duke@0 1314 assert( Ftos_d == F0, "fix this code" );
duke@0 1315 break;
duke@0 1316
duke@0 1317 default: ShouldNotReachHere();
duke@0 1318 }
duke@0 1319 }
duke@0 1320
duke@0 1321
// ineg bytecode: negate the int TOS in place.
duke@0 1322 void TemplateTable::ineg() {
duke@0 1323 transition(itos, itos);
duke@0 1324 __ neg(Otos_i);
duke@0 1325 }
duke@0 1326
duke@0 1327
// lneg bytecode: negate the long TOS.  Computed as 0 - value on _LP64; the
// 32-bit macro handles the register pair with borrow.
duke@0 1328 void TemplateTable::lneg() {
duke@0 1329 transition(ltos, ltos);
duke@0 1330 #ifdef _LP64
duke@0 1331 __ sub(G0, Otos_l, Otos_l);
duke@0 1332 #else
duke@0 1333 __ lneg(Otos_l1, Otos_l2);
duke@0 1334 #endif
duke@0 1335 }
duke@0 1336
duke@0 1337
// fneg bytecode: negate the float TOS in place.
duke@0 1338 void TemplateTable::fneg() {
duke@0 1339 transition(ftos, ftos);
duke@0 1340 __ fneg(FloatRegisterImpl::S, Ftos_f);
duke@0 1341 }
duke@0 1342
duke@0 1343
// dneg bytecode: negate the double TOS in place.
duke@0 1344 void TemplateTable::dneg() {
duke@0 1345 transition(dtos, dtos);
duke@0 1346 // v8 has fnegd if source and dest are the same
duke@0 1347 __ fneg(FloatRegisterImpl::D, Ftos_f);
duke@0 1348 }
duke@0 1349
duke@0 1350
// iinc bytecode: local[index] += signed byte constant (read from bcp+2).
// access_local_int both loads the value and leaves the slot's effective
// address in G3_scratch, so the result can be stored straight back.
duke@0 1351 void TemplateTable::iinc() {
duke@0 1352 transition(vtos, vtos);
duke@0 1353 locals_index(G3_scratch);
duke@0 1354 __ ldsb(Lbcp, 2, O2); // load constant
duke@0 1355 __ access_local_int(G3_scratch, Otos_i);
duke@0 1356 __ add(Otos_i, O2, Otos_i);
twisti@1426 1357 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
duke@0 1358 }
duke@0 1359
duke@0 1360
// wide iinc: like iinc but both the local index and the signed increment are
// two-byte operands (increment read from bcp+4 into O3).
duke@0 1361 void TemplateTable::wide_iinc() {
duke@0 1362 transition(vtos, vtos);
duke@0 1363 locals_index_wide(G3_scratch);
duke@0 1364 __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
duke@0 1365 __ access_local_int(G3_scratch, Otos_i);
duke@0 1366 __ add(Otos_i, O3, Otos_i);
twisti@1426 1367 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
duke@0 1368 }
duke@0 1369
duke@0 1370
// Shared template for all primitive conversion bytecodes (i2l, i2f, ..., d2f).
// The ASSERT prologue derives the expected in/out TOS states from the current
// bytecode and checks them via transition().  Conversions that the FPU cannot
// do directly (or that pre-V9 hardware lacks — see v9_instructions_work())
// call out to the SharedRuntime conversion helpers; int<->float moves go
// through the d_tmp scratch slot because SPARC has no direct GPR<->FPR move
// here.
duke@0 1371 void TemplateTable::convert() {
duke@0 1372 // %%%%% Factor this first part accross platforms
duke@0 1373 #ifdef ASSERT
duke@0 1374 TosState tos_in = ilgl;
duke@0 1375 TosState tos_out = ilgl;
duke@0 1376 switch (bytecode()) {
duke@0 1377 case Bytecodes::_i2l: // fall through
duke@0 1378 case Bytecodes::_i2f: // fall through
duke@0 1379 case Bytecodes::_i2d: // fall through
duke@0 1380 case Bytecodes::_i2b: // fall through
duke@0 1381 case Bytecodes::_i2c: // fall through
duke@0 1382 case Bytecodes::_i2s: tos_in = itos; break;
duke@0 1383 case Bytecodes::_l2i: // fall through
duke@0 1384 case Bytecodes::_l2f: // fall through
duke@0 1385 case Bytecodes::_l2d: tos_in = ltos; break;
duke@0 1386 case Bytecodes::_f2i: // fall through
duke@0 1387 case Bytecodes::_f2l: // fall through
duke@0 1388 case Bytecodes::_f2d: tos_in = ftos; break;
duke@0 1389 case Bytecodes::_d2i: // fall through
duke@0 1390 case Bytecodes::_d2l: // fall through
duke@0 1391 case Bytecodes::_d2f: tos_in = dtos; break;
duke@0 1392 default : ShouldNotReachHere();
duke@0 1393 }
duke@0 1394 switch (bytecode()) {
duke@0 1395 case Bytecodes::_l2i: // fall through
duke@0 1396 case Bytecodes::_f2i: // fall through
duke@0 1397 case Bytecodes::_d2i: // fall through
duke@0 1398 case Bytecodes::_i2b: // fall through
duke@0 1399 case Bytecodes::_i2c: // fall through
duke@0 1400 case Bytecodes::_i2s: tos_out = itos; break;
duke@0 1401 case Bytecodes::_i2l: // fall through
duke@0 1402 case Bytecodes::_f2l: // fall through
duke@0 1403 case Bytecodes::_d2l: tos_out = ltos; break;
duke@0 1404 case Bytecodes::_i2f: // fall through
duke@0 1405 case Bytecodes::_l2f: // fall through
duke@0 1406 case Bytecodes::_d2f: tos_out = ftos; break;
duke@0 1407 case Bytecodes::_i2d: // fall through
duke@0 1408 case Bytecodes::_l2d: // fall through
duke@0 1409 case Bytecodes::_f2d: tos_out = dtos; break;
duke@0 1410 default : ShouldNotReachHere();
duke@0 1411 }
duke@0 1412 transition(tos_in, tos_out);
duke@0 1413 #endif
duke@0 1414
duke@0 1415
duke@0 1416 // Conversion
duke@0 1417 Label done;
duke@0 1418 switch (bytecode()) {
duke@0 1419 case Bytecodes::_i2l:
duke@0 1420 #ifdef _LP64
duke@0 1421 // Sign extend the 32 bits
duke@0 1422 __ sra ( Otos_i, 0, Otos_l );
duke@0 1423 #else
// 32-bit: low word moves as-is; high word (Otos_l1) is 0 for non-negative
// values (annulled-branch delay slot) or ~0 for negative ones.
duke@0 1424 __ addcc(Otos_i, 0, Otos_l2);
duke@0 1425 __ br(Assembler::greaterEqual, true, Assembler::pt, done);
duke@0 1426 __ delayed()->clr(Otos_l1);
duke@0 1427 __ set(~0, Otos_l1);
duke@0 1428 #endif
duke@0 1429 break;
duke@0 1430
duke@0 1431 case Bytecodes::_i2f:
duke@0 1432 __ st(Otos_i, __ d_tmp ); // GPR -> memory -> FPR, then convert
duke@0 1433 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
duke@0 1434 __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
duke@0 1435 break;
duke@0 1436
duke@0 1437 case Bytecodes::_i2d:
duke@0 1438 __ st(Otos_i, __ d_tmp);
duke@0 1439 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
duke@0 1440 __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
duke@0 1441 break;
duke@0 1442
duke@0 1443 case Bytecodes::_i2b:
// Truncate via shift pair: left 24 then arithmetic right 24 sign-extends
// the low byte.
duke@0 1444 __ sll(Otos_i, 24, Otos_i);
duke@0 1445 __ sra(Otos_i, 24, Otos_i);
duke@0 1446 break;
duke@0 1447
duke@0 1448 case Bytecodes::_i2c:
// char is unsigned: logical right shift zero-extends the low 16 bits.
duke@0 1449 __ sll(Otos_i, 16, Otos_i);
duke@0 1450 __ srl(Otos_i, 16, Otos_i);
duke@0 1451 break;
duke@0 1452
duke@0 1453 case Bytecodes::_i2s:
// short is signed: arithmetic right shift sign-extends the low 16 bits.
duke@0 1454 __ sll(Otos_i, 16, Otos_i);
duke@0 1455 __ sra(Otos_i, 16, Otos_i);
duke@0 1456 break;
duke@0 1457
duke@0 1458 case Bytecodes::_l2i:
duke@0 1459 #ifndef _LP64
duke@0 1460 __ mov(Otos_l2, Otos_i);
duke@0 1461 #else
duke@0 1462 // Sign-extend into the high 32 bits
duke@0 1463 __ sra(Otos_l, 0, Otos_i);
duke@0 1464 #endif
duke@0 1465 break;
duke@0 1466
duke@0 1467 case Bytecodes::_l2f:
duke@0 1468 case Bytecodes::_l2d:
duke@0 1469 __ st_long(Otos_l, __ d_tmp);
duke@0 1470 __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
duke@0 1471
// fxtof (64-bit int to float) is a V9 instruction; fall back to the
// SharedRuntime helpers on hardware without working V9 support.
duke@0 1472 if (VM_Version::v9_instructions_work()) {
duke@0 1473 if (bytecode() == Bytecodes::_l2f) {
duke@0 1474 __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
duke@0 1475 } else {
duke@0 1476 __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
duke@0 1477 }
duke@0 1478 } else {
duke@0 1479 __ call_VM_leaf(
duke@0 1480 Lscratch,
duke@0 1481 bytecode() == Bytecodes::_l2f
duke@0 1482 ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
duke@0 1483 : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
duke@0 1484 );
duke@0 1485 }
duke@0 1486 break;
duke@0 1487
duke@0 1488 case Bytecodes::_f2i: {
duke@0 1489 Label isNaN;
duke@0 1490 // result must be 0 if value is NaN; test by comparing value to itself
duke@0 1491 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
duke@0 1492 // According to the v8 manual, you have to have a non-fp instruction
duke@0 1493 // between fcmp and fb.
duke@0 1494 if (!VM_Version::v9_instructions_work()) {
duke@0 1495 __ nop();
duke@0 1496 }
duke@0 1497 __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
duke@0 1498 __ delayed()->clr(Otos_i); // NaN
duke@0 1499 __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
duke@0 1500 __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
duke@0 1501 __ ld(__ d_tmp, Otos_i);
duke@0 1502 __ bind(isNaN);
duke@0 1503 }
duke@0 1504 break;
duke@0 1505
duke@0 1506 case Bytecodes::_f2l:
duke@0 1507 // must uncache tos
duke@0 1508 __ push_f();
duke@0 1509 #ifdef _LP64
duke@0 1510 __ pop_f(F1); // float arg in F1 per LP64 ABI
duke@0 1511 #else
duke@0 1512 __ pop_i(O0); // float bits in O0 on 32-bit
duke@0 1513 #endif
duke@0 1514 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
duke@0 1515 break;
duke@0 1516
duke@0 1517 case Bytecodes::_f2d:
duke@0 1518 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
duke@0 1519 break;
duke@0 1520
duke@0 1521 case Bytecodes::_d2i:
duke@0 1522 case Bytecodes::_d2l:
duke@0 1523 // must uncache tos
duke@0 1524 __ push_d();
duke@0 1525 #ifdef _LP64
duke@0 1526 // LP64 calling conventions pass first double arg in D0
duke@0 1527 __ pop_d( Ftos_d );
duke@0 1528 #else
duke@0 1529 __ pop_i( O0 );
duke@0 1530 __ pop_i( O1 );
duke@0 1531 #endif
duke@0 1532 __ call_VM_leaf(Lscratch,
duke@0 1533 bytecode() == Bytecodes::_d2i
duke@0 1534 ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
duke@0 1535 : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
duke@0 1536 break;
duke@0 1537
duke@0 1538 case Bytecodes::_d2f:
duke@0 1539 if (VM_Version::v9_instructions_work()) {
duke@0 1540 __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
duke@0 1541 }
duke@0 1542 else {
duke@0 1543 // must uncache tos
duke@0 1544 __ push_d();
duke@0 1545 __ pop_i(O0);
duke@0 1546 __ pop_i(O1);
duke@0 1547 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
duke@0 1548 }
duke@0 1549 break;
duke@0 1550
duke@0 1551 default: ShouldNotReachHere();
duke@0 1552 }
duke@0 1553 __ bind(done);
duke@0 1554 }
duke@0 1555
duke@0 1556
// lcmp bytecode: compare two longs, leaving -1/0/1 in the int TOS (Otos_i);
// the macro-assembler lcmp handles the 64-bit vs register-pair cases.
duke@0 1557 void TemplateTable::lcmp() {
duke@0 1558 transition(ltos, itos);
duke@0 1559
duke@0 1560 #ifdef _LP64
duke@0 1561 __ pop_l(O1); // pop off value 1, value 2 is in O0
duke@0 1562 __ lcmp( O1, Otos_l, Otos_i );
duke@0 1563 #else
duke@0 1564 __ pop_l(O2); // cmp O2,3 to O0,1
duke@0 1565 __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
duke@0 1566 #endif
duke@0 1567 }
duke@0 1568
duke@0 1569
// Shared template for fcmpl/fcmpg/dcmpl/dcmpg: compare two floats or doubles
// and leave -1/0/1 in the int TOS.  unordered_result selects the value
// produced when either operand is NaN (the l vs g bytecode variants).
duke@0 1570 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
duke@0 1571
duke@0 1572 if (is_float) __ pop_f(F2);
duke@0 1573 else __ pop_d(F2);
duke@0 1574
duke@0 1575 assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");
duke@0 1576
duke@0 1577 __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
duke@0 1578 }
duke@0 1579
duke@0 1580 void TemplateTable::branch(bool is_jsr, bool is_wide) {
duke@0 1581 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
duke@0 1582 __ verify_oop(Lmethod);
duke@0 1583 __ verify_thread();
duke@0 1584
duke@0 1585 const Register O2_bumped_count = O2;
duke@0 1586 __ profile_taken_branch(G3_scratch, O2_bumped_count);
duke@0 1587
duke@0 1588 // get (wide) offset to O1_disp
duke@0 1589 const Register O1_disp = O1;
duke@0 1590 if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
duke@0 1591 else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
duke@0 1592
duke@0 1593 // Handle all the JSR stuff here, then exit.
duke@0 1594 // It's much shorter and cleaner than intermingling with the
twisti@605 1595 // non-JSR normal-branch stuff occurring below.
duke@0 1596 if( is_jsr ) {
duke@0 1597 // compute return address as bci in Otos_i
twisti@727 1598 __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
duke@0 1599 __ sub(Lbcp, G3_scratch, G3_scratch);
duke@0 1600 __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
duke@0 1601
duke@0 1602 // Bump Lbcp to target of JSR
duke@0 1603 __ add(Lbcp, O1_disp, Lbcp);
duke@0 1604 // Push returnAddress for "ret" on stack
twisti@1426 1605 __ push_ptr(Otos_i);
duke@0 1606 // And away we go!
duke@0 1607 __ dispatch_next(vtos);
duke@0 1608 return;
duke@0 1609 }
duke@0 1610
duke@0 1611 // Normal (non-jsr) branch handling
duke@0 1612
duke@0 1613 // Save the current Lbcp
duke@0 1614 const Register O0_cur_bcp = O0;
duke@0 1615 __ mov( Lbcp, O0_cur_bcp );
duke@0 1616
iveresov@1703 1617
duke@0 1618 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
duke@0 1619 if ( increment_invocation_counter_for_backward_branches ) {
duke@0 1620 Label Lforward;
duke@0 1621 // check branch direction
duke@0 1622 __ br( Assembler::positive, false, Assembler::pn, Lforward );
duke@0 1623 // Bump bytecode pointer by displacement (take the branch)
duke@0 1624 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
duke@0 1625
iveresov@1703 1626 if (TieredCompilation) {
iveresov@1703 1627 Label Lno_mdo, Loverflow;
iveresov@1703 1628 int increment = InvocationCounter::count_increment;
iveresov@1703 1629 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
iveresov@1703 1630 if (ProfileInterpreter) {
iveresov@1703 1631 // If no method data exists, go to profile_continue.
iveresov@1703 1632 __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
iveresov@1703 1633 __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
iveresov@1703 1634 __ delayed()->nop();
iveresov@1703 1635
iveresov@1703 1636 // Increment backedge counter in the MDO
iveresov@1703 1637 Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
iveresov@1703 1638 in_bytes(InvocationCounter::counter_offset()));
iveresov@1703 1639 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
iveresov@1703 1640 Assembler::notZero, &Lforward);
iveresov@1703 1641 __ ba(false, Loverflow);
iveresov@1703 1642 __ delayed()->nop();
duke@0 1643 }
iveresov@1703 1644
iveresov@1703 1645 // If there's no MDO, increment counter in methodOop
iveresov@1703 1646 __ bind(Lno_mdo);
iveresov@1703 1647 Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
iveresov@1703 1648 in_bytes(InvocationCounter::counter_offset()));
iveresov@1703 1649 __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
iveresov@1703 1650 Assembler::notZero, &Lforward);
iveresov@1703 1651 __ bind(Loverflow);
iveresov@1703 1652
iveresov@1703 1653 // notify point for loop, pass branch bytecode
iveresov@1703 1654 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
iveresov@1703 1655
iveresov@1703 1656 // Was an OSR adapter generated?
iveresov@1703 1657 // O0 = osr nmethod
iveresov@1703 1658 __ br_null(O0, false, Assembler::pn, Lforward);
iveresov@1703 1659 __ delayed()->nop();
iveresov@1703 1660
iveresov@1703 1661 // Has the nmethod been invalidated already?
iveresov@1703 1662 __ ld(O0, nmethod::entry_bci_offset(), O2);
iveresov@1703 1663 __ cmp(O2, InvalidOSREntryBci);
iveresov@1703 1664 __ br(Assembler::equal, false, Assembler::pn, Lforward);
iveresov@1703 1665 __ delayed()->nop();
iveresov@1703 1666
iveresov@1703 1667 // migrate the interpreter frame off of the stack
iveresov@1703 1668
iveresov@1703 1669 __ mov(G2_thread, L7);
iveresov@1703 1670 // save nmethod
iveresov@1703 1671 __ mov(O0, L6);
iveresov@1703 1672 __ set_last_Java_frame(SP, noreg);
iveresov@1703 1673 __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
iveresov@1703 1674 __ reset_last_Java_frame();
iveresov@1703 1675 __ mov(L7, G2_thread);
iveresov@1703 1676
iveresov@1703 1677 // move OSR nmethod to I1
iveresov@1703 1678 __ mov(L6, I1);
iveresov@1703 1679
iveresov@1703 1680 // OSR buffer to I0
iveresov@1703 1681 __ mov(O0, I0);
iveresov@1703 1682
iveresov@1703 1683 // remove the interpreter frame
iveresov@1703 1684 __ restore(I5_savedSP, 0, SP);
iveresov@1703 1685
iveresov@1703 1686 // Jump to the osr code.
iveresov@1703 1687 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
iveresov@1703 1688 __ jmp(O2, G0);
iveresov@1703 1689 __ delayed()->nop();
iveresov@1703 1690
duke@0 1691 } else {
iveresov@1703 1692 // Update Backedge branch separately from invocations
iveresov@1703 1693 const Register G4_invoke_ctr = G4;
iveresov@1703 1694 __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
iveresov@1703 1695 if (ProfileInterpreter) {
iveresov@2003 1696 __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
iveresov@1703 1697 if (UseOnStackReplacement) {
iveresov@1703 1698 __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
iveresov@1703 1699 }
iveresov@1703 1700 } else {
iveresov@1703 1701 if (UseOnStackReplacement) {
iveresov@1703 1702 __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
iveresov@1703 1703 }
duke@0 1704 }
duke@0 1705 }
duke@0 1706
duke@0 1707 __ bind(Lforward);
duke@0 1708 } else
duke@0 1709 // Bump bytecode pointer by displacement (take the branch)
duke@0 1710 __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
duke@0 1711
duke@0 1712 // continue with bytecode @ target
duke@0 1713 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
duke@0 1714 // %%%%% and changing dispatch_next to dispatch_only
duke@0 1715 __ dispatch_next(vtos);
duke@0 1716 }
duke@0 1717
duke@0 1718
duke@0 1719 // Note Condition in argument is TemplateTable::Condition
duke@0 1720 // arg scope is within class scope
duke@0 1721
// ifeq/ifne/iflt/ifge/ifgt/ifle: conditional branch comparing the int on
// TOS (Otos_i) against zero. The actual branch/fall-through emission is done
// by if_cmp() with the negated condition (second arg false = not a pointer
// compare — see if_cmp's definition for the exact branch sense).
void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}
duke@0 1729
duke@0 1730
// if_icmp<cond>: pop the second int operand into O1 and compare it with the
// int on TOS (Otos_i); branch handling is delegated to if_cmp() with the
// negated condition (non-pointer compare).
void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}
duke@0 1737
duke@0 1738
// ifnull/ifnonnull: test the reference on TOS against NULL (tst sets the
// condition codes from Otos_i); if_cmp is told this is a pointer compare
// (second arg true).
void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}
duke@0 1744
duke@0 1745
// if_acmpeq/if_acmpne: pop the second reference into O1, verify both oops in
// debug builds, compare the two references by identity, then branch via
// if_cmp() with the negated condition (pointer compare).
void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}
duke@0 1754
duke@0 1755
duke@0 1756
// ret: return from a jsr subroutine. Loads the return BCI from the local
// variable indexed by the bytecode operand, updates the profile, then
// recomputes Lbcp = method->constMethod()->codes() + bci and dispatches.
void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result. The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  // Sanity check: a real BCI should be well under 65536; if the unsigned
  // 64-bit compare fails, the BCI ended up in the wrong register half.
  { Label zzz ;
     __ set (65536, G3_scratch) ;
     __ cmp (Otos_i, G3_scratch) ;
     __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
     __ delayed()->nop();
     __ stop("BCI is in the wrong register half?");
     __ bind (zzz) ;
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  // Lbcp = constMethod + bci + codes_offset
  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}
duke@0 1788
duke@0 1789
// wide ret: identical to ret() except the local variable index is a
// two-byte (wide) operand, and the 64-bit BCI sanity check is omitted.
void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  // Lbcp = constMethod + bci + codes_offset
  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}
duke@0 1803
duke@0 1804
// tableswitch: the operands (default offset, low, high, jump table) start at
// the next 4-byte-aligned address after the opcode. If the int key on TOS is
// within [low, high], pick the table entry; otherwise take the default
// offset. The chosen offset is added to Lbcp and dispatch continues there.
void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi (each is a full 32-bit word, per the tableswitch layout)
  __ ld(O1, 1 * BytesPerInt, O2); // low match bound
  __ ld(O1, 2 * BytesPerInt, O3); // high match bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);   // zero-based table index = key - low
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);      // scale index to byte offset
  __ add(O2, 3 * BytesPerInt, O2);     // skip default/low/high words
  __ ba(false, continue_execution);
  __ delayed()->ld(O1, O2, O2);        // fetch branch offset (in delay slot)
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}
duke@0 1841
duke@0 1842
// lookupswitch is never executed directly: the rewriter turns it into
// fast_linearswitch or fast_binaryswitch before it can reach the
// interpreter, so hitting this template is a VM bug.
void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}
duke@0 1847
// fast_linearswitch: rewritten lookupswitch with few pairs. Scans the
// (match, offset) pair table linearly for the int key on TOS; falls back to
// the default offset when no pair matches. The chosen offset is added to
// Lbcp. Register use: O1 = aligned operand base, O2 = end-of-table pointer,
// O3 = current pair pointer, O4 = loaded match value / final offset.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);           // npairs
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
  __ ba(false, loop_entry);
  __ delayed()->add(O3, O2, O2); // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4 (annulled delay slot)
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);           // preload next match value

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba(false, continue_execution);
    __ delayed()->nop();
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    // recover the zero-based pair index from the pair address for profiling
    __ sub(O3, O1, O3);
    __ sub(O3, 2*BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos);
}
duke@0 1894
duke@0 1895
// fast_binaryswitch: rewritten lookupswitch with many pairs. Binary-searches
// the sorted (match, offset) pair table for the int key on TOS; takes the
// default offset on a miss. The chosen offset is added to Lbcp.
void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // register allocation
  assert(Otos_i == O0, "alias checking");
  const Register Rkey     = Otos_i;                    // already set (tosca)
  const Register Rarray   = O1;
  const Register Ri       = O2;
  const Register Rj       = O3;
  const Register Rh       = O4;
  const Register Rscratch = O5;

  const int log_entry_size = 3;        // each pair is 2 words = 8 bytes
  const int entry_size = 1 << log_entry_size;

  Label found;
  // Find Array start (first pair is 3 words past the aligned opcode:
  // default offset + npairs precede it)
  __ add(Lbcp, 3 * BytesPerInt, Rarray);
  __ and3(Rarray, -BytesPerInt, Rarray);
  // initialize i & j (in delay slot)
  __ clr( Ri );

  // and start; j = n is loaded from the npairs word just before the array
  Label entry;
  __ ba(false, entry);
  __ delayed()->ld( Rarray, -BytesPerInt, Rj);
  // (Rj is already in the native byte-ordering.)

  // binary search loop
  { Label loop;
    __ bind( loop );
    // int h = (i + j) >> 1;
    // (the add producing i + j happens in the branch delay slot below)
    __ sra( Rh, 1, Rh );
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sll( Rh, log_entry_size, Rscratch );
    __ ld( Rarray, Rscratch, Rscratch );
    // (Rscratch is already in the native byte-ordering.)
    __ cmp( Rkey, Rscratch );
    if ( VM_Version::v9_instructions_work() ) {
      // branchless update via V9 conditional moves
      __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
      __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
    }
    else {
      // pre-V9 fallback: annulled branch with the "then" move in the delay slot
      Label end_of_if;
      __ br( Assembler::less, true, Assembler::pt, end_of_if );
      __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
      __ mov( Rh, Ri );            // else i = h
      __ bind(end_of_if);          // }
    }

    // while (i+1 < j)
    __ bind( entry );
    __ add( Ri, 1, Rscratch );
    __ cmp(Rscratch, Rj);
    __ br( Assembler::less, true, Assembler::pt, loop );
    __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1;
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mov( Ri, Rh );              // Save index in i for profiling
  }
  __ sll( Ri, log_entry_size, Ri );
  __ ld( Rarray, Ri, Rscratch );
  // (Rscratch is already in the native byte-ordering.)
  __ cmp( Rkey, Rscratch );
  __ br( Assembler::notEqual, true, Assembler::pn, default_case );
  __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j

  // entry found -> j = offset
  __ inc( Ri, BytesPerInt );       // advance to the offset word of the pair
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ ld( Rarray, Ri, Rj );
  // (Rj is already in the native byte-ordering.)

  if (ProfileInterpreter) {
    __ ba(false, continue_execution);
    __ delayed()->nop();
  }

  __ bind(default_case); // fall through (if not profiling)
  __ profile_switch_default(Ri);

  __ bind(continue_execution);
  __ add( Lbcp, Rj, Lbcp );
  __ dispatch_next( vtos );
}
duke@0 2013
duke@0 2014
// Shared template for all *return bytecodes. For the rewritten
// _return_register_finalizer variant, first checks the receiver's klass for
// JVM_ACC_HAS_FINALIZER and calls into the VM to register the finalizer if
// set. Then removes the interpreter activation and returns to the caller.
void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(), "inconsistent calls_vm information");

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ mov(G0, G3_scratch);                  // local 0 = receiver ('this')
    __ access_local_ptr(G3_scratch, Otos_i);
    __ load_klass(Otos_i, O2);
    __ set(JVM_ACC_HAS_FINALIZER, G3);
    __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
    __ andcc(G3, O2, G0);                    // test the finalizer bit
    Label skip_register_finalizer;
    __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
    __ delayed()->nop();

    // Call out to do finalizer registration
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);

    __ bind(skip_register_finalizer);
  }

  __ remove_activation(state, /* throw_monitor_exception */ true);

  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();                                  // return to caller
  __ delayed()->restore(I5_savedSP, G0, SP);
}
duke@0 2044
duke@0 2045
duke@0 2046 // ----------------------------------------------------------------------------
duke@0 2047 // Volatile variables demand their effects be made known to all CPU's in
duke@0 2048 // order. Store buffers on most chips allow reads & writes to reorder; the
duke@0 2049 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
duke@0 2050 // memory barrier (i.e., it's not sufficient that the interpreter does not
duke@0 2051 // reorder volatile references, the hardware also must not reorder them).
duke@0 2052 //
duke@0 2053 // According to the new Java Memory Model (JMM):
duke@0 2054 // (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
duke@0 2056 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
duke@0 2057 // the read float up to before the read. It's OK for non-volatile memory refs
duke@0 2058 // that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
duke@0 2060 // that happen BEFORE the write float down to after the write. It's OK for
duke@0 2061 // non-volatile memory refs that happen after the volatile write to float up
duke@0 2062 // before it.
duke@0 2063 //
duke@0 2064 // We only put in barriers around volatile refs (they are expensive), not
duke@0 2065 // _between_ memory refs (that would require us to track the flavor of the
duke@0 2066 // previous memory refs). Requirements (2) and (3) require some barriers
duke@0 2067 // before volatile stores and after volatile loads. These nearly cover
duke@0 2068 // requirement (1) but miss the volatile-store-volatile-load case. This final
duke@0 2069 // case is placed after volatile-stores although it could just as well go
duke@0 2070 // before volatile-loads.
// Emit a memory barrier for the requested ordering constraint.
// All current sparc implementations run in TSO, so only StoreLoad ordering
// can actually be violated by hardware; any constraint that does not include
// StoreLoad is a no-op here, otherwise a membar is emitted.
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
  // Helper function to insert an is-volatile test and memory barrier
  if ((order_constraint & Assembler::StoreLoad) == 0) return;
  __ membar( order_constraint );
}
duke@0 2077
duke@0 2078 // ----------------------------------------------------------------------------
// Load the constant-pool-cache entry for the current bytecode and, if it is
// not yet resolved, call into the VM to resolve it. On exit, Rcache/index
// point at the (now resolved) cache entry; for byte_no == f1_oop the
// resolved f1 oop is additionally loaded into 'result'.
void TemplateTable::resolve_cache_and_index(int byte_no,
                                            Register result,
                                            Register Rcache,
                                            Register index,
                                            size_t index_size) {
  // Depends on cpCacheOop layout!
  Label resolved;

  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  if (byte_no == f1_oop) {
    // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
    // This kind of CP cache entry does not need to match the flags byte, because
    // there is a 1-1 relation between bytecode type and CP entry type.
    assert_different_registers(result, Rcache);
    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
              ConstantPoolCacheEntry::f1_offset(), result);
    __ tst(result);
    __ br(Assembler::notEqual, false, Assembler::pt, resolved);
    __ delayed()->set((int)bytecode(), O1);
  } else {
    // Otherwise the entry is resolved iff the bytecode stored in the
    // entry's indices word (at position byte_no) matches this bytecode.
    assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
    assert(result == noreg, "");  //else change code for setting result
    const int shift_count = (1 + byte_no)*BitsPerByte;

    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
              ConstantPoolCacheEntry::indices_offset(), Lbyte_code);

    __ srl( Lbyte_code, shift_count, Lbyte_code );
    __ and3( Lbyte_code, 0xFF, Lbyte_code );
    __ cmp( Lbyte_code, (int)bytecode());
    __ br( Assembler::equal, false, Assembler::pt, resolved);
    __ delayed()->set((int)bytecode(), O1);  // VM-call argument, in delay slot
  }

  // Select the InterpreterRuntime resolver matching this bytecode.
  address entry;
  switch (bytecode()) {
    case Bytecodes::_getstatic      : // fall through
    case Bytecodes::_putstatic      : // fall through
    case Bytecodes::_getfield       : // fall through
    case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
    case Bytecodes::_invokevirtual  : // fall through
    case Bytecodes::_invokespecial  : // fall through
    case Bytecodes::_invokestatic   : // fall through
    case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
    case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
    case Bytecodes::_fast_aldc      : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
    case Bytecodes::_fast_aldc_w    : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
    default                         : ShouldNotReachHere(); break;
  }
  // first time invocation - must resolve first
  __ call_VM(noreg, entry, O1);
  // Update registers with resolved info
  __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
  if (result != noreg)
    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
              ConstantPoolCacheEntry::f1_offset(), result);
  __ bind(resolved);
}
duke@0 2137
// Resolve (if needed) and load the invoke-related fields of a constant pool
// cache entry: the target method into Rmethod, optionally the itable index
// into Ritable_index, and the flags word into Rflags. The method lives in f2
// for invokevirtual and in f1 otherwise; invokevfinal entries are already
// resolved and skip resolution entirely.
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {
  // Uses both G3_scratch and G4_scratch
  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;
  assert_different_registers(Rcache, Rmethod, Ritable_index);

  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  // determine constant pool cache field offsets
  const int method_offset = in_bytes(
    cp_base_offset +
      (is_invokevirtual
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()
      )
    );
  const int flags_offset = in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::f2_offset());

  if (is_invokevfinal) {
    // Already resolved: just fetch the cache entry and the method from it.
    __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
    __ ld_ptr(Rcache, method_offset, Rmethod);
  } else if (byte_no == f1_oop) {
    // Resolved f1_oop goes directly into 'method' register.
    resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
  } else {
    resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
    __ ld_ptr(Rcache, method_offset, Rmethod);
  }

  if (Ritable_index != noreg) {
    __ ld_ptr(Rcache, index_offset, Ritable_index);
  }
  __ ld_ptr(Rcache, flags_offset, Rflags);
}
duke@0 2182
// Load the field-related words of a constant pool cache entry: flags into
// Rflags, the field offset (f2) into Roffset, and — for static fields — the
// holder object (f1) into Robj.
// The Rcache register must be set (point at the cache entry) before the call.
void TemplateTable::load_field_cp_cache_entry(Register Robj,
                                              Register Rcache,
                                              Register index,
                                              Register Roffset,
                                              Register Rflags,
                                              bool is_static) {
  assert_different_registers(Rcache, Rflags, Roffset);

  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  if (is_static) {
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
  }
}
duke@0 2200
// Post a JVMTI field-access event if a field access watch is set.
// The registers Rcache and index are expected to be set before the call;
// correct values of both are preserved (they are re-fetched after the VM
// call, which may have clobbered them).
void TemplateTable::jvmti_post_field_access(Register Rcache,
                                            Register index,
                                            bool is_static,
                                            bool has_tos) {
  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
    __ load_contents(get_field_access_count_addr, G1_scratch);
    __ tst(G1_scratch);
    __ br(Assembler::zero, false, Assembler::pt, Label1);
    __ delayed()->nop();

    // Advance Rcache from the cpCache base to this entry's field block.
    __ add(Rcache, in_bytes(cp_base_offset), Rcache);

    if (is_static) {
      __ clr(Otos_i);  // static access: no receiver, pass NULL
    } else {
      if (has_tos) {
        // save object pointer before call_VM() clobbers it
        __ push_ptr(Otos_i);  // put object on tos where GC wants it.
      } else {
        // Load top of stack (do not pop the value off the stack);
        __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
      }
      __ verify_oop(Otos_i);
    }
    // Otos_i: object pointer or NULL if static
    // Rcache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
               Otos_i, Rcache);
    if (!is_static && has_tos) {
      __ pop_ptr(Otos_i);  // restore object pointer
      __ verify_oop(Otos_i);
    }
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}
duke@0 2246
// Shared template body for getfield and getstatic. Resolves the CP cache
// entry, posts a JVMTI access event if needed, then dispatches on the tos
// type encoded in the entry's flags to load the field value, push it, and
// (for getfield) patch the bytecode to its fast_*getfield variant. On
// platforms where the membar has an effect, a volatile field additionally
// gets a LoadLoad|LoadStore barrier after the load (the volatile bit is
// kept in Lscratch and tested in each path's branch delay slot).
void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  Register Rcache = G3_scratch;
  Register index = G4_scratch;
  Register Rclass = Rcache;       // aliases Rcache: holder/receiver replaces it
  Register Roffset= G4_scratch;   // aliases index: field offset replaces it
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
  jvmti_post_field_access(Rcache, index, is_static, false);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  if (!is_static) {
    pop_and_check_object(Rclass);   // receiver from the stack, with null check
  } else {
    __ verify_oop(Rclass);
  }

  Label exit;

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);

  if (__ membar_has_effect(membar_bits)) {
    // Get volatile flag
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
  }

  Label checkVolatile;

  // compute field type
  Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
  __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
  // Make sure we don't need to mask Rflags for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();

  // Check atos before itos for getstatic, more likely (in Queens at least)
  __ cmp(Rflags, atos);
  __ br(Assembler::notEqual, false, Assembler::pt, notObj);
  __ delayed() ->cmp(Rflags, itos);   // next type test set up in delay slot

  // atos
  __ load_heap_oop(Rclass, Roffset, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);        // volatile bit tested at checkVolatile

  __ bind(notObj);

  // cmp(Rflags, itos);
  __ br(Assembler::notEqual, false, Assembler::pt, notInt);
  __ delayed() ->cmp(Rflags, ltos);

  // itos
  __ ld(Rclass, Roffset, Otos_i);
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notInt);

  // cmp(Rflags, ltos);
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed() ->cmp(Rflags, btos);

  // ltos
  // load must be atomic
  __ ld_long(Rclass, Roffset, Otos_l);
  __ push(ltos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notLong);

  // cmp(Rflags, btos);
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed() ->cmp(Rflags, ctos);

  // btos
  __ ldsb(Rclass, Roffset, Otos_i);   // sign-extending byte load
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notByte);

  // cmp(Rflags, ctos);
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed() ->cmp(Rflags, stos);

  // ctos
  __ lduh(Rclass, Roffset, Otos_i);   // zero-extending halfword load (char)
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notChar);

  // cmp(Rflags, stos);
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed() ->cmp(Rflags, ftos);

  // stos
  __ ldsh(Rclass, Roffset, Otos_i);   // sign-extending halfword load (short)
  __ push(itos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notShort);


  // cmp(Rflags, ftos);
  __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
  __ delayed() ->tst(Lscratch);       // last test: delay slot preloads tst

  // ftos
  __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
  __ push(ftos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notFloat);


  // dtos (only remaining possibility; falls through to checkVolatile)
  __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
  __ push(dtos);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
  }

  __ bind(checkVolatile);
  if (__ membar_has_effect(membar_bits)) {
    // __ tst(Lscratch); executed in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
  }

  __ bind(exit);
}
duke@0 2413
duke@0 2414
// getfield bytecode: non-static field read; delegates to the shared
// getfield/getstatic template generator.
void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}
duke@0 2418
// getstatic bytecode: static field read; delegates to the shared
// getfield/getstatic template generator.
void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}
duke@0 2422
duke@0 2423
duke@0 2424 void TemplateTable::fast_accessfield(TosState state) {
duke@0 2425 transition(atos, state);
duke@0 2426 Register Rcache = G3_scratch;
duke@0 2427 Register index = G4_scratch;
duke@0 2428 Register Roffset = G4_scratch;
duke@0 2429 Register Rflags = Rcache;
duke@0 2430 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
duke@0 2431
duke@0 2432 __ get_cache_and_index_at_bcp(Rcache, index, 1);
duke@0 2433 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
duke@0 2434
twisti@727 2435 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
duke@0 2436
duke@0 2437 __ null_check(Otos_i);
duke@0 2438 __ verify_oop(Otos_i);
duke@0 2439
duke@0 2440 Label exit;
duke@0 2441
duke@0 2442 Assembler::Membar_mask_bits membar_bits =
duke@0 2443 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
duke@0 2444 if (__ membar_has_effect(membar_bits)) {
duke@0 2445 // Get volatile flag
twisti@727 2446 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
duke@0 2447 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
duke@0 2448 }
duke@0 2449
duke@0 2450 switch (bytecode()) {
duke@0 2451 case Bytecodes::_fast_bgetfield:
duke@0 2452 __ ldsb(Otos_i, Roffset, Otos_i);
duke@0 2453 break;
duke@0 2454 case Bytecodes::_fast_cgetfield:
duke@0 2455 __ lduh(Otos_i, Roffset, Otos_i);
duke@0 2456 break;
duke@0 2457 case Bytecodes::_fast_sgetfield:
duke@0 2458 __ ldsh(Otos_i, Roffset, Otos_i);
duke@0 2459 break;
duke@0 2460 case Bytecodes::_fast_igetfield:
duke@0 2461 __ ld(Otos_i, Roffset, Otos_i);
duke@0 2462 break;
duke@0 2463 case Bytecodes::_fast_lgetfield:
duke@0 2464 __ ld_long(Otos_i, Roffset, Otos_l);
duke@0 2465 break;
duke@0 2466 case Bytecodes::_fast_fgetfield:
duke@0 2467 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
duke@0 2468 break;
duke@0 2469 case Bytecodes::_fast_dgetfield:
duke@0 2470 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
duke@0 2471 break;
duke@0 2472 case Bytecodes::_fast_agetfield:
coleenp@113 2473 __ load_heap_oop(Otos_i, Roffset, Otos_i);
duke@0 2474 break;
duke@0 2475 default:
duke@0 2476 ShouldNotReachHere();
duke@0 2477 }
duke@0 2478
duke@0 2479 if (__ membar_has_effect(membar_bits)) {
duke@0 2480 __ btst(Lscratch, Rflags);
duke@0 2481 __ br(Assembler::zero, false, Assembler::pt, exit);
duke@0 2482 __ delayed()->nop();
duke@0 2483 volatile_barrier(membar_bits);
duke@0 2484 __ bind(exit);
duke@0 2485 }
duke@0 2486
duke@0 2487 if (state == atos) {
duke@0 2488 __ verify_oop(Otos_i); // does not blow flags!
duke@0 2489 }
duke@0 2490 }
duke@0 2491
// Emits the JVMTI field-modification notification for the _fast_*putfield
// bytecodes.  Skips straight to 'done' when no modification watch is set
// (cheap counter test) so the fast path pays almost nothing.
void TemplateTable::jvmti_post_fast_field_mod() {
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label done;
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G4_scratch);
    __ tst(G4_scratch);
    __ br(Assembler::zero, false, Assembler::pt, done);
    __ delayed()->nop();
    __ pop_ptr(G4_scratch);      // copy the object pointer from tos
    __ verify_oop(G4_scratch);
    __ push_ptr(G4_scratch);     // put the object pointer back on tos
    __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) {  // save tos values before call_VM() clobbers them
    case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
    // get words in right order for use as jvalue object
    case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
    // G4_scratch:  object pointer
    // G1_scratch: cache entry pointer
    // G3_scratch: jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
    switch (bytecode()) {             // restore tos values
    case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
    case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
    case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
    case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
    }
    __ bind(done);
  }
}
duke@0 2539
// Emits the JVMTI field-modification notification for the slow (non-fast)
// putfield/putstatic path.
// The registers Rcache and index expected to be set before call.
// The function may destroy various registers, just not the Rcache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before we take
    // the time to call into the VM.
    Label Label1;
    assert_different_registers(Rcache, index, G1_scratch);
    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
    __ load_contents(get_field_modification_count_addr, G1_scratch);
    __ tst(G1_scratch);
    __ br(Assembler::zero, false, Assembler::pt, Label1);
    __ delayed()->nop();

    // The Rcache and index registers have been already set.
    // This allows to eliminate this call but the Rcache and index
    // registers must be correspondingly used after this line.
    __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);

    __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
    if (is_static) {
      // Life is simple.  Null out the object pointer.
      __ clr(G4_scratch);
    } else {
      Register Rflags = G1_scratch;
      // Life is harder. The stack holds the value on top, followed by the
      // object.  We don't know the size of the value, though; it could be
      // one or two words depending on its type. As a result, we must find
      // the type to determine where the object is.

      Label two_word, valsizeknown;
      __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
      __ mov(Lesp, G4_scratch);
      // Extract the tos type from the flags word (field type lives in the
      // top bits above tosBits).
      __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
      // Make sure we don't need to mask Rflags for tosBits after the above shift
      ConstantPoolCacheEntry::verify_tosBits();
      // long and double occupy two expression-stack slots; everything else one.
      __ cmp(Rflags, ltos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->cmp(Rflags, dtos);
      __ br(Assembler::equal, false, Assembler::pt, two_word);
      __ delayed()->nop();
      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
      __ br(Assembler::always, false, Assembler::pt, valsizeknown);
      __ delayed()->nop();
      __ bind(two_word);

      __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));

      __ bind(valsizeknown);
      // setup object pointer
      __ ld_ptr(G4_scratch, 0, G4_scratch);
      __ verify_oop(G4_scratch);
    }
    // setup pointer to jvalue object
    __ mov(Lesp, G1_scratch);  __ inc(G1_scratch, wordSize);
    // G4_scratch:  object pointer or NULL if static
    // G3_scratch: cache entry pointer
    // G1_scratch: jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
               G4_scratch, G3_scratch, G1_scratch);
    // call_VM may have invalidated the cache registers; reload for the caller.
    __ get_cache_and_index_at_bcp(Rcache, index, 1);
    __ bind(Label1);
  }
}
duke@0 2606
// Pops the receiver oop from the expression stack into 'r', null-checks it
// (throws NullPointerException at the current bcp if null), and verifies it
// in debug builds.
void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r);  // for field access must check obj.
  __ verify_oop(r);
}
duke@0 2612
// Shared template for putfield (is_static == false) and putstatic.
// Resolves the cp cache entry, posts JVMTI notification, dispatches on the
// field's tos type, stores the value, and (for non-static) rewrites the
// bytecode to its _fast_*putfield form.  Volatile fields are bracketed by
// a LoadStore|StoreStore barrier before and a StoreLoad barrier after the
// store.  Each type-dispatch branch pre-loads the next comparison (or the
// volatile tst) in its delay slot.
void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);
  Register Rcache = G3_scratch;
  Register index  = G4_scratch;
  Register Rclass = Rcache;          // aliases Rcache once the entry is loaded
  Register Roffset= G4_scratch;      // aliases index
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
  jvmti_post_field_mod(Rcache, index, is_static);
  load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    // Lscratch keeps the (volatile bit of) flags for the whole template;
    // every branch to checkVolatile tests it in the delay slot.
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);

    if (__ membar_has_effect(read_bits)) {
      // Pre-store barrier for volatile fields.
      __ tst(Lscratch);
      __ br(Assembler::zero, false, Assembler::pt, notVolatile);
      __ delayed()->nop();
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  // Extract field type from the flags word.
  __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
  // Make sure we don't need to mask Rflags for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();

  // compute field type
  Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;

  if (is_static) {
    // putstatic with object type most likely, check that first
    __ cmp(Rflags, atos );
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed() ->cmp(Rflags, itos );

    // atos
    __ pop_ptr();
    __ verify_oop(Otos_i);

    // GC-barriered oop store (handles card marking / SATB per collector kind).
    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);

    __ ba(false, checkVolatile);
    __ delayed()->tst(Lscratch);

    __ bind(notObj);

    // cmp(Rflags, itos );
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed() ->cmp(Rflags, btos );

    // itos
    __ pop_i();
    __ st(Otos_i, Rclass, Roffset);
    __ ba(false, checkVolatile);
    __ delayed()->tst(Lscratch);

    __ bind(notInt);

  } else {
    // putfield with int type most likely, check that first
    __ cmp(Rflags, itos );
    __ br(Assembler::notEqual, false, Assembler::pt, notInt);
    __ delayed() ->cmp(Rflags, atos );

    // itos
    __ pop_i();
    pop_and_check_object(Rclass);
    __ st(Otos_i, Rclass, Roffset);
    patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
    __ ba(false, checkVolatile);
    __ delayed()->tst(Lscratch);

    __ bind(notInt);
    // cmp(Rflags, atos );
    __ br(Assembler::notEqual, false, Assembler::pt, notObj);
    __ delayed() ->cmp(Rflags, btos );

    // atos
    __ pop_ptr();
    pop_and_check_object(Rclass);
    __ verify_oop(Otos_i);

    // GC-barriered oop store (handles card marking / SATB per collector kind).
    do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);

    patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
    __ ba(false, checkVolatile);
    __ delayed()->tst(Lscratch);

    __ bind(notObj);
  }

  // cmp(Rflags, btos );
  __ br(Assembler::notEqual, false, Assembler::pt, notByte);
  __ delayed() ->cmp(Rflags, ltos );

  // btos
  __ pop_i();
  if (!is_static) pop_and_check_object(Rclass);
  __ stb(Otos_i, Rclass, Roffset);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notByte);

  // cmp(Rflags, ltos );
  __ br(Assembler::notEqual, false, Assembler::pt, notLong);
  __ delayed() ->cmp(Rflags, ctos );

  // ltos
  __ pop_l();
  if (!is_static) pop_and_check_object(Rclass);
  __ st_long(Otos_l, Rclass, Roffset);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notLong);

  // cmp(Rflags, ctos );
  __ br(Assembler::notEqual, false, Assembler::pt, notChar);
  __ delayed() ->cmp(Rflags, stos );

  // ctos (char)
  __ pop_i();
  if (!is_static) pop_and_check_object(Rclass);
  __ sth(Otos_i, Rclass, Roffset);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notChar);
  // cmp(Rflags, stos );
  __ br(Assembler::notEqual, false, Assembler::pt, notShort);
  __ delayed() ->cmp(Rflags, ftos );

  // stos (short) -- same sth as char; only the bytecode rewrite differs
  __ pop_i();
  if (!is_static) pop_and_check_object(Rclass);
  __ sth(Otos_i, Rclass, Roffset);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notShort);
  // cmp(Rflags, ftos );
  __ br(Assembler::notZero, false, Assembler::pt, notFloat);
  __ delayed()->nop();

  // ftos
  __ pop_f();
  if (!is_static) pop_and_check_object(Rclass);
  __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
  }
  __ ba(false, checkVolatile);
  __ delayed()->tst(Lscratch);

  __ bind(notFloat);

  // dtos -- last case, falls through to checkVolatile (hence explicit tst below)
  __ pop_d();
  if (!is_static) pop_and_check_object(Rclass);
  __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
  if (!is_static) {
    patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
  }

  __ bind(checkVolatile);
  __ tst(Lscratch);

  if (__ membar_has_effect(write_bits)) {
    // __ tst(Lscratch); in delay slot
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    // Post-store barrier for volatile fields.
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}
duke@0 2810
// Fast-path putfield for a rewritten _fast_*putfield bytecode: the cp cache
// entry is already resolved and the value to store is on tos in 'state'.
// Volatile fields get a LoadStore|StoreStore barrier before and a StoreLoad
// barrier after the store.
void TemplateTable::fast_storefield(TosState state) {
  transition(state, vtos);
  Register Rcache = G3_scratch;
  Register Rclass = Rcache;        // aliases Rcache; reused as receiver after cache reads
  Register Roffset= G4_scratch;
  Register Rflags = G1_scratch;
  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();

  jvmti_post_fast_field_mod();

  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);

  Assembler::Membar_mask_bits read_bits =
    Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
  Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;

  Label notVolatile, checkVolatile, exit;
  if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
    // Isolate the volatileField bit of the flags word into Lscratch.
    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ and3(Rflags, Lscratch, Lscratch);
    if (__ membar_has_effect(read_bits)) {
      __ tst(Lscratch);
      __ br(Assembler::zero, false, Assembler::pt, notVolatile);
      __ delayed()->nop();
      volatile_barrier(read_bits);
      __ bind(notVolatile);
    }
  }

  // f2 holds the field offset in bytes.
  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
  pop_and_check_object(Rclass);

  // Store according to the rewritten bytecode.
  switch (bytecode()) {
    case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_cputfield: /* fall through */
    case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
    case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
    case Bytecodes::_fast_fputfield:
      __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
      break;
    case Bytecodes::_fast_dputfield:
      __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
      break;
    case Bytecodes::_fast_aputfield:
      // GC-barriered oop store (card marking / SATB per collector kind).
      do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
      break;
    default:
      ShouldNotReachHere();
  }

  if (__ membar_has_effect(write_bits)) {
    __ tst(Lscratch);
    __ br(Assembler::zero, false, Assembler::pt, exit);
    __ delayed()->nop();
    volatile_barrier(Assembler::StoreLoad);
    __ bind(exit);
  }
}
duke@0 2871
duke@0 2872
// putfield bytecode: non-static field store; delegates to the shared
// putfield/putstatic template generator.
void TemplateTable::putfield(int byte_no) {
  putfield_or_static(byte_no, false);
}
duke@0 2876
// putstatic bytecode: static field store; delegates to the shared
// putfield/putstatic template generator.
void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}
duke@0 2880
duke@0 2881
// Fused fast path for the common "aload_0; fast_*getfield" pair: loads the
// receiver from local 0 and reads the field in one template.  Only atos,
// itos and ftos variants exist.
void TemplateTable::fast_xaccess(TosState state) {
  transition(vtos, state);
  Register Rcache = G3_scratch;
  Register Roffset = G4_scratch;
  Register Rflags = G4_scratch;     // aliases Roffset; only live after the field load
  Register Rreceiver = Lscratch;

  // Receiver is local slot 0.
  __ ld_ptr(Llocals, 0, Rreceiver);

  // access constant pool cache  (is resolved)
  __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
  __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
  __ add(Lbcp, 1, Lbcp);       // needed to report exception at the correct bcp

  __ verify_oop(Rreceiver);
  __ null_check(Rreceiver);
  if (state == atos) {
    __ load_heap_oop(Rreceiver, Roffset, Otos_i);
  } else if (state == itos) {
    __ ld (Rreceiver, Roffset, Otos_i) ;
  } else if (state == ftos) {
    __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
  } else {
    ShouldNotReachHere();
  }

  Assembler::Membar_mask_bits membar_bits =
    Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
  if (__ membar_has_effect(membar_bits)) {

    // Get is_volatile value in Rflags and check if membar is needed
    __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);

    // Test volatile
    Label notVolatile;
    __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
    __ btst(Rflags, Lscratch);
    __ br(Assembler::zero, false, Assembler::pt, notVolatile);
    __ delayed()->nop();
    volatile_barrier(membar_bits);
    __ bind(notVolatile);
  }

  __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  // Undo the bcp adjustment made above.
  __ sub(Lbcp, 1, Lbcp);
}
duke@0 2928
duke@0 2929 //----------------------------------------------------------------------------------------------------
duke@0 2930 // Calls
duke@0 2931
// Call counting is handled by the interpreter's invocation-counter machinery
// on SPARC; this template must never be reached.
void TemplateTable::count_calls(Register method, Register temp) {
  // implemented elsewhere
  ShouldNotReachHere();
}
duke@0 2936
// Dispatches a virtual call: indexes the receiver klass's vtable with
// Rindex, loads the target methodOop into G5_method, and jumps via the
// interpreter call stub.  Rret must already hold the return address.
void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
  Register Rtemp = G4_scratch;
  Register Rcall = Rindex;
  assert_different_registers(Rcall, G5_method, Gargs, Rret);

  // get target methodOop & entry point
  const int base = instanceKlass::vtable_start_offset() * wordSize;
  if (vtableEntry::size() % 3 == 0) {
    // scale the vtable index by 12:
    // SPARC has no multiply-by-12 shift, so compute index*4x + index*8x.
    int one_third = vtableEntry::size() / 3;
    __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
    __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
    __ add(Rindex, Rtemp, Rindex);
  } else {
    // scale the vtable index by 8:
    __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
  }

  __ add(Rrecv, Rindex, Rrecv);
  __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);

  __ call_from_interpreter(Rcall, Gargs, Rret);
}
duke@0 2960
// invokevirtual bytecode.  If the resolved method is vfinal, rewrites the
// bytecode to _fast_invokevfinal and dispatches directly; otherwise does a
// vtable dispatch on the receiver's klass.
void TemplateTable::invokevirtual(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp = G4_scratch;
  Register Rret = Lscratch;
  Register Rrecv = G5_method;
  Label notFinal;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch);
  __ btst(Rret, G4_scratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->and3(Rret, 0xFF, G4_scratch);      // gets number of parameters

  // vfinal path: patch bytecode and invoke directly (no vtable lookup).
  patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);

  invokevfinal_helper(Rscratch, Rret);

  __ bind(notFinal);

  __ mov(G5_method, Rscratch);  // better scratch register
  __ load_receiver(G4_scratch, O0);  // gets receiverOop
  // receiver is in O0
  __ verify_oop(O0);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret,  LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);         // get return address

  // get receiver klass
  __ null_check(O0, oopDesc::klass_offset_in_bytes());
  __ load_klass(O0, Rrecv);
  __ verify_oop(Rrecv);

  __ profile_virtual_call(Rrecv, O4);

  generate_vtable_call(Rrecv, Rscratch, Rret);
}
duke@0 3009
// _fast_invokevfinal: rewritten invokevirtual of a final method -- the
// methodOop comes straight from the cp cache entry; no vtable lookup.
void TemplateTable::fast_invokevfinal(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f2_byte, "use this argument");

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
                             /*is_invokevfinal*/true, false);
  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
  invokevfinal_helper(G3_scratch, Lscratch);
}
duke@0 3019
// Common tail for invokevirtual-of-final and _fast_invokevfinal: loads the
// receiver, null-checks it, resolves the return address by tos type, and
// calls through the interpreter entry.  Expects the methodOop in G5_method
// and the cache flags word in Rret.
void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
  Register Rtemp = G4_scratch;

  __ verify_oop(G5_method);

  // Load receiver from stack slot
  __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_final_call(O4);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret,  LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);         // get return address


  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
duke@0 3047
// invokespecial bytecode: non-virtual dispatch (private methods, <init>,
// super calls) -- methodOop comes from the cp cache f1 word; receiver is
// null-checked but no vtable lookup occurs.
void TemplateTable::invokespecial(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp = G4_scratch;
  Register Rret = Lscratch;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore

  __ verify_oop(G5_method);

  __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
  __ load_receiver(G4_scratch, O0);

  // receiver NULL check
  __ null_check(O0);

  __ profile_call(O4);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret,  LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);         // get return address

  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
duke@0 3081
// invokestatic bytecode: static dispatch -- methodOop from the cp cache f1
// word; no receiver, so no null check.
void TemplateTable::invokestatic(int byte_no) {
  transition(vtos, vtos);
  assert(byte_no == f1_byte, "use this argument");

  Register Rscratch = G3_scratch;
  Register Rtemp = G4_scratch;
  Register Rret = Lscratch;

  load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
  __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore

  __ verify_oop(G5_method);

  __ profile_call(O4);

  // get return address
  AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
  __ set(table, Rtemp);
  __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);          // get return type
  // Make sure we don't need to mask Rret for tosBits after the above shift
  ConstantPoolCacheEntry::verify_tosBits();
  __ sll(Rret,  LogBytesPerWord, Rret);
  __ ld_ptr(Rtemp, Rret, Rret);         // get return address

  // do the call
  __ call_from_interpreter(Rscratch, Gargs, Rret);
}
duke@0 3109
duke@0 3110
// Handles invokeinterface of a java.lang.Object method: if the resolved
// method is vfinal, call the methodOop in the f2 word directly; otherwise
// fall back to a normal vtable dispatch on the receiver klass.
void TemplateTable::invokeinterface_object_method(Register RklassOop,
                                                  Register Rcall,
                                                  Register Rret,
                                                  Register Rflags) {
  Register Rscratch = G4_scratch;
  Register Rindex = Lscratch;

  assert_different_registers(Rscratch, Rindex, Rret);

  Label notFinal;

  // Check for vfinal
  __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
  __ btst(Rflags, Rscratch);
  __ br(Assembler::zero, false, Assembler::pt, notFinal);
  __ delayed()->nop();

  __ profile_final_call(O4);

  // do the call - the index (f2) contains the methodOop
  assert_different_registers(G5_method, Gargs, Rcall);
  __ mov(Rindex, G5_method);
  __ call_from_interpreter(Rcall, Gargs, Rret);
  __ bind(notFinal);

  __ profile_virtual_call(RklassOop, O4);
  generate_vtable_call(RklassOop, Rindex, Rret);
}
duke@0 3139
duke@0 3140
// Generates the interpreter template for the invokeinterface bytecode:
// loads the resolved cp cache entry, null-checks the receiver, handles the
// java.lang.Object virtual-method special case, then searches the
// receiver's itable for the target interface and calls the selected method.
// Throws IncompatibleClassChangeError if the interface is not implemented,
// and AbstractMethodError if the itable slot is empty.
duke@0 3141 void TemplateTable::invokeinterface(int byte_no) {
duke@0 3142 transition(vtos, vtos);
jrose@1485 3143 assert(byte_no == f1_byte, "use this argument");
duke@0 3144
duke@0 3145 Register Rscratch = G4_scratch;
duke@0 3146 Register Rret = G3_scratch;
duke@0 3147 Register Rindex = Lscratch;
duke@0 3148 Register Rinterface = G1_scratch;
duke@0 3149 Register RklassOop = G5_method;
duke@0 3150 Register Rflags = O1;
duke@0 3151 assert_different_registers(Rscratch, G5_method);
duke@0 3152
// f1 = interface klass (Rinterface), f2 = itable index (Rindex),
// flags word in Rflags.
jrose@1485 3153 load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
duke@0 3154 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
duke@0 3155
duke@0 3156 // get receiver
// Low byte of flags holds the parameter-slot count; use it to index the
// expression stack and load the receiver into O0.
duke@0 3157 __ and3(Rflags, 0xFF, Rscratch); // gets number of parameters
duke@0 3158 __ load_receiver(Rscratch, O0);
duke@0 3159 __ verify_oop(O0);
duke@0 3160
duke@0 3161 __ mov(Rflags, Rret);
duke@0 3162
duke@0 3163 // get return address
// Index the return-address table by the return type encoded in the
// tosBits field of the flags.
twisti@727 3164 AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
twisti@727 3165 __ set(table, Rscratch);
duke@0 3166 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
duke@0 3167 // Make sure we don't need to mask Rret for tosBits after the above shift
duke@0 3168 ConstantPoolCacheEntry::verify_tosBits();
duke@0 3169 __ sll(Rret, LogBytesPerWord, Rret);
duke@0 3170 __ ld_ptr(Rscratch, Rret, Rret); // get return address
duke@0 3171
duke@0 3172 // get receiver klass
// Implicit/explicit null check on the receiver before reading its klass.
duke@0 3173 __ null_check(O0, oopDesc::klass_offset_in_bytes());
coleenp@113 3174 __ load_klass(O0, RklassOop);
duke@0 3175 __ verify_oop(RklassOop);
duke@0 3176
duke@0 3177 // Special case of invokeinterface called for virtual method of
duke@0 3178 // java.lang.Object. See cpCacheOop.cpp for details.
duke@0 3179 // This code isn't produced by javac, but could be produced by
duke@0 3180 // another compliant java compiler.
duke@0 3181 Label notMethod;
duke@0 3182 __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
duke@0 3183 __ btst(Rflags, Rscratch);
duke@0 3184 __ br(Assembler::zero, false, Assembler::pt, notMethod);
duke@0 3185 __ delayed()->nop();
duke@0 3186
duke@0 3187 invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
duke@0 3188
duke@0 3189 __ bind(notMethod);
duke@0 3190
duke@0 3191 __ profile_virtual_call(RklassOop, O4);
duke@0 3192
duke@0 3193 //
duke@0 3194 // find entry point to call
duke@0 3195 //
duke@0 3196
duke@0 3197 // compute start of first itableOffsetEntry (which is at end of vtable)
duke@0 3198 const int base = instanceKlass::vtable_start_offset() * wordSize;
duke@0 3199 Label search;
// Rflags is dead from here on; reuse it as a temp.
duke@0 3200 Register Rtemp = Rflags;
duke@0 3201
twisti@727 3202 __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
duke@0 3203 if (align_object_offset(1) > 1) {
duke@0 3204 __ round_to(Rtemp, align_object_offset(1));
duke@0 3205 }
duke@0 3206 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rscratch *= 4;
// Add the vtable start offset; use an extra register when the constant
// does not fit in a simm13 immediate.
duke@0 3207 if (Assembler::is_simm13(base)) {
duke@0 3208 __ add(Rtemp, base, Rtemp);
duke@0 3209 } else {
duke@0 3210 __ set(base, Rscratch);
duke@0 3211 __ add(Rscratch, Rtemp, Rtemp);
duke@0 3212 }
// Rscratch now points at the first itableOffsetEntry of the receiver klass.
duke@0 3213 __ add(RklassOop, Rtemp, Rscratch);
duke@0 3214
// Linear search of the itable for the target interface klass.
duke@0 3215 __ bind(search);
duke@0 3216
duke@0 3217 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
duke@0 3218 {
duke@0 3219 Label ok;
duke@0 3220
duke@0 3221 // Check that entry is non-null. Null entries are probably a bytecode
twisti@605 3222 // problem. If the interface isn't implemented by the receiver class,
duke@0 3223 // the VM should throw IncompatibleClassChangeError. linkResolver checks
duke@0 3224 // this too but that's only if the entry isn't already resolved, so we
duke@0 3225 // need to check again.
duke@0 3226 __ br_notnull( Rtemp, false, Assembler::pt, ok);
duke@0 3227 __ delayed()->nop();
duke@0 3228 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
duke@0 3229 __ should_not_reach_here();
duke@0 3230 __ bind(ok);
duke@0 3231 __ verify_oop(Rtemp);
duke@0 3232 }
duke@0 3233
duke@0 3234 __ verify_oop(Rinterface);
duke@0 3235
// Loop while the entry's interface doesn't match; the annulled delay slot
// advances Rscratch to the next itableOffsetEntry only when the branch
// (back to search) is taken.
duke@0 3236 __ cmp(Rinterface, Rtemp);
duke@0 3237 __ brx(Assembler::notEqual, true, Assembler::pn, search);
duke@0 3238 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
duke@0 3239
duke@0 3240 // entry found and Rscratch points to it
duke@0 3241 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
duke@0 3242
// Index the itableMethodEntry array with the method index (f2) to get the
// target methodOop.
duke@0 3243 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
duke@0 3244 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= 8;
duke@0 3245 __ add(Rscratch, Rindex, Rscratch);
duke@0 3246 __ ld_ptr(RklassOop, Rscratch, G5_method);
duke@0 3247
duke@0 3248 // Check for abstract method error.
// A null methodOop here means the receiver class failed to provide an
// implementation for this interface method slot.
duke@0 3249 {
duke@0 3250 Label ok;
duke@0 3251 __ tst(G5_method);
duke@0 3252 __ brx(Assembler::notZero, false, Assembler::pt, ok);
duke@0 3253 __ delayed()->nop();
duke@0 3254 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
duke@0 3255 __ should_not_reach_here();
duke@0 3256 __ bind(ok);
duke@0 3257 }
duke@0 3258
duke@0 3259 Register Rcall = Rinterface;
duke@0 3260 assert_different_registers(Rcall, G5_method, Gargs, Rret);
duke@0 3261
duke@0 3262 __ verify_oop(G5_method);
duke@0 3263 __ call_from_interpreter(Rcall, Gargs, Rret);
duke@0 3264
duke@0 3265 }
duke@0 3266
duke@0 3267
// Generates the interpreter template for the invokedynamic bytecode (JSR 292):
// loads the CallSite object from the cp cache (f1), extracts its target
// MethodHandle, and jumps to the method-handle entry point.  If invokedynamic
// support is disabled, emits a call that throws IncompatibleClassChangeError.
jrose@726 3268 void TemplateTable::invokedynamic(int byte_no) {
jrose@726 3269 transition(vtos, vtos);
jrose@1485 3270 assert(byte_no == f1_oop, "use this argument");
jrose@726 3271
jrose@726 3272 if (!EnableInvokeDynamic) {
jrose@726 3273 // We should not encounter this bytecode if !EnableInvokeDynamic.
jrose@726 3274 // The verifier will stop it. However, if we get past the verifier,
jrose@726 3275 // this will stop the thread in a reasonable way, without crashing the JVM.
jrose@726 3276 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
jrose@726 3277 InterpreterRuntime::throw_IncompatibleClassChangeError));
jrose@726 3278 // the call_VM checks for exception, so we should never return here.
jrose@726 3279 __ should_not_reach_here();
jrose@726 3280 return;
jrose@726 3281 }
jrose@726 3282
twisti@1423 3283 // G5: CallSite object (f1)
twisti@1423 3284 // XX: unused (f2)
twisti@1423 3285 // XX: flags (unused)
twisti@1423 3286
twisti@1423 3287 Register G5_callsite = G5_method;
twisti@1423 3288 Register Rscratch = G3_scratch;
twisti@1423 3289 Register Rtemp = G1_scratch;
twisti@1423 3290 Register Rret = Lscratch;
twisti@1423 3291
jrose@1485 3292 load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
jrose@1485 3293 /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
twisti@1423 3294 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
twisti@1423 3295
twisti@1423 3296 __ verify_oop(G5_callsite);
twisti@1423 3297
twisti@1423 3298 // profile this call
twisti@1423 3299 __ profile_call(O4);
twisti@1423 3300
twisti@1423 3301 // get return address
// Index the return-address table by the return type encoded in the tosBits
// field of the flags word (loaded into Rret above).
twisti@1423 3302 AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
twisti@1423 3303 __ set(table, Rtemp);
twisti@1423 3304 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
twisti@1423 3305 // Make sure we don't need to mask Rret for tosBits after the above shift
twisti@1423 3306 ConstantPoolCacheEntry::verify_tosBits();
twisti@1423 3307 __ sll(Rret, LogBytesPerWord, Rret);
twisti@1423 3308 __ ld_ptr(Rtemp, Rret, Rret); // get return address
twisti@1423 3309
// Load the CallSite's target MethodHandle; the field offset is computed
// lazily via delayed_value.  A null target is an error.
jrose@2204 3310 __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
twisti@1423 3311 __ null_check(G3_method_handle);
twisti@1423 3312
twisti@1423 3313 // Adjust Rret first so Llast_SP can be same as Rret
twisti@1423 3314 __ add(Rret, -frame::pc_return_offset, O7);
twisti@1423 3315 __ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
twisti@1423 3316 __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
twisti@1423 3317 // Record SP so we can remove any stack space allocated by adapter transition
twisti@1423 3318 __ delayed()->mov(SP, Llast_SP);
jrose@726 3319 }
jrose@726 3320
jrose@726 3321
duke@0 3322 //----------------------------------------------------------------------------------------------------
duke@0 3323 // Allocation
duke@0 3324
// Generates the interpreter template for the new bytecode.  Fast paths:
// (1) bump-pointer allocation in the thread-local allocation buffer (TLAB),
// (2) CAS-based allocation in the shared Eden (when inline contiguous
// allocation is supported and CMS incremental mode is off).  Falls back to
// InterpreterRuntime::_new for unresolved classes, classes that are not
// fully initialized, slow-path layouts (finalizer/abstract/interface), or
// when both fast allocation paths fail.  Leaves the new object in Otos_i.
duke@0 3325 void TemplateTable::_new() {
duke@0 3326 transition(vtos, atos);
duke@0 3327
duke@0 3328 Label slow_case;
duke@0 3329 Label done;
duke@0 3330 Label initialize_header;
duke@0 3331 Label initialize_object; // including clearing the fields
duke@0 3332
duke@0 3333 Register RallocatedObject = Otos_i;
duke@0 3334 Register RinstanceKlass = O1;
duke@0 3335 Register Roffset = O3;
duke@0 3336 Register Rscratch = O4;
duke@0 3337
// Fetch the constant-pool index operand and the cpool/tags arrays.
duke@0 3338 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
duke@0 3339 __ get_cpool_and_tags(Rscratch, G3_scratch);
duke@0 3340 // make sure the class we're about to instantiate has been resolved
bobv@1601 3341 // This is done before loading instanceKlass to be consistent with the order
bobv@1601 3342 // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put)
duke@0 3343 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
duke@0 3344 __ ldub(G3_scratch, Roffset, G3_scratch);
duke@0 3345 __ cmp(G3_scratch, JVM_CONSTANT_Class);
duke@0 3346 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
duke@0 3347 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
bobv@1601 3348 // get instanceKlass
duke@0 3349 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
duke@0 3350 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
duke@0 3351 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
duke@0 3352
duke@0 3353 // make sure klass is fully initialized:
// An uninitialized class must go through the runtime so <clinit> runs.
duke@0 3354 __ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
duke@0 3355 __ cmp(G3_scratch, instanceKlass::fully_initialized);
duke@0 3356 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
duke@0 3357 __ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
duke@0 3358
duke@0 3359 // get instance_size in instanceKlass (already aligned)
duke@0 3360 //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
duke@0 3361
duke@0 3362 // make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class
duke@0 3363 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
duke@0 3364 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
duke@0 3365 __ delayed()->nop();
duke@0 3366
duke@0 3367 // allocate the instance
duke@0 3368 // 1) Try to allocate in the TLAB
duke@0 3369 // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
duke@0 3370 // 3) if the above fails (or is not applicable), go to a slow case
duke@0 3371 // (creates a new TLAB, etc.)
duke@0 3372
duke@0 3373 const bool allow_shared_alloc =
duke@0 3374 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
duke@0 3375
duke@0 3376 if(UseTLAB) {
duke@0 3377 Register RoldTopValue = RallocatedObject;
duke@0 3378 Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch;
duke@0 3379 Register RnewTopValue = G1_scratch;
duke@0 3380 Register RendValue = Rscratch;
duke@0 3381 Register RfreeValue = RnewTopValue;
duke@0 3382
duke@0 3383 // check if we can allocate in the TLAB
duke@0 3384 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject
duke@0 3385 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
duke@0 3386 __ add(RoldTopValue, Roffset, RnewTopValue);
duke@0 3387
duke@0 3388 // if there is enough space, we do not CAS and do not clear
// TLAB bump: store the new top in the (annulled) delay slot only when the
// branch to the initialize path is taken.
duke@0 3389 __ cmp(RnewTopValue, RendValue);
duke@0 3390 if(ZeroTLAB) {
duke@0 3391 // the fields have already been cleared
duke@0 3392 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
duke@0 3393 } else {
duke@0 3394 // initialize both the header and fields
duke@0 3395 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
duke@0 3396 }
duke@0 3397 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
duke@0 3398
duke@0 3399 if (allow_shared_alloc) {
phh@1988 3400 // Check if tlab should be discarded (refill_waste_limit >= free)
phh@1988 3401 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
phh@1988 3402 __ sub(RendValue, RoldTopValue, RfreeValue);
// Convert remaining TLAB bytes to heap words to match the waste limit's units.
duke@0 3403 #ifdef _LP64
phh@1988 3404 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
duke@0 3405 #else
phh@1988 3406 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
duke@0 3407 #endif
phh@1988 3408 __ cmp(RtlabWasteLimitValue, RfreeValue);
phh@1988 3409 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
phh@1988 3410 __ delayed()->nop();
phh@1988 3411
phh@1988 3412 // increment waste limit to prevent getting stuck on this slow path
phh@1988 3413 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
phh@1988 3414 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
duke@0 3415 } else {
duke@0 3416 // No allocation in the shared eden.
duke@0 3417 __ br(Assembler::always, false, Assembler::pt, slow_case);
duke@0 3418 __ delayed()->nop();
duke@0 3419 }
duke@0 3420 }
duke@0 3421
duke@0 3422 // Allocation in the shared Eden
// CAS loop: retry while another thread advances the heap top concurrently.
duke@0 3423 if (allow_shared_alloc) {
duke@0 3424 Register RoldTopValue = G1_scratch;
duke@0 3425 Register RtopAddr = G3_scratch;
duke@0 3426 Register RnewTopValue = RallocatedObject;
duke@0 3427 Register RendValue = Rscratch;
duke@0 3428
duke@0 3429 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
duke@0 3430
duke@0 3431 Label retry;
duke@0 3432 __ bind(retry);
duke@0 3433 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
duke@0 3434 __ ld_ptr(RendValue, 0, RendValue);
duke@0 3435 __ ld_ptr(RtopAddr, 0, RoldTopValue);
duke@0 3436 __ add(RoldTopValue, Roffset, RnewTopValue);
duke@0 3437
duke@0 3438 // RnewTopValue contains the top address after the new object
duke@0 3439 // has been allocated.
duke@0 3440 __ cmp(RnewTopValue, RendValue);
duke@0 3441 __ brx(Assembler::greaterUnsigned, false, Assembler::pn, slow_case);
duke@0 3442 __ delayed()->nop();
duke@0 3443
// On pre-v9 hardware the CAS is emulated under a global lock.
duke@0 3444 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
duke@0 3445 VM_Version::v9_instructions_work() ? NULL :
duke@0 3446 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
duke@0 3447
duke@0 3448 // if someone beat us on the allocation, try again, otherwise continue
duke@0 3449 __ cmp(RoldTopValue, RnewTopValue);
duke@0 3450 __ brx(Assembler::notEqual, false, Assembler::pn, retry);
duke@0 3451 __ delayed()->nop();
phh@1988 3452
phh@1988 3453 // bump total bytes allocated by this thread
phh@2012 3454 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
phh@2012 3455 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
duke@0 3456 }
duke@0 3457
duke@0 3458 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
duke@0 3459 // clear object fields
// Roffset holds the instance size in bytes; subtract the header size and
// zero the remaining field words from the end down to the header.
duke@0 3460 __ bind(initialize_object);
duke@0 3461 __ deccc(Roffset, sizeof(oopDesc));
duke@0 3462 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
duke@0 3463 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
duke@0 3464
duke@0 3465 // initialize remaining object fields
duke@0 3466 { Label loop;
duke@0 3467 __ subcc(Roffset, wordSize, Roffset);
duke@0 3468 __ bind(loop);
duke@0 3469 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
duke@0 3470 __ st_ptr(G0, G3_scratch, Roffset);
duke@0 3471 __ br(Assembler::notEqual, false, Assembler::pt, loop);
duke@0 3472 __ delayed()->subcc(Roffset, wordSize, Roffset);
duke@0 3473 }
duke@0 3474 __ br(Assembler::always, false, Assembler::pt, initialize_header);
duke@0 3475 __ delayed()->nop();
duke@0 3476 }
duke@0 3477
duke@0 3478 // slow case
// Re-fetch the cp index and call the runtime; the result arrives in Otos_i.
duke@0 3479 __ bind(slow_case);
duke@0 3480 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
duke@0 3481 __ get_constant_pool(O1);
duke@0 3482
duke@0 3483 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
duke@0 3484
duke@0 3485 __ ba(false, done);
duke@0 3486 __ delayed()->nop();
duke@0 3487
duke@0 3488 // Initialize the header: mark, klass
duke@0 3489 __ bind(initialize_header);
duke@0 3490
duke@0 3491 if (UseBiasedLocking) {
duke@0 3492 __ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
duke@0 3493 } else {
duke@0 3494 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
duke@0 3495 }
duke@0 3496 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
coleenp@167 3497 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
coleenp@167 3498 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
duke@0 3499
duke@0 3500 {
duke@0 3501 SkipIfEqual skip_if(
duke@0 3502 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
duke@0 3503 // Trigger dtrace event
duke@0 3504 __ push(atos);
duke@0 3505 __ call_VM_leaf(noreg,
duke@0 3506 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
duke@0 3507 __ pop(atos);
duke@0 3508 }
duke@0 3509
duke@0 3510 // continue
duke@0 3511 __ bind(done);
duke@0 3512 }
duke@0 3513
duke@0 3514
duke@0 3515
// Generates the template for the newarray bytecode: loads the one-byte
// element-type operand and calls into the runtime to allocate the array
// (length is in Otos_i, result returned in Otos_i).
duke@0 3516 void TemplateTable::newarray() {
duke@0 3517 transition(itos, atos);
duke@0 3518 __ ldub(Lbcp, 1, O1);
duke@0 3519 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
duke@0 3520 }
duke@0 3521
duke@0 3522
// Generates the template for the anewarray bytecode: loads the constant
// pool and the two-byte cp index operand, then calls into the runtime to
// allocate the object array (length in Otos_i, result in Otos_i).
duke@0 3523 void TemplateTable::anewarray() {
duke@0 3524 transition(itos, atos);
duke@0 3525 __ get_constant_pool(O1);
duke@0 3526 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
duke@0 3527 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
duke@0 3528 }
duke@0 3529
duke@0 3530
// Generates the template for the arraylength bytecode: null-checks the
// array reference in Otos_i and loads its length field.  The length load
// sits in the delay slot of the not-zero test, so it only commits on the
// non-null path; a null reference throws NullPointerException.
duke@0 3531 void TemplateTable::arraylength() {
duke@0 3532 transition(atos, itos);
duke@0 3533 Label ok;
duke@0 3534 __ verify_oop(Otos_i);
duke@0 3535 __ tst(Otos_i);
duke@0 3536 __ throw_if_not_1_x( Assembler::notZero, ok );
duke@0 3537 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
duke@0 3538 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
duke@0 3539 }
duke@0 3540
duke@0 3541
// Generates the template for the checkcast bytecode.  NULL always passes.
// If the cp entry has been quickened to JVM_CONSTANT_Class the target klass
// is read straight from the constant pool; otherwise quicken_io_cc resolves
// it in the runtime.  A fast subtype check then either falls through on
// success or throws ClassCastException.  The operand stays in Otos_i.
duke@0 3542 void TemplateTable::checkcast() {
duke@0 3543 transition(atos, atos);
duke@0 3544 Label done, is_null, quicked, cast_ok, resolved;
duke@0 3545 Register Roffset = G1_scratch;
duke@0 3546 Register RobjKlass = O5;
duke@0 3547 Register RspecifiedKlass = O4;
duke@0 3548
duke@0 3549 // Check for casting a NULL
duke@0 3550 __ br_null(Otos_i, false, Assembler::pn, is_null);
duke@0 3551 __ delayed()->nop();
duke@0 3552
duke@0 3553 // Get value klass in RobjKlass
coleenp@113 3554 __ load_klass(Otos_i, RobjKlass); // get value klass
duke@0 3555
duke@0 3556 // Get constant pool tag
duke@0 3557 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
duke@0 3558
duke@0 3559 // See if the checkcast has been quickened
// The cp index is scaled to a byte offset in the (annulled) delay slot,
// which executes only on the taken (quicked) path.
duke@0 3560 __ get_cpool_and_tags(Lscratch, G3_scratch);
duke@0 3561 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
duke@0 3562 __ ldub(G3_scratch, Roffset, G3_scratch);
duke@0 3563 __ cmp(G3_scratch, JVM_CONSTANT_Class);
duke@0 3564 __ br(Assembler::equal, true, Assembler::pt, quicked);
duke@0 3565 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
duke@0 3566
// Not quickened: resolve in the runtime.  The operand is saved on the
// expression stack across the VM call so GC can find it.
duke@0 3567 __ push_ptr(); // save receiver for result, and for GC
duke@0 3568 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
duke@0 3569 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
duke@0 3570
duke@0 3571 __ br(Assembler::always, false, Assembler::pt, resolved);
coleenp@113 3572 __ delayed()->nop();
duke@0 3573
duke@0 3574 // Extract target class from constant pool
duke@0 3575 __ bind(quicked);
duke@0 3576 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
duke@0 3577 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
duke@0 3578 __ bind(resolved);
// Reload the value klass: the VM call on the slow path may have moved the
// object, and Otos_i was restored above.
coleenp@113 3579 __ load_klass(Otos_i, RobjKlass); // get value klass
duke@0 3580
duke@0 3581 // Generate a fast subtype check. Branch to cast_ok if no
duke@0 3582 // failure. Throw exception if failure.
duke@0 3583 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
duke@0 3584
duke@0 3585 // Not a subtype; so must throw exception
duke@0 3586 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
duke@0 3587
duke@0 3588 __ bind(cast_ok);
duke@0 3589
// When profiling, the null path must record a null-seen bit, so keep the
// non-null path from falling into it.
duke@0 3590 if (ProfileInterpreter) {
duke@0 3591 __ ba(false, done);
duke@0 3592 __ delayed()->nop();
duke@0 3593 }
duke@0 3594 __ bind(is_null);
duke@0 3595 __ profile_null_seen(G3_scratch);
duke@0 3596 __ bind(done);
duke@0 3597 }
duke@0 3598
duke@0 3599
// Generates the template for the instanceof bytecode.  Mirrors checkcast,
// but instead of throwing it produces an int result in Otos_i: 1 if the
// operand is a non-null instance of the target class, 0 otherwise (NULL
// yields 0 via the is_null path).
duke@0 3600 void TemplateTable::instanceof() {
duke@0 3601 Label done, is_null, quicked, resolved;
duke@0 3602 transition(atos, itos);
duke@0 3603 Register Roffset = G1_scratch;
duke@0 3604 Register RobjKlass = O5;
duke@0 3605 Register RspecifiedKlass = O4;
duke@0 3606
duke@0 3607 // Check for casting a NULL
duke@0 3608 __ br_null(Otos_i, false, Assembler::pt, is_null);
duke@0 3609 __ delayed()->nop();
duke@0 3610
duke@0 3611 // Get value klass in RobjKlass
coleenp@113 3612 __ load_klass(Otos_i, RobjKlass); // get value klass
duke@0 3613
duke@0 3614 // Get constant pool tag
duke@0 3615 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
duke@0 3616
duke@0 3617 // See if the checkcast has been quickened
// The cp index is scaled to a byte offset in the (annulled) delay slot,
// which executes only on the taken (quicked) path.
duke@0 3618 __ get_cpool_and_tags(Lscratch, G3_scratch);
duke@0 3619 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
duke@0 3620 __ ldub(G3_scratch, Roffset, G3_scratch);
duke@0 3621 __ cmp(G3_scratch, JVM_CONSTANT_Class);
duke@0 3622 __ br(Assembler::equal, true, Assembler::pt, quicked);
duke@0 3623 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
duke@0 3624
// Not quickened: resolve in the runtime.  The operand is saved on the
// expression stack across the VM call so GC can find it.
duke@0 3625 __ push_ptr(); // save receiver for result, and for GC
duke@0 3626 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
duke@0 3627 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
duke@0 3628
duke@0 3629 __ br(Assembler::always, false, Assembler::pt, resolved);
coleenp@113 3630 __ delayed()->nop();
duke@0 3631
duke@0 3632
duke@0 3633 // Extract target class from constant pool
duke@0 3634 __ bind(quicked);
duke@0 3635 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
duke@0 3636 __ get_constant_pool(Lscratch);
duke@0 3637 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
duke@0 3638 __ bind(resolved);
// Reload the value klass: the VM call on the slow path may have moved the
// object, and Otos_i was restored above.
coleenp@113 3639 __ load_klass(Otos_i, RobjKlass); // get value klass
duke@0 3640
duke@0 3641 // Generate a fast subtype check. Branch to cast_ok if no
duke@0 3642 // failure. Return 0 if failure.
// Preload result 1 (success); the subtype check jumps to done on success,
// otherwise fall through and overwrite with 0.
duke@0 3643 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
duke@0 3644 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
duke@0 3645 // Not a subtype; return 0;
duke@0 3646 __ clr( Otos_i );
duke@0 3647
// When profiling, keep the failure path from falling into the null-seen
// profiling code.
duke@0 3648 if (ProfileInterpreter) {
duke@0 3649 __ ba(false, done);
duke@0 3650 __ delayed()->nop();
duke@0 3651 }
duke@0 3652 __ bind(is_null);
duke@0 3653 __ profile_null_seen(G3_scratch);
duke@0 3654 __ bind(done);
duke@0 3655 }
duke@0 3656
// Generates the template for the (JVMTI) breakpoint pseudo-bytecode:
// fetches the original bytecode that the breakpoint patch replaced, posts
// the breakpoint event to the runtime, then dispatches the original
// bytecode so execution continues normally.
duke@0 3657 void TemplateTable::_breakpoint() {
duke@0 3658
duke@0 3659 // Note: We get here even if we are single stepping..
duke@0 3660 // jbug insists on setting breakpoints at every bytecode
duke@0 3661 // even if we are in single step mode.
duke@0 3662
duke@0 3663 transition(vtos, vtos);
duke@0 3664 // get the unpatched byte code
duke@0 3665 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
duke@0 3666 __ mov(O0, Lbyte_code);
duke@0 3667
duke@0 3668 // post the breakpoint event
duke@0 3669 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
duke@0 3670
duke@0 3671 // complete the execution of original bytecode
// Lbyte_code holds the original bytecode; dispatch_normal re-executes it.
duke@0 3672 __ dispatch_normal(vtos);
duke@0 3673 }
duke@0 3674
duke@0 3675
duke@0 3676 //----------------------------------------------------------------------------------------------------
duke@0 3677 // Exceptions
duke@0 3678
duke@0 3679 void TemplateTable::athrow() {
duke@0