annotate src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp @ 12272:6996f14f9d02

8167578: C1: compiler.escapeAnalysis.TestArrayCopy fails to throw ArrayStoreException Summary: Remove code that causes C1's arraycopy to skip type checks if the length argument is 0 Reviewed-by: kvn
author zmajo
date Tue, 01 Nov 2016 09:19:14 +0100
parents 9146e68921cc
children 824a8dcba48b
rev   line source
aph@7880 1 /*
pliden@8412 2 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
aph@7880 3 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
aph@7880 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aph@7880 5 *
aph@7880 6 * This code is free software; you can redistribute it and/or modify it
aph@7880 7 * under the terms of the GNU General Public License version 2 only, as
aph@7880 8 * published by the Free Software Foundation.
aph@7880 9 *
aph@7880 10 * This code is distributed in the hope that it will be useful, but WITHOUT
aph@7880 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aph@7880 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aph@7880 13 * version 2 for more details (a copy is included in the LICENSE file that
aph@7880 14 * accompanied this code).
aph@7880 15 *
aph@7880 16 * You should have received a copy of the GNU General Public License version
aph@7880 17 * 2 along with this work; if not, write to the Free Software Foundation,
aph@7880 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aph@7880 19 *
aph@7880 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aph@7880 21 * or visit www.oracle.com if you need additional information or have any
aph@7880 22 * questions.
aph@7880 23 *
aph@7880 24 */
aph@7880 25
aph@7880 26 #include "precompiled.hpp"
aph@7880 27 #include "asm/assembler.hpp"
aph@7880 28 #include "c1/c1_CodeStubs.hpp"
aph@7880 29 #include "c1/c1_Compilation.hpp"
aph@7880 30 #include "c1/c1_LIRAssembler.hpp"
aph@7880 31 #include "c1/c1_MacroAssembler.hpp"
aph@7880 32 #include "c1/c1_Runtime1.hpp"
aph@7880 33 #include "c1/c1_ValueStack.hpp"
aph@7880 34 #include "ci/ciArrayKlass.hpp"
aph@7880 35 #include "ci/ciInstance.hpp"
pliden@8412 36 #include "gc/shared/barrierSet.hpp"
pliden@8412 37 #include "gc/shared/cardTableModRefBS.hpp"
pliden@8412 38 #include "gc/shared/collectedHeap.hpp"
aph@7880 39 #include "nativeInst_aarch64.hpp"
aph@7880 40 #include "oops/objArrayKlass.hpp"
aph@7880 41 #include "runtime/sharedRuntime.hpp"
aph@7880 42 #include "vmreg_aarch64.inline.hpp"
aph@7880 43
aph@7880 44
aph@7880 45
aph@7880 46 #ifndef PRODUCT
aph@7880 47 #define COMMENT(x) do { __ block_comment(x); } while (0)
aph@7880 48 #else
aph@7880 49 #define COMMENT(x)
aph@7880 50 #endif
aph@7880 51
// Fixed register roles used throughout this assembler.
NEEDS_CLEANUP // remove this definitions ?
const Register IC_Klass = rscratch2; // where the IC klass is cached
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be
aph@7880 56
aph@7880 57 #define __ _masm->
aph@7880 58
aph@7880 59
aph@7880 60 static void select_different_registers(Register preserve,
aph@7880 61 Register extra,
aph@7880 62 Register &tmp1,
aph@7880 63 Register &tmp2) {
aph@7880 64 if (tmp1 == preserve) {
aph@7880 65 assert_different_registers(tmp1, tmp2, extra);
aph@7880 66 tmp1 = extra;
aph@7880 67 } else if (tmp2 == preserve) {
aph@7880 68 assert_different_registers(tmp1, tmp2, extra);
aph@7880 69 tmp2 = extra;
aph@7880 70 }
aph@7880 71 assert_different_registers(preserve, tmp1, tmp2);
aph@7880 72 }
aph@7880 73
aph@7880 74
aph@7880 75
aph@7880 76 static void select_different_registers(Register preserve,
aph@7880 77 Register extra,
aph@7880 78 Register &tmp1,
aph@7880 79 Register &tmp2,
aph@7880 80 Register &tmp3) {
aph@7880 81 if (tmp1 == preserve) {
aph@7880 82 assert_different_registers(tmp1, tmp2, tmp3, extra);
aph@7880 83 tmp1 = extra;
aph@7880 84 } else if (tmp2 == preserve) {
aph@7880 85 assert_different_registers(tmp1, tmp2, tmp3, extra);
aph@7880 86 tmp2 = extra;
aph@7880 87 } else if (tmp3 == preserve) {
aph@7880 88 assert_different_registers(tmp1, tmp2, tmp3, extra);
aph@7880 89 tmp3 = extra;
aph@7880 90 }
aph@7880 91 assert_different_registers(preserve, tmp1, tmp2, tmp3);
aph@7880 92 }
aph@7880 93
aph@7880 94
// Not used by this backend; traps if ever reached.
bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }


// Operand describing where the incoming receiver argument lives,
// as defined by the platform FrameMap.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

// The OSR buffer pointer arrives in the same register as the receiver;
// reuse that register as a pointer-sized operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
aph@7880 105
aph@7880 106 //--------------fpu register translations-----------------------
aph@7880 107
aph@7880 108
aph@7880 109 address LIR_Assembler::float_constant(float f) {
aph@7880 110 address const_addr = __ float_constant(f);
aph@7880 111 if (const_addr == NULL) {
aph@7880 112 bailout("const section overflow");
aph@7880 113 return __ code()->consts()->start();
aph@7880 114 } else {
aph@7880 115 return const_addr;
aph@7880 116 }
aph@7880 117 }
aph@7880 118
aph@7880 119
aph@7880 120 address LIR_Assembler::double_constant(double d) {
aph@7880 121 address const_addr = __ double_constant(d);
aph@7880 122 if (const_addr == NULL) {
aph@7880 123 bailout("const section overflow");
aph@7880 124 return __ code()->consts()->start();
aph@7880 125 } else {
aph@7880 126 return const_addr;
aph@7880 127 }
aph@7880 128 }
aph@7880 129
aph@7880 130 address LIR_Assembler::int_constant(jlong n) {
aph@7880 131 address const_addr = __ long_constant(n);
aph@7880 132 if (const_addr == NULL) {
aph@7880 133 bailout("const section overflow");
aph@7880 134 return __ code()->consts()->start();
aph@7880 135 } else {
aph@7880 136 return const_addr;
aph@7880 137 }
aph@7880 138 }
aph@7880 139
// The operations below are part of the shared LIR_Assembler interface
// but are not used by this backend; each traps via Unimplemented() if
// ever reached.
void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
aph@7880 159 //-------------------------------------------
aph@7880 160
aph@7880 161 static Register as_reg(LIR_Opr op) {
aph@7880 162 return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
aph@7880 163 }
aph@7880 164
aph@7880 165 static jlong as_long(LIR_Opr data) {
aph@7880 166 jlong result;
aph@7880 167 switch (data->type()) {
aph@7880 168 case T_INT:
aph@7880 169 result = (data->as_jint());
aph@7880 170 break;
aph@7880 171 case T_LONG:
aph@7880 172 result = (data->as_jlong());
aph@7880 173 break;
aph@7880 174 default:
aph@7880 175 ShouldNotReachHere();
aph@9792 176 result = 0; // unreachable
aph@7880 177 }
aph@7880 178 return result;
aph@7880 179 }
aph@7880 180
// Convert a LIR address into an Assembler::Address.  The result is
// either (base + index << scale) when an index register is present
// (displacement must then be zero), or (base + disp).  `tmp` may be
// clobbered when the displacement does not fit an immediate offset.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        // 32-bit index: sign-extend to 64 bits before scaling.
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
      }
  } else {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      // Displacement fits the instruction's immediate offset field.
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      // Too large for an immediate: materialize it in tmp and use it
      // as an index register.
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  return Address();  // not reached; keeps the compiler happy
}
aph@7880 210
// High-word address of a two-word operand: never needed on this
// 64-bit backend.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}

// Default variant: rscratch1 is the temp that may be clobbered.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1); // Ouch
  // FIXME: This needs to be much more clever. See x86.
}
aph@7880 224
aph@7880 225
// Emit the OSR (on-stack-replacement) entry point: record its code
// offset, build the compiled frame, and copy the interpreter's monitors
// from the OSR buffer into this frame's monitor area.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy lock word, then object, through r19 into this frame's
      // monitor area.
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}
aph@7880 289
aph@7880 290
// inline cache check; done before the frame is built.
// Returns the code offset at which the check begins.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}
aph@7880 315
aph@7880 316
aph@7880 317 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
aph@7880 318 if (o == NULL) {
aph@7880 319 __ mov(reg, zr);
aph@7880 320 } else {
aph@7880 321 __ movoop(reg, o, /*immediate*/true);
aph@7880 322 }
aph@7880 323 }
aph@7880 324
// Emit a call to the Runtime1 patching stub matching the kind of patch
// recorded in `info`, and record the call's debug info.
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  // NOTE(review): reloc_type is set per case below but never read in
  // this function — looks vestigial; confirm before removing.
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}
aph@7880 352
// Loading an oop constant that needs patching is handled by delegating
// to deoptimize_trap, which calls the matching Runtime1 patching stub.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}


// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
aph@7880 367
aph@7880 368
// Emit the out-of-line exception handler stub.  Returns the code offset
// of the handler, or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
aph@7880 401
aph@7880 402
// Emit the code to remove the frame from the stack in the exception
// unwind path.  Returns the code offset of the handler.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    __ unlock_object(r5, r4, r0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    // TODO: dtrace method-exit probe not yet ported (x86 version below).
    __ call_Unimplemented();
#if 0
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
aph@7880 459
aph@7880 460
// Emit the deoptimization handler stub: capture the return address in
// lr and jump to the shared deopt blob.  Returns the handler's code
// offset, or -1 (after bailout) if the stub section is full.
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  // The deopt blob expects the "return" pc of this handler in lr.
  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
aph@7880 486
// Record debug info (and exception handlers, if any) for the poll
// instruction at `adr`, marking it with a poll relocation.
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}
aph@7880 496
// Rather than take a segfault when the polling page is protected,
// explicitly check for a safepoint in progress and if there is one,
// fake a call to the handler as if a segfault had been caught.
void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {
  // Load the safepoint state byte; zero means no safepoint pending.
  __ mov(rscratch1, SafepointSynchronize::address_of_state());
  __ ldrb(rscratch1, Address(rscratch1));
  Label nope, poll;
  __ cbz(rscratch1, nope);
  __ block_comment("safepoint");
  __ enter();
  __ push(0x3, sp);                // r0 & r1
  __ push(0x3ffffffc, sp);         // integer registers except lr & sp & r0 & r1
  // Pretend the poll instruction itself faulted: record its pc as the
  // saved exception pc, then ask the runtime for the poll stub.
  __ adr(r0, poll);
  __ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));
  __ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));
  __ blrt(rscratch1, 1, 0, 1);
  __ maybe_isb();
  __ pop(0x3ffffffc, sp);          // integer registers except lr & sp & r0 & r1
  __ mov(rscratch1, r0);
  __ pop(0x3, sp);                 // r0 & r1
  __ leave();
  __ br(rscratch1);                // dispatch to the stub the runtime returned
  address polling_page(os::get_polling_page());
  assert(os::is_poll_address(polling_page), "should be");
  unsigned long off;
  __ adrp(rscratch1, Address(polling_page, rtype), off);
  __ bind(poll);
  if (info)
    add_debug_info_for_branch(info);  // This isn't just debug info:
                                      // it's the oop map
  else
    __ code_section()->relocate(pc(), rtype);
  // The actual poll: a load from the polling page.
  __ ldrw(zr, Address(rscratch1, off));
  __ bind(nope);
}
aph@7880 532
// Emit a method return: tear down the frame, poll for a safepoint on
// return, and branch back through lr.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());
  address polling_page(os::get_polling_page());
  __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
  __ ret(lr);
}

// Emit a safepoint poll (a read of the polling page) with its oop map,
// returning the code offset just past the poll.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  address polling_page(os::get_polling_page());
  guarantee(info != NULL, "Shouldn't be NULL");
  assert(os::is_poll_address(polling_page), "should be");
  unsigned long off;
  __ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);
  assert(off == 0, "must be");
  add_debug_info_for_branch(info);  // This isn't just debug info:
                                    // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
aph@7880 554
aph@7880 555
// Register-to-register move, normalizing the r31_sp alias to sp first.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == r31_sp)
    from_reg = sp;
  if (to_reg == r31_sp)
    to_reg = sp;
  __ mov(to_reg, from_reg);
}

// Not used by this backend; traps if ever reached.
void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
aph@7880 565
aph@7880 566
// Load a constant into a register.  Only T_OBJECT supports patching
// here (via jobject2reg_with_patching); T_METADATA patches through
// klass2reg_with_patching; every other type requires lir_patch_none.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movw(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      // Full-width (64-bit) move of the address constant.
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      // Use an fmov immediate when the value is encodable; otherwise
      // load it from the constant section.
      if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
        __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
      } else {
        __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
        __ ldrs(dest->as_float_reg(), Address(rscratch1));
      }
      break;
    }

    case T_DOUBLE: {
      if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
        __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
      } else {
        __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
        __ ldrd(dest->as_double_reg(), Address(rscratch1));
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
aph@7880 633
aph@7880 634 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
aph@7880 635 LIR_Const* c = src->as_constant_ptr();
aph@7880 636 switch (c->type()) {
aph@7880 637 case T_OBJECT:
aph@7880 638 {
aph@7880 639 if (! c->as_jobject())
aph@7880 640 __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
aph@7880 641 else {
aph@7880 642 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
aph@7880 643 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
aph@7880 644 }
aph@7880 645 }
aph@7880 646 break;
aph@7880 647 case T_ADDRESS:
aph@7880 648 {
aph@7880 649 const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
aph@7880 650 reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
aph@7880 651 }
aph@7880 652 case T_INT:
aph@7880 653 case T_FLOAT:
aph@7880 654 {
aph@7880 655 Register reg = zr;
aph@7880 656 if (c->as_jint_bits() == 0)
aph@7880 657 __ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
aph@7880 658 else {
aph@7880 659 __ movw(rscratch1, c->as_jint_bits());
aph@7880 660 __ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
aph@7880 661 }
aph@7880 662 }
aph@7880 663 break;
aph@7880 664 case T_LONG:
aph@7880 665 case T_DOUBLE:
aph@7880 666 {
aph@7880 667 Register reg = zr;
aph@7880 668 if (c->as_jlong_bits() == 0)
aph@7880 669 __ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
aph@7880 670 lo_word_offset_in_bytes));
aph@7880 671 else {
aph@7880 672 __ mov(rscratch1, (intptr_t)c->as_jlong_bits());
aph@7880 673 __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
aph@7880 674 lo_word_offset_in_bytes));
aph@7880 675 }
aph@7880 676 }
aph@7880 677 break;
aph@7880 678 default:
aph@7880 679 ShouldNotReachHere();
aph@7880 680 }
aph@7880 681 }
aph@7880 682
// Store a constant directly to memory.  Only zero constants are
// supported (asserted per case); the store instruction is chosen by
// width and the value stored is always zr.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  // Member-function pointer selecting the correctly-sized store.
  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    // Narrow (32-bit) store for compressed oops, unless a wide store
    // was requested.
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
    insn = &Assembler::str;  // unreachable
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}
aph@7880 730
// Register-to-register move between CPU or FPU registers, handling the
// cross-width (long <-> object) cases C1 can produce.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    // On this 64-bit backend a double-cpu operand occupies one
    // register, so lo and hi must agree.
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
aph@7880 774
aph@7880 775 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
aph@7880 776 if (src->is_single_cpu()) {
aph@7880 777 if (type == T_ARRAY || type == T_OBJECT) {
aph@7880 778 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
aph@7880 779 __ verify_oop(src->as_register());
aph@7880 780 } else if (type == T_METADATA || type == T_DOUBLE) {
aph@7880 781 __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
aph@7880 782 } else {
aph@7880 783 __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
aph@7880 784 }
aph@7880 785
aph@7880 786 } else if (src->is_double_cpu()) {
aph@7880 787 Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
aph@7880 788 __ str(src->as_register_lo(), dest_addr_LO);
aph@7880 789
aph@7880 790 } else if (src->is_single_fpu()) {
aph@7880 791 Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
aph@7880 792 __ strs(src->as_float_reg(), dest_addr);
aph@7880 793
aph@7880 794 } else if (src->is_double_fpu()) {
aph@7880 795 Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
aph@7880 796 __ strd(src->as_double_reg(), dest_addr);
aph@7880 797
aph@7880 798 } else {
aph@7880 799 ShouldNotReachHere();
aph@7880 800 }
aph@7880 801
aph@7880 802 }
aph@7880 803
aph@7880 804
aph@7880 805 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
aph@7880 806 LIR_Address* to_addr = dest->as_address_ptr();
aph@7880 807 PatchingStub* patch = NULL;
aph@7880 808 Register compressed_src = rscratch1;
aph@7880 809
aph@7880 810 if (patch_code != lir_patch_none) {
aph@7880 811 deoptimize_trap(info);
aph@7880 812 return;
aph@7880 813 }
aph@7880 814
aph@7880 815 if (type == T_ARRAY || type == T_OBJECT) {
aph@7880 816 __ verify_oop(src->as_register());
aph@7880 817
aph@7880 818 if (UseCompressedOops && !wide) {
aph@7880 819 __ encode_heap_oop(compressed_src, src->as_register());
aph@7880 820 } else {
aph@7880 821 compressed_src = src->as_register();
aph@7880 822 }
aph@7880 823 }
aph@7880 824
aph@7880 825 int null_check_here = code_offset();
aph@7880 826 switch (type) {
aph@7880 827 case T_FLOAT: {
aph@7880 828 __ strs(src->as_float_reg(), as_Address(to_addr));
aph@7880 829 break;
aph@7880 830 }
aph@7880 831
aph@7880 832 case T_DOUBLE: {
aph@7880 833 __ strd(src->as_double_reg(), as_Address(to_addr));
aph@7880 834 break;
aph@7880 835 }
aph@7880 836
aph@7880 837 case T_ARRAY: // fall through
aph@7880 838 case T_OBJECT: // fall through
aph@7880 839 if (UseCompressedOops && !wide) {
aph@7880 840 __ strw(compressed_src, as_Address(to_addr, rscratch2));
aph@7880 841 } else {
aph@7880 842 __ str(compressed_src, as_Address(to_addr));
aph@7880 843 }
aph@7880 844 break;
aph@7880 845 case T_METADATA:
aph@7880 846 // We get here to store a method pointer to the stack to pass to
aph@7880 847 // a dtrace runtime call. This can't work on 64 bit with
aph@7880 848 // compressed klass ptrs: T_METADATA can be a compressed klass
aph@7880 849 // ptr or a 64 bit method pointer.
aph@7880 850 ShouldNotReachHere();
aph@7880 851 __ str(src->as_register(), as_Address(to_addr));
aph@7880 852 break;
aph@7880 853 case T_ADDRESS:
aph@7880 854 __ str(src->as_register(), as_Address(to_addr));
aph@7880 855 break;
aph@7880 856 case T_INT:
aph@7880 857 __ strw(src->as_register(), as_Address(to_addr));
aph@7880 858 break;
aph@7880 859
aph@7880 860 case T_LONG: {
aph@7880 861 __ str(src->as_register_lo(), as_Address_lo(to_addr));
aph@7880 862 break;
aph@7880 863 }
aph@7880 864
aph@7880 865 case T_BYTE: // fall through
aph@7880 866 case T_BOOLEAN: {
aph@7880 867 __ strb(src->as_register(), as_Address(to_addr));
aph@7880 868 break;
aph@7880 869 }
aph@7880 870
aph@7880 871 case T_CHAR: // fall through
aph@7880 872 case T_SHORT:
aph@7880 873 __ strh(src->as_register(), as_Address(to_addr));
aph@7880 874 break;
aph@7880 875
aph@7880 876 default:
aph@7880 877 ShouldNotReachHere();
aph@7880 878 }
aph@7880 879 if (info != NULL) {
aph@7880 880 add_debug_info_for_null_check(null_check_here, info);
aph@7880 881 }
aph@7880 882 }
aph@7880 883
aph@7880 884
aph@7880 885 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
aph@7880 886 assert(src->is_stack(), "should not call otherwise");
aph@7880 887 assert(dest->is_register(), "should not call otherwise");
aph@7880 888
aph@7880 889 if (dest->is_single_cpu()) {
aph@7880 890 if (type == T_ARRAY || type == T_OBJECT) {
aph@7880 891 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
aph@7880 892 __ verify_oop(dest->as_register());
aph@7880 893 } else if (type == T_METADATA) {
aph@7880 894 __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
aph@7880 895 } else {
aph@7880 896 __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
aph@7880 897 }
aph@7880 898
aph@7880 899 } else if (dest->is_double_cpu()) {
aph@7880 900 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
aph@7880 901 __ ldr(dest->as_register_lo(), src_addr_LO);
aph@7880 902
aph@7880 903 } else if (dest->is_single_fpu()) {
aph@7880 904 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
aph@7880 905 __ ldrs(dest->as_float_reg(), src_addr);
aph@7880 906
aph@7880 907 } else if (dest->is_double_fpu()) {
aph@7880 908 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
aph@7880 909 __ ldrd(dest->as_double_reg(), src_addr);
aph@7880 910
aph@7880 911 } else {
aph@7880 912 ShouldNotReachHere();
aph@7880 913 }
aph@7880 914 }
aph@7880 915
aph@7880 916
aph@7880 917 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
aph@7880 918 address target = NULL;
aph@7880 919 relocInfo::relocType reloc_type = relocInfo::none;
aph@7880 920
aph@7880 921 switch (patching_id(info)) {
aph@7880 922 case PatchingStub::access_field_id:
aph@7880 923 target = Runtime1::entry_for(Runtime1::access_field_patching_id);
aph@7880 924 reloc_type = relocInfo::section_word_type;
aph@7880 925 break;
aph@7880 926 case PatchingStub::load_klass_id:
aph@7880 927 target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
aph@7880 928 reloc_type = relocInfo::metadata_type;
aph@7880 929 break;
aph@7880 930 case PatchingStub::load_mirror_id:
aph@7880 931 target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
aph@7880 932 reloc_type = relocInfo::oop_type;
aph@7880 933 break;
aph@7880 934 case PatchingStub::load_appendix_id:
aph@7880 935 target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
aph@7880 936 reloc_type = relocInfo::oop_type;
aph@7880 937 break;
aph@7880 938 default: ShouldNotReachHere();
aph@7880 939 }
aph@7880 940
aph@7880 941 __ far_call(RuntimeAddress(target));
aph@7880 942 add_call_info_here(info);
aph@7880 943 }
aph@7880 944
aph@7880 945 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
aph@7880 946
aph@7880 947 LIR_Opr temp;
aph@7880 948 if (type == T_LONG || type == T_DOUBLE)
aph@7880 949 temp = FrameMap::rscratch1_long_opr;
aph@7880 950 else
aph@7880 951 temp = FrameMap::rscratch1_opr;
aph@7880 952
aph@7880 953 stack2reg(src, temp, src->type());
aph@7880 954 reg2stack(temp, dest, dest->type(), false);
aph@7880 955 }
aph@7880 956
aph@7880 957
aph@7880 958 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
aph@7880 959 LIR_Address* addr = src->as_address_ptr();
aph@7880 960 LIR_Address* from_addr = src->as_address_ptr();
aph@7880 961
aph@7880 962 if (addr->base()->type() == T_OBJECT) {
aph@7880 963 __ verify_oop(addr->base()->as_pointer_register());
aph@7880 964 }
aph@7880 965
aph@7880 966 if (patch_code != lir_patch_none) {
aph@7880 967 deoptimize_trap(info);
aph@7880 968 return;
aph@7880 969 }
aph@7880 970
aph@7880 971 if (info != NULL) {
aph@7880 972 add_debug_info_for_null_check_here(info);
aph@7880 973 }
aph@7880 974 int null_check_here = code_offset();
aph@7880 975 switch (type) {
aph@7880 976 case T_FLOAT: {
aph@7880 977 __ ldrs(dest->as_float_reg(), as_Address(from_addr));
aph@7880 978 break;
aph@7880 979 }
aph@7880 980
aph@7880 981 case T_DOUBLE: {
aph@7880 982 __ ldrd(dest->as_double_reg(), as_Address(from_addr));
aph@7880 983 break;
aph@7880 984 }
aph@7880 985
aph@7880 986 case T_ARRAY: // fall through
aph@7880 987 case T_OBJECT: // fall through
aph@7880 988 if (UseCompressedOops && !wide) {
aph@7880 989 __ ldrw(dest->as_register(), as_Address(from_addr));
aph@7880 990 } else {
aph@7880 991 __ ldr(dest->as_register(), as_Address(from_addr));
aph@7880 992 }
aph@7880 993 break;
aph@7880 994 case T_METADATA:
aph@7880 995 // We get here to store a method pointer to the stack to pass to
aph@7880 996 // a dtrace runtime call. This can't work on 64 bit with
aph@7880 997 // compressed klass ptrs: T_METADATA can be a compressed klass
aph@7880 998 // ptr or a 64 bit method pointer.
aph@7880 999 ShouldNotReachHere();
aph@7880 1000 __ ldr(dest->as_register(), as_Address(from_addr));
aph@7880 1001 break;
aph@7880 1002 case T_ADDRESS:
aph@7880 1003 // FIXME: OMG this is a horrible kludge. Any offset from an
aph@7880 1004 // address that matches klass_offset_in_bytes() will be loaded
aph@7880 1005 // as a word, not a long.
aph@7880 1006 if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
aph@7880 1007 __ ldrw(dest->as_register(), as_Address(from_addr));
aph@7880 1008 } else {
aph@7880 1009 __ ldr(dest->as_register(), as_Address(from_addr));
aph@7880 1010 }
aph@7880 1011 break;
aph@7880 1012 case T_INT:
aph@7880 1013 __ ldrw(dest->as_register(), as_Address(from_addr));
aph@7880 1014 break;
aph@7880 1015
aph@7880 1016 case T_LONG: {
aph@7880 1017 __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
aph@7880 1018 break;
aph@7880 1019 }
aph@7880 1020
aph@7880 1021 case T_BYTE:
aph@7880 1022 __ ldrsb(dest->as_register(), as_Address(from_addr));
aph@7880 1023 break;
aph@7880 1024 case T_BOOLEAN: {
aph@7880 1025 __ ldrb(dest->as_register(), as_Address(from_addr));
aph@7880 1026 break;
aph@7880 1027 }
aph@7880 1028
aph@7880 1029 case T_CHAR:
aph@7880 1030 __ ldrh(dest->as_register(), as_Address(from_addr));
aph@7880 1031 break;
aph@7880 1032 case T_SHORT:
aph@7880 1033 __ ldrsh(dest->as_register(), as_Address(from_addr));
aph@7880 1034 break;
aph@7880 1035
aph@7880 1036 default:
aph@7880 1037 ShouldNotReachHere();
aph@7880 1038 }
aph@7880 1039
aph@7880 1040 if (type == T_ARRAY || type == T_OBJECT) {
aph@7880 1041 if (UseCompressedOops && !wide) {
aph@7880 1042 __ decode_heap_oop(dest->as_register());
aph@7880 1043 }
aph@7880 1044 __ verify_oop(dest->as_register());
aph@7880 1045 } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
aph@7880 1046 if (UseCompressedClassPointers) {
aph@7880 1047 __ decode_klass_not_null(dest->as_register());
aph@7880 1048 }
aph@7880 1049 }
aph@7880 1050 }
aph@7880 1051
aph@7880 1052
aph@7880 1053 int LIR_Assembler::array_element_size(BasicType type) const {
aph@7880 1054 int elem_size = type2aelembytes(type);
aph@7880 1055 return exact_log2(elem_size);
aph@7880 1056 }
aph@7880 1057
aph@7880 1058 void LIR_Assembler::emit_op3(LIR_Op3* op) {
aph@7880 1059 Register Rdividend = op->in_opr1()->as_register();
aph@7880 1060 Register Rdivisor = op->in_opr2()->as_register();
aph@7880 1061 Register Rscratch = op->in_opr3()->as_register();
aph@7880 1062 Register Rresult = op->result_opr()->as_register();
aph@7880 1063 int divisor = -1;
aph@7880 1064
aph@7880 1065 /*
aph@7880 1066 TODO: For some reason, using the Rscratch that gets passed in is
aph@7880 1067 not possible because the register allocator does not see the tmp reg
aph@7880 1068 as used, and assignes it the same register as Rdividend. We use rscratch1
aph@7880 1069 instead.
aph@7880 1070
aph@7880 1071 assert(Rdividend != Rscratch, "");
aph@7880 1072 assert(Rdivisor != Rscratch, "");
aph@7880 1073 */
aph@7880 1074
aph@7880 1075 if (Rdivisor == noreg && is_power_of_2(divisor)) {
aph@7880 1076 // convert division by a power of two into some shifts and logical operations
aph@7880 1077 }
aph@7880 1078
aph@7880 1079 if (op->code() == lir_irem) {
aph@7880 1080 __ corrected_idivl(Rresult, Rdividend, Rdivisor, true, rscratch1);
aph@7880 1081 } else if (op->code() == lir_idiv) {
aph@7880 1082 __ corrected_idivl(Rresult, Rdividend, Rdivisor, false, rscratch1);
aph@7880 1083 } else
aph@7880 1084 ShouldNotReachHere();
aph@7880 1085 }
aph@7880 1086
aph@7880 1087 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
aph@7880 1088 #ifdef ASSERT
aph@7880 1089 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
aph@7880 1090 if (op->block() != NULL) _branch_target_blocks.append(op->block());
aph@7880 1091 if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
aph@7880 1092 #endif
aph@7880 1093
aph@7880 1094 if (op->cond() == lir_cond_always) {
aph@7880 1095 if (op->info() != NULL) add_debug_info_for_branch(op->info());
aph@7880 1096 __ b(*(op->label()));
aph@7880 1097 } else {
aph@7880 1098 Assembler::Condition acond;
aph@7880 1099 if (op->code() == lir_cond_float_branch) {
aph@7880 1100 bool is_unordered = (op->ublock() == op->block());
aph@7880 1101 // Assembler::EQ does not permit unordered branches, so we add
aph@7880 1102 // another branch here. Likewise, Assembler::NE does not permit
aph@7880 1103 // ordered branches.
aph@7880 1104 if (is_unordered && op->cond() == lir_cond_equal
aph@7880 1105 || !is_unordered && op->cond() == lir_cond_notEqual)
aph@7880 1106 __ br(Assembler::VS, *(op->ublock()->label()));
aph@7880 1107 switch(op->cond()) {
aph@7880 1108 case lir_cond_equal: acond = Assembler::EQ; break;
aph@7880 1109 case lir_cond_notEqual: acond = Assembler::NE; break;
aph@7880 1110 case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
aph@7880 1111 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
aph@7880 1112 case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
aph@7880 1113 case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
aph@7880 1114 default: ShouldNotReachHere();
aph@9792 1115 acond = Assembler::EQ; // unreachable
aph@7880 1116 }
aph@7880 1117 } else {
aph@7880 1118 switch (op->cond()) {
aph@7880 1119 case lir_cond_equal: acond = Assembler::EQ; break;
aph@7880 1120 case lir_cond_notEqual: acond = Assembler::NE; break;
aph@7880 1121 case lir_cond_less: acond = Assembler::LT; break;
aph@7880 1122 case lir_cond_lessEqual: acond = Assembler::LE; break;
aph@7880 1123 case lir_cond_greaterEqual: acond = Assembler::GE; break;
aph@7880 1124 case lir_cond_greater: acond = Assembler::GT; break;
aph@7880 1125 case lir_cond_belowEqual: acond = Assembler::LS; break;
aph@7880 1126 case lir_cond_aboveEqual: acond = Assembler::HS; break;
aph@9792 1127 default: ShouldNotReachHere();
aph@9792 1128 acond = Assembler::EQ; // unreachable
aph@7880 1129 }
aph@7880 1130 }
aph@7880 1131 __ br(acond,*(op->label()));
aph@7880 1132 }
aph@7880 1133 }
aph@7880 1134
aph@7880 1135
aph@7880 1136
aph@7880 1137 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
aph@7880 1138 LIR_Opr src = op->in_opr();
aph@7880 1139 LIR_Opr dest = op->result_opr();
aph@7880 1140
aph@7880 1141 switch (op->bytecode()) {
aph@7880 1142 case Bytecodes::_i2f:
aph@7880 1143 {
aph@7880 1144 __ scvtfws(dest->as_float_reg(), src->as_register());
aph@7880 1145 break;
aph@7880 1146 }
aph@7880 1147 case Bytecodes::_i2d:
aph@7880 1148 {
aph@7880 1149 __ scvtfwd(dest->as_double_reg(), src->as_register());
aph@7880 1150 break;
aph@7880 1151 }
aph@7880 1152 case Bytecodes::_l2d:
aph@7880 1153 {
aph@7880 1154 __ scvtfd(dest->as_double_reg(), src->as_register_lo());
aph@7880 1155 break;
aph@7880 1156 }
aph@7880 1157 case Bytecodes::_l2f:
aph@7880 1158 {
aph@7880 1159 __ scvtfs(dest->as_float_reg(), src->as_register_lo());
aph@7880 1160 break;
aph@7880 1161 }
aph@7880 1162 case Bytecodes::_f2d:
aph@7880 1163 {
aph@7880 1164 __ fcvts(dest->as_double_reg(), src->as_float_reg());
aph@7880 1165 break;
aph@7880 1166 }
aph@7880 1167 case Bytecodes::_d2f:
aph@7880 1168 {
aph@7880 1169 __ fcvtd(dest->as_float_reg(), src->as_double_reg());
aph@7880 1170 break;
aph@7880 1171 }
aph@7880 1172 case Bytecodes::_i2c:
aph@7880 1173 {
aph@7880 1174 __ ubfx(dest->as_register(), src->as_register(), 0, 16);
aph@7880 1175 break;
aph@7880 1176 }
aph@7880 1177 case Bytecodes::_i2l:
aph@7880 1178 {
aph@7880 1179 __ sxtw(dest->as_register_lo(), src->as_register());
aph@7880 1180 break;
aph@7880 1181 }
aph@7880 1182 case Bytecodes::_i2s:
aph@7880 1183 {
aph@7880 1184 __ sxth(dest->as_register(), src->as_register());
aph@7880 1185 break;
aph@7880 1186 }
aph@7880 1187 case Bytecodes::_i2b:
aph@7880 1188 {
aph@7880 1189 __ sxtb(dest->as_register(), src->as_register());
aph@7880 1190 break;
aph@7880 1191 }
aph@7880 1192 case Bytecodes::_l2i:
aph@7880 1193 {
aph@7880 1194 _masm->block_comment("FIXME: This could be a no-op");
aph@7880 1195 __ uxtw(dest->as_register(), src->as_register_lo());
aph@7880 1196 break;
aph@7880 1197 }
aph@7880 1198 case Bytecodes::_d2l:
aph@7880 1199 {
aph@7880 1200 __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
aph@7880 1201 break;
aph@7880 1202 }
aph@7880 1203 case Bytecodes::_f2i:
aph@7880 1204 {
aph@7880 1205 __ fcvtzsw(dest->as_register(), src->as_float_reg());
aph@7880 1206 break;
aph@7880 1207 }
aph@7880 1208 case Bytecodes::_f2l:
aph@7880 1209 {
aph@7880 1210 __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
aph@7880 1211 break;
aph@7880 1212 }
aph@7880 1213 case Bytecodes::_d2i:
aph@7880 1214 {
aph@7880 1215 __ fcvtzdw(dest->as_register(), src->as_double_reg());
aph@7880 1216 break;
aph@7880 1217 }
aph@7880 1218 default: ShouldNotReachHere();
aph@7880 1219 }
aph@7880 1220 }
aph@7880 1221
aph@7880 1222 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
aph@7880 1223 if (op->init_check()) {
aph@7880 1224 __ ldrb(rscratch1, Address(op->klass()->as_register(),
aph@7880 1225 InstanceKlass::init_state_offset()));
aph@7880 1226 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
aph@7880 1227 add_debug_info_for_null_check_here(op->stub()->info());
aph@7880 1228 __ br(Assembler::NE, *op->stub()->entry());
aph@7880 1229 }
aph@7880 1230 __ allocate_object(op->obj()->as_register(),
aph@7880 1231 op->tmp1()->as_register(),
aph@7880 1232 op->tmp2()->as_register(),
aph@7880 1233 op->header_size(),
aph@7880 1234 op->object_size(),
aph@7880 1235 op->klass()->as_register(),
aph@7880 1236 *op->stub()->entry());
aph@7880 1237 __ bind(*op->stub()->continuation());
aph@7880 1238 }
aph@7880 1239
aph@7880 1240 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
aph@7880 1241 Register len = op->len()->as_register();
aph@7880 1242 __ uxtw(len, len);
aph@7880 1243
aph@7880 1244 if (UseSlowPath ||
aph@7880 1245 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
aph@7880 1246 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
aph@7880 1247 __ b(*op->stub()->entry());
aph@7880 1248 } else {
aph@7880 1249 Register tmp1 = op->tmp1()->as_register();
aph@7880 1250 Register tmp2 = op->tmp2()->as_register();
aph@7880 1251 Register tmp3 = op->tmp3()->as_register();
aph@7880 1252 if (len == tmp1) {
aph@7880 1253 tmp1 = tmp3;
aph@7880 1254 } else if (len == tmp2) {
aph@7880 1255 tmp2 = tmp3;
aph@7880 1256 } else if (len == tmp3) {
aph@7880 1257 // everything is ok
aph@7880 1258 } else {
aph@7880 1259 __ mov(tmp3, len);
aph@7880 1260 }
aph@7880 1261 __ allocate_array(op->obj()->as_register(),
aph@7880 1262 len,
aph@7880 1263 tmp1,
aph@7880 1264 tmp2,
aph@7880 1265 arrayOopDesc::header_size(op->type()),
aph@7880 1266 array_element_size(op->type()),
aph@7880 1267 op->klass()->as_register(),
aph@7880 1268 *op->stub()->entry());
aph@7880 1269 }
aph@7880 1270 __ bind(*op->stub()->continuation());
aph@7880 1271 }
aph@7880 1272
aph@7880 1273 void LIR_Assembler::type_profile_helper(Register mdo,
aph@7880 1274 ciMethodData *md, ciProfileData *data,
aph@7880 1275 Register recv, Label* update_done) {
aph@7880 1276 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
aph@7880 1277 Label next_test;
aph@7880 1278 // See if the receiver is receiver[n].
aph@7880 1279 __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
aph@7880 1280 __ ldr(rscratch1, Address(rscratch2));
aph@7880 1281 __ cmp(recv, rscratch1);
aph@7880 1282 __ br(Assembler::NE, next_test);
aph@7880 1283 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
aph@7880 1284 __ addptr(data_addr, DataLayout::counter_increment);
aph@7880 1285 __ b(*update_done);
aph@7880 1286 __ bind(next_test);
aph@7880 1287 }
aph@7880 1288
aph@7880 1289 // Didn't find receiver; find next empty slot and fill it in
aph@7880 1290 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
aph@7880 1291 Label next_test;
aph@7880 1292 __ lea(rscratch2,
aph@7880 1293 Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
aph@7880 1294 Address recv_addr(rscratch2);
aph@7880 1295 __ ldr(rscratch1, recv_addr);
aph@7880 1296 __ cbnz(rscratch1, next_test);
aph@7880 1297 __ str(recv, recv_addr);
aph@7880 1298 __ mov(rscratch1, DataLayout::counter_increment);
aph@7880 1299 __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
aph@7880 1300 __ str(rscratch1, Address(rscratch2));
aph@7880 1301 __ b(*update_done);
aph@7880 1302 __ bind(next_test);
aph@7880 1303 }
aph@7880 1304 }
aph@7880 1305
aph@7880 1306 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
aph@7880 1307 // we always need a stub for the failure case.
aph@7880 1308 CodeStub* stub = op->stub();
aph@7880 1309 Register obj = op->object()->as_register();
aph@7880 1310 Register k_RInfo = op->tmp1()->as_register();
aph@7880 1311 Register klass_RInfo = op->tmp2()->as_register();
aph@7880 1312 Register dst = op->result_opr()->as_register();
aph@7880 1313 ciKlass* k = op->klass();
aph@7880 1314 Register Rtmp1 = noreg;
aph@7880 1315
aph@7880 1316 // check if it needs to be profiled
aph@7880 1317 ciMethodData* md;
aph@7880 1318 ciProfileData* data;
aph@7880 1319
aph@9792 1320 const bool should_profile = op->should_profile();
aph@9792 1321
aph@9792 1322 if (should_profile) {
aph@7880 1323 ciMethod* method = op->profiled_method();
aph@7880 1324 assert(method != NULL, "Should have method");
aph@7880 1325 int bci = op->profiled_bci();
aph@7880 1326 md = method->method_data_or_null();
aph@7880 1327 assert(md != NULL, "Sanity");
aph@7880 1328 data = md->bci_to_data(bci);
aph@7880 1329 assert(data != NULL, "need data for type check");
aph@7880 1330 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
aph@7880 1331 }
aph@7880 1332 Label profile_cast_success, profile_cast_failure;
aph@9792 1333 Label *success_target = should_profile ? &profile_cast_success : success;
aph@9792 1334 Label *failure_target = should_profile ? &profile_cast_failure : failure;
aph@7880 1335
aph@7880 1336 if (obj == k_RInfo) {
aph@7880 1337 k_RInfo = dst;
aph@7880 1338 } else if (obj == klass_RInfo) {
aph@7880 1339 klass_RInfo = dst;
aph@7880 1340 }
aph@7880 1341 if (k->is_loaded() && !UseCompressedClassPointers) {
aph@7880 1342 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
aph@7880 1343 } else {
aph@7880 1344 Rtmp1 = op->tmp3()->as_register();
aph@7880 1345 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
aph@7880 1346 }
aph@7880 1347
aph@7880 1348 assert_different_registers(obj, k_RInfo, klass_RInfo);
aph@7880 1349
aph@9792 1350 if (should_profile) {
aph@7880 1351 Label not_null;
aph@7880 1352 __ cbnz(obj, not_null);
aph@7880 1353 // Object is null; update MDO and exit
aph@7880 1354 Register mdo = klass_RInfo;
aph@7880 1355 __ mov_metadata(mdo, md->constant_encoding());
aph@7880 1356 Address data_addr
aph@7880 1357 = __ form_address(rscratch2, mdo,
aph@7880 1358 md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),
aph@7880 1359 LogBytesPerWord);
aph@7880 1360 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
aph@7880 1361 __ ldr(rscratch1, data_addr);
aph@7880 1362 __ orr(rscratch1, rscratch1, header_bits);
aph@7880 1363 __ str(rscratch1, data_addr);
aph@7880 1364 __ b(*obj_is_null);
aph@7880 1365 __ bind(not_null);
aph@7880 1366 } else {
aph@7880 1367 __ cbz(obj, *obj_is_null);
aph@7880 1368 }
aph@7880 1369
aph@7880 1370 if (!k->is_loaded()) {
aph@7880 1371 klass2reg_with_patching(k_RInfo, op->info_for_patch());
aph@7880 1372 } else {
aph@7880 1373 __ mov_metadata(k_RInfo, k->constant_encoding());
aph@7880 1374 }
aph@7880 1375 __ verify_oop(obj);
aph@7880 1376
aph@7880 1377 if (op->fast_check()) {
aph@7880 1378 // get object class
aph@7880 1379 // not a safepoint as obj null check happens earlier
aph@7880 1380 __ load_klass(rscratch1, obj);
aph@7880 1381 __ cmp( rscratch1, k_RInfo);
aph@7880 1382
aph@7880 1383 __ br(Assembler::NE, *failure_target);
aph@7880 1384 // successful cast, fall through to profile or jump
aph@7880 1385 } else {
aph@7880 1386 // get object class
aph@7880 1387 // not a safepoint as obj null check happens earlier
aph@7880 1388 __ load_klass(klass_RInfo, obj);
aph@7880 1389 if (k->is_loaded()) {
aph@7880 1390 // See if we get an immediate positive hit
aph@7880 1391 __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
aph@7880 1392 __ cmp(k_RInfo, rscratch1);
aph@7880 1393 if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
aph@7880 1394 __ br(Assembler::NE, *failure_target);
aph@7880 1395 // successful cast, fall through to profile or jump
aph@7880 1396 } else {
aph@7880 1397 // See if we get an immediate positive hit
aph@7880 1398 __ br(Assembler::EQ, *success_target);
aph@7880 1399 // check for self
aph@7880 1400 __ cmp(klass_RInfo, k_RInfo);
aph@7880 1401 __ br(Assembler::EQ, *success_target);
aph@7880 1402
aph@7880 1403 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
aph@7880 1404 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
aph@7880 1405 __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
aph@7880 1406 // result is a boolean
aph@7880 1407 __ cbzw(klass_RInfo, *failure_target);
aph@7880 1408 // successful cast, fall through to profile or jump
aph@7880 1409 }
aph@7880 1410 } else {
aph@7880 1411 // perform the fast part of the checking logic
aph@7880 1412 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
aph@7880 1413 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
aph@7880 1414 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
aph@7880 1415 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
aph@7880 1416 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
aph@7880 1417 // result is a boolean
aph@7880 1418 __ cbz(k_RInfo, *failure_target);
aph@7880 1419 // successful cast, fall through to profile or jump
aph@7880 1420 }
aph@7880 1421 }
aph@9792 1422 if (should_profile) {
aph@7880 1423 Register mdo = klass_RInfo, recv = k_RInfo;
aph@7880 1424 __ bind(profile_cast_success);
aph@7880 1425 __ mov_metadata(mdo, md->constant_encoding());
aph@7880 1426 __ load_klass(recv, obj);
aph@7880 1427 Label update_done;
aph@7880 1428 type_profile_helper(mdo, md, data, recv, success);
aph@7880 1429 __ b(*success);
aph@7880 1430
aph@7880 1431 __ bind(profile_cast_failure);
aph@7880 1432 __ mov_metadata(mdo, md->constant_encoding());
aph@7880 1433 Address counter_addr
aph@7880 1434 = __ form_address(rscratch2, mdo,
aph@7880 1435 md->byte_offset_of_slot(data, CounterData::count_offset()),
aph@7880 1436 LogBytesPerWord);
aph@7880 1437 __ ldr(rscratch1, counter_addr);
aph@7880 1438 __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
aph@7880 1439 __ str(rscratch1, counter_addr);
aph@7880 1440 __ b(*failure);
aph@7880 1441 }
aph@7880 1442 __ b(*success);
aph@7880 1443 }
aph@7880 1444
aph@7880 1445
aph@7880 1446 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
aph@9792 1447 const bool should_profile = op->should_profile();
aph@9792 1448
aph@7880 1449 LIR_Code code = op->code();
aph@7880 1450 if (code == lir_store_check) {
aph@7880 1451 Register value = op->object()->as_register();
aph@7880 1452 Register array = op->array()->as_register();
aph@7880 1453 Register k_RInfo = op->tmp1()->as_register();
aph@7880 1454 Register klass_RInfo = op->tmp2()->as_register();
aph@7880 1455 Register Rtmp1 = op->tmp3()->as_register();
aph@7880 1456
aph@7880 1457 CodeStub* stub = op->stub();
aph@7880 1458
aph@7880 1459 // check if it needs to be profiled
aph@7880 1460 ciMethodData* md;
aph@7880 1461 ciProfileData* data;
aph@7880 1462
aph@9792 1463 if (should_profile) {
aph@7880 1464 ciMethod* method = op->profiled_method();
aph@7880 1465 assert(method != NULL, "Should have method");
aph@7880 1466 int bci = op->profiled_bci();
aph@7880 1467 md = method->method_data_or_null();
aph@7880 1468 assert(md != NULL, "Sanity");
aph@7880 1469 data = md->bci_to_data(bci);
aph@7880 1470 assert(data != NULL, "need data for type check");
aph@7880 1471 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
aph@7880 1472 }
aph@7880 1473 Label profile_cast_success, profile_cast_failure, done;
aph@9792 1474 Label *success_target = should_profile ? &profile_cast_success : &done;
aph@9792 1475 Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();
aph@9792 1476
aph@9792 1477 if (should_profile) {
aph@7880 1478 Label not_null;
aph@7880 1479 __ cbnz(value, not_null);
aph@7880 1480 // Object is null; update MDO and exit
aph@7880 1481 Register mdo = klass_RInfo;
aph@7880 1482 __ mov_metadata(mdo, md->constant_encoding());
aph@7880 1483 Address data_addr
aph@7880 1484 = __ form_address(rscratch2, mdo,
aph@7880 1485 md->byte_offset_of_slot(data, DataLayout::header_offset()),
aph@7880 1486 LogBytesPerInt);
aph@7880 1487 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
aph@7880 1488 __ ldrw(rscratch1, data_addr);
aph@7880 1489 __ orrw(rscratch1, rscratch1, header_bits);
aph@7880 1490 __ strw(rscratch1, data_addr);
aph@7880 1491 __ b(done);
aph@7880 1492 __ bind(not_null);
aph@7880 1493 } else {
aph@7880 1494 __ cbz(value, done);
aph@7880 1495 }
aph@7880 1496
aph@7880 1497 add_debug_info_for_null_check_here(op->info_for_exception());
aph@7880 1498 __ load_klass(k_RInfo, array);
aph@7880 1499 __ load_klass(klass_RInfo, value);
aph@7880 1500
aph@7880 1501 // get instance klass (it's already uncompressed)
aph@7880 1502 __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
aph@7880 1503 // perform the fast part of the checking logic
aph@7880 1504 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
aph@7880 1505 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
aph@7880 1506 __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
aph@7880 1507 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
aph@7880 1508 __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
aph@7880 1509 // result is a boolean
aph@7880 1510 __ cbzw(k_RInfo, *failure_target);
aph@7880 1511 // fall through to the success case
aph@7880 1512
aph@9792 1513 if (should_profile) {
aph@7880 1514 Register mdo = klass_RInfo, recv = k_RInfo;
aph@7880 1515 __ bind(profile_cast_success);
aph@7880 1516 __ mov_metadata(mdo, md->constant_encoding());
aph@7880 1517 __ load_klass(recv, value);
aph@7880 1518 Label update_done;
aph@7880 1519 type_profile_helper(mdo, md, data, recv, &done);
aph@7880 1520 __ b(done);
aph@7880 1521
aph@7880 1522 __ bind(profile_cast_failure);
aph@7880 1523 __ mov_metadata(mdo, md->constant_encoding());
aph@7880 1524 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
aph@7880 1525 __ lea(rscratch2, counter_addr);
aph@7880 1526 __ ldr(rscratch1, Address(rscratch2));
aph@7880 1527 __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
aph@7880 1528 __ str(rscratch1, Address(rscratch2));
aph@7880 1529 __ b(*stub->entry());
aph@7880 1530 }
aph@7880 1531
aph@7880 1532 __ bind(done);
aph@7880 1533 } else if (code == lir_checkcast) {
aph@7880 1534 Register obj = op->object()->as_register();
aph@7880 1535 Register dst = op->result_opr()->as_register();
aph@7880 1536 Label success;
aph@7880 1537 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
aph@7880 1538 __ bind(success);
aph@7880 1539 if (dst != obj) {
aph@7880 1540 __ mov(dst, obj);
aph@7880 1541 }
aph@7880 1542 } else if (code == lir_instanceof) {
aph@7880 1543 Register obj = op->object()->as_register();
aph@7880 1544 Register dst = op->result_opr()->as_register();
aph@7880 1545 Label success, failure, done;
aph@7880 1546 emit_typecheck_helper(op, &success, &failure, &failure);
aph@7880 1547 __ bind(failure);
aph@7880 1548 __ mov(dst, zr);
aph@7880 1549 __ b(done);
aph@7880 1550 __ bind(success);
aph@7880 1551 __ mov(dst, 1);
aph@7880 1552 __ bind(done);
aph@7880 1553 } else {
aph@7880 1554 ShouldNotReachHere();
aph@7880 1555 }
aph@7880 1556 }
aph@7880 1557
aph@7880 1558 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
aph@11697 1559 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
enevill@10782 1560 __ cset(rscratch1, Assembler::NE);
aph@7880 1561 __ membar(__ AnyAny);
aph@7880 1562 }
aph@7880 1563
aph@7880 1564 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
aph@11697 1565 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
enevill@10782 1566 __ cset(rscratch1, Assembler::NE);
aph@7880 1567 __ membar(__ AnyAny);
aph@7880 1568 }
aph@7880 1569
aph@7880 1570
aph@7880 1571 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
aph@7880 1572 assert(VM_Version::supports_cx8(), "wrong machine");
aph@7880 1573 Register addr = as_reg(op->addr());
aph@7880 1574 Register newval = as_reg(op->new_value());
aph@7880 1575 Register cmpval = as_reg(op->cmp_value());
aph@7880 1576 Label succeed, fail, around;
aph@7880 1577
aph@7880 1578 if (op->code() == lir_cas_obj) {
aph@7880 1579 if (UseCompressedOops) {
aph@7880 1580 Register t1 = op->tmp1()->as_register();
aph@7880 1581 assert(op->tmp1()->is_valid(), "must be");
aph@7880 1582 __ encode_heap_oop(t1, cmpval);
aph@7880 1583 cmpval = t1;
aph@7880 1584 __ encode_heap_oop(rscratch2, newval);
aph@7880 1585 newval = rscratch2;
aph@7880 1586 casw(addr, newval, cmpval);
aph@7880 1587 } else {
aph@7880 1588 casl(addr, newval, cmpval);
aph@7880 1589 }
aph@7880 1590 } else if (op->code() == lir_cas_int) {
aph@7880 1591 casw(addr, newval, cmpval);
aph@7880 1592 } else {
aph@7880 1593 casl(addr, newval, cmpval);
aph@7880 1594 }
aph@7880 1595 }
aph@7880 1596
aph@7880 1597
aph@7880 1598 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
aph@7880 1599
aph@7880 1600 Assembler::Condition acond, ncond;
aph@7880 1601 switch (condition) {
aph@7880 1602 case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;
aph@7880 1603 case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;
aph@7880 1604 case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;
aph@7880 1605 case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;
aph@7880 1606 case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
aph@7880 1607 case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;
aph@9792 1608 case lir_cond_belowEqual:
aph@9792 1609 case lir_cond_aboveEqual:
aph@7880 1610 default: ShouldNotReachHere();
aph@9792 1611 acond = Assembler::EQ; ncond = Assembler::NE; // unreachable
aph@7880 1612 }
aph@7880 1613
aph@7880 1614 assert(result->is_single_cpu() || result->is_double_cpu(),
aph@7880 1615 "expect single register for result");
aph@7880 1616 if (opr1->is_constant() && opr2->is_constant()
aph@7880 1617 && opr1->type() == T_INT && opr2->type() == T_INT) {
aph@7880 1618 jint val1 = opr1->as_jint();
aph@7880 1619 jint val2 = opr2->as_jint();
aph@7880 1620 if (val1 == 0 && val2 == 1) {
aph@7880 1621 __ cset(result->as_register(), ncond);
aph@7880 1622 return;
aph@7880 1623 } else if (val1 == 1 && val2 == 0) {
aph@7880 1624 __ cset(result->as_register(), acond);
aph@7880 1625 return;
aph@7880 1626 }
aph@7880 1627 }
aph@7880 1628
aph@7880 1629 if (opr1->is_constant() && opr2->is_constant()
aph@7880 1630 && opr1->type() == T_LONG && opr2->type() == T_LONG) {
aph@7880 1631 jlong val1 = opr1->as_jlong();
aph@7880 1632 jlong val2 = opr2->as_jlong();
aph@7880 1633 if (val1 == 0 && val2 == 1) {
aph@7880 1634 __ cset(result->as_register_lo(), ncond);
aph@7880 1635 return;
aph@7880 1636 } else if (val1 == 1 && val2 == 0) {
aph@7880 1637 __ cset(result->as_register_lo(), acond);
aph@7880 1638 return;
aph@7880 1639 }
aph@7880 1640 }
aph@7880 1641
aph@7880 1642 if (opr1->is_stack()) {
aph@7880 1643 stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
aph@7880 1644 opr1 = FrameMap::rscratch1_opr;
aph@7880 1645 } else if (opr1->is_constant()) {
aph@7880 1646 LIR_Opr tmp
aph@7880 1647 = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
aph@7880 1648 const2reg(opr1, tmp, lir_patch_none, NULL);
aph@7880 1649 opr1 = tmp;
aph@7880 1650 }
aph@7880 1651
aph@7880 1652 if (opr2->is_stack()) {
aph@7880 1653 stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
aph@7880 1654 opr2 = FrameMap::rscratch2_opr;
aph@7880 1655 } else if (opr2->is_constant()) {
aph@7880 1656 LIR_Opr tmp
aph@7880 1657 = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
aph@7880 1658 const2reg(opr2, tmp, lir_patch_none, NULL);
aph@7880 1659 opr2 = tmp;
aph@7880 1660 }
aph@7880 1661
aph@7880 1662 if (result->type() == T_LONG)
aph@7880 1663 __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
aph@7880 1664 else
aph@7880 1665 __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
aph@7880 1666 }
aph@7880 1667
aph@7880 1668 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
aph@7880 1669 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
aph@7880 1670
aph@7880 1671 if (left->is_single_cpu()) {
aph@7880 1672 Register lreg = left->as_register();
aph@7880 1673 Register dreg = as_reg(dest);
aph@7880 1674
aph@7880 1675 if (right->is_single_cpu()) {
aph@7880 1676 // cpu register - cpu register
aph@7880 1677
aph@7880 1678 assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
aph@7880 1679 "should be");
aph@7880 1680 Register rreg = right->as_register();
aph@7880 1681 switch (code) {
aph@7880 1682 case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
aph@7880 1683 case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
aph@7880 1684 case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
aph@7880 1685 default: ShouldNotReachHere();
aph@7880 1686 }
aph@7880 1687
aph@7880 1688 } else if (right->is_double_cpu()) {
aph@7880 1689 Register rreg = right->as_register_lo();
aph@7880 1690 // single_cpu + double_cpu: can happen with obj+long
aph@7880 1691 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
aph@7880 1692 switch (code) {
aph@7880 1693 case lir_add: __ add(dreg, lreg, rreg); break;
aph@7880 1694 case lir_sub: __ sub(dreg, lreg, rreg); break;
aph@7880 1695 default: ShouldNotReachHere();
aph@7880 1696 }
aph@7880 1697 } else if (right->is_constant()) {
aph@7880 1698 // cpu register - constant
aph@7880 1699 jlong c;
aph@7880 1700
aph@7880 1701 // FIXME. This is fugly: we really need to factor all this logic.
aph@7880 1702 switch(right->type()) {
aph@7880 1703 case T_LONG:
aph@7880 1704 c = right->as_constant_ptr()->as_jlong();
aph@7880 1705 break;
aph@7880 1706 case T_INT:
aph@7880 1707 case T_ADDRESS:
aph@7880 1708 c = right->as_constant_ptr()->as_jint();
aph@7880 1709 break;
aph@7880 1710 default:
aph@7880 1711 ShouldNotReachHere();
aph@9792 1712 c = 0; // unreachable
aph@7880 1713 break;
aph@7880 1714 }
aph@7880 1715
aph@7880 1716 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
aph@7880 1717 if (c == 0 && dreg == lreg) {
aph@7880 1718 COMMENT("effective nop elided");
aph@7880 1719 return;
aph@7880 1720 }
aph@7880 1721 switch(left->type()) {
aph@7880 1722 case T_INT:
aph@7880 1723 switch (code) {
aph@7880 1724 case lir_add: __ addw(dreg, lreg, c); break;
aph@7880 1725 case lir_sub: __ subw(dreg, lreg, c); break;
aph@7880 1726 default: ShouldNotReachHere();
aph@7880 1727 }
aph@7880 1728 break;
aph@7880 1729 case T_OBJECT:
aph@7880 1730 case T_ADDRESS:
aph@7880 1731 switch (code) {
aph@7880 1732 case lir_add: __ add(dreg, lreg, c); break;
aph@7880 1733 case lir_sub: __ sub(dreg, lreg, c); break;
aph@7880 1734 default: ShouldNotReachHere();
aph@7880 1735 }
aph@7880 1736 break;
aph@7880 1737 ShouldNotReachHere();
aph@7880 1738 }
aph@7880 1739 } else {
aph@7880 1740 ShouldNotReachHere();
aph@7880 1741 }
aph@7880 1742
aph@7880 1743 } else if (left->is_double_cpu()) {
aph@7880 1744 Register lreg_lo = left->as_register_lo();
aph@7880 1745
aph@7880 1746 if (right->is_double_cpu()) {
aph@7880 1747 // cpu register - cpu register
aph@7880 1748 Register rreg_lo = right->as_register_lo();
aph@7880 1749 switch (code) {
aph@7880 1750 case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
aph@7880 1751 case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
aph@7880 1752 case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
aph@7880 1753 case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
aph@7880 1754 case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
aph@7880 1755 default:
aph@7880 1756 ShouldNotReachHere();
aph@7880 1757 }
aph@7880 1758
aph@7880 1759 } else if (right->is_constant()) {
aph@7880 1760 jlong c = right->as_constant_ptr()->as_jlong_bits();
aph@7880 1761 Register dreg = as_reg(dest);
aph@7880 1762 assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
aph@7880 1763 if (c == 0 && dreg == lreg_lo) {
aph@7880 1764 COMMENT("effective nop elided");
aph@7880 1765 return;
aph@7880 1766 }
aph@7880 1767 switch (code) {
aph@7880 1768 case lir_add: __ add(dreg, lreg_lo, c); break;
aph@7880 1769 case lir_sub: __ sub(dreg, lreg_lo, c); break;
aph@7880 1770 default:
aph@7880 1771 ShouldNotReachHere();
aph@7880 1772 }
aph@7880 1773 } else {
aph@7880 1774 ShouldNotReachHere();
aph@7880 1775 }
aph@7880 1776 } else if (left->is_single_fpu()) {
aph@7880 1777 assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
aph@7880 1778 switch (code) {
aph@7880 1779 case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
aph@7880 1780 case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
aph@7880 1781 case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
aph@7880 1782 case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
aph@7880 1783 default:
aph@7880 1784 ShouldNotReachHere();
aph@7880 1785 }
aph@7880 1786 } else if (left->is_double_fpu()) {
aph@7880 1787 if (right->is_double_fpu()) {
aph@7880 1788 // cpu register - cpu register
aph@7880 1789 switch (code) {
aph@7880 1790 case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
aph@7880 1791 case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
aph@7880 1792 case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
aph@7880 1793 case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
aph@7880 1794 default:
aph@7880 1795 ShouldNotReachHere();
aph@7880 1796 }
aph@7880 1797 } else {
aph@7880 1798 if (right->is_constant()) {
aph@7880 1799 ShouldNotReachHere();
aph@7880 1800 }
aph@7880 1801 ShouldNotReachHere();
aph@7880 1802 }
aph@7880 1803 } else if (left->is_single_stack() || left->is_address()) {
aph@7880 1804 assert(left == dest, "left and dest must be equal");
aph@7880 1805 ShouldNotReachHere();
aph@7880 1806 } else {
aph@7880 1807 ShouldNotReachHere();
aph@7880 1808 }
aph@7880 1809 }
aph@7880 1810
aph@7880 1811 void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
aph@7880 1812
aph@7880 1813
aph@7880 1814 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
aph@7880 1815 switch(code) {
aph@7880 1816 case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
aph@7880 1817 case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
aph@7880 1818 default : ShouldNotReachHere();
aph@7880 1819 }
aph@7880 1820 }
aph@7880 1821
aph@7880 1822 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
aph@7880 1823
aph@7880 1824 assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
aph@7880 1825 Register Rleft = left->is_single_cpu() ? left->as_register() :
aph@7880 1826 left->as_register_lo();
aph@7880 1827 if (dst->is_single_cpu()) {
aph@7880 1828 Register Rdst = dst->as_register();
aph@7880 1829 if (right->is_constant()) {
aph@7880 1830 switch (code) {
aph@7880 1831 case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
aph@7880 1832 case lir_logic_or: __ orrw (Rdst, Rleft, right->as_jint()); break;
aph@7880 1833 case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
aph@7880 1834 default: ShouldNotReachHere(); break;
aph@7880 1835 }
aph@7880 1836 } else {
aph@7880 1837 Register Rright = right->is_single_cpu() ? right->as_register() :
aph@7880 1838 right->as_register_lo();
aph@7880 1839 switch (code) {
aph@7880 1840 case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
aph@7880 1841 case lir_logic_or: __ orrw (Rdst, Rleft, Rright); break;
aph@7880 1842 case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
aph@7880 1843 default: ShouldNotReachHere(); break;
aph@7880 1844 }
aph@7880 1845 }
aph@7880 1846 } else {
aph@7880 1847 Register Rdst = dst->as_register_lo();
aph@7880 1848 if (right->is_constant()) {
aph@7880 1849 switch (code) {
aph@7880 1850 case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
aph@7880 1851 case lir_logic_or: __ orr (Rdst, Rleft, right->as_jlong()); break;
aph@7880 1852 case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
aph@7880 1853 default: ShouldNotReachHere(); break;
aph@7880 1854 }
aph@7880 1855 } else {
aph@7880 1856 Register Rright = right->is_single_cpu() ? right->as_register() :
aph@7880 1857 right->as_register_lo();
aph@7880 1858 switch (code) {
aph@7880 1859 case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
aph@7880 1860 case lir_logic_or: __ orr (Rdst, Rleft, Rright); break;
aph@7880 1861 case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
aph@7880 1862 default: ShouldNotReachHere(); break;
aph@7880 1863 }
aph@7880 1864 }
aph@7880 1865 }
aph@7880 1866 }
aph@7880 1867
aph@7880 1868
aph@7880 1869
aph@7880 1870 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
aph@7880 1871
aph@7880 1872
aph@7880 1873 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
aph@7880 1874 if (opr1->is_constant() && opr2->is_single_cpu()) {
aph@7880 1875 // tableswitch
aph@7880 1876 Register reg = as_reg(opr2);
aph@7880 1877 struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
aph@7880 1878 __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
aph@7880 1879 } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
aph@7880 1880 Register reg1 = as_reg(opr1);
aph@7880 1881 if (opr2->is_single_cpu()) {
aph@7880 1882 // cpu register - cpu register
aph@7880 1883 Register reg2 = opr2->as_register();
aph@7880 1884 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
aph@7880 1885 __ cmp(reg1, reg2);
aph@7880 1886 } else {
aph@7880 1887 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
aph@7880 1888 __ cmpw(reg1, reg2);
aph@7880 1889 }
aph@7880 1890 return;
aph@7880 1891 }
aph@7880 1892 if (opr2->is_double_cpu()) {
aph@7880 1893 // cpu register - cpu register
aph@7880 1894 Register reg2 = opr2->as_register_lo();
aph@7880 1895 __ cmp(reg1, reg2);
aph@7880 1896 return;
aph@7880 1897 }
aph@7880 1898
aph@7880 1899 if (opr2->is_constant()) {
aph@7880 1900 jlong imm;
aph@7880 1901 switch(opr2->type()) {
aph@7880 1902 case T_LONG:
aph@7880 1903 imm = opr2->as_constant_ptr()->as_jlong();
aph@7880 1904 break;
aph@7880 1905 case T_INT:
aph@7880 1906 case T_ADDRESS:
aph@7880 1907 imm = opr2->as_constant_ptr()->as_jint();
aph@7880 1908 break;
aph@7880 1909 case T_OBJECT:
aph@7880 1910 case T_ARRAY:
aph@7880 1911 imm = jlong(opr2->as_constant_ptr()->as_jobject());
aph@7880 1912 break;
aph@7880 1913 default:
aph@7880 1914 ShouldNotReachHere();
aph@9792 1915 imm = 0; // unreachable
aph@7880 1916 break;
aph@7880 1917 }
aph@7880 1918
aph@7880 1919 if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
aph@7880 1920 if (type2aelembytes(opr1->type()) <= 4)
aph@7880 1921 __ cmpw(reg1, imm);
aph@7880 1922 else
aph@7880 1923 __ cmp(reg1, imm);
aph@7880 1924 return;
aph@7880 1925 } else {
aph@7880 1926 __ mov(rscratch1, imm);
aph@7880 1927 if (type2aelembytes(opr1->type()) <= 4)
aph@7880 1928 __ cmpw(reg1, rscratch1);
aph@7880 1929 else
aph@7880 1930 __ cmp(reg1, rscratch1);
aph@7880 1931 return;
aph@7880 1932 }
aph@7880 1933 } else
aph@7880 1934 ShouldNotReachHere();
aph@7880 1935 } else if (opr1->is_single_fpu()) {
aph@7880 1936 FloatRegister reg1 = opr1->as_float_reg();
aph@7880 1937 assert(opr2->is_single_fpu(), "expect single float register");
aph@7880 1938 FloatRegister reg2 = opr2->as_float_reg();
aph@7880 1939 __ fcmps(reg1, reg2);
aph@7880 1940 } else if (opr1->is_double_fpu()) {
aph@7880 1941 FloatRegister reg1 = opr1->as_double_reg();
aph@7880 1942 assert(opr2->is_double_fpu(), "expect double float register");
aph@7880 1943 FloatRegister reg2 = opr2->as_double_reg();
aph@7880 1944 __ fcmpd(reg1, reg2);
aph@7880 1945 } else {
aph@7880 1946 ShouldNotReachHere();
aph@7880 1947 }
aph@7880 1948 }
aph@7880 1949
aph@7880 1950 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
aph@7880 1951 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
aph@7880 1952 bool is_unordered_less = (code == lir_ucmp_fd2i);
aph@7880 1953 if (left->is_single_fpu()) {
aph@7880 1954 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
aph@7880 1955 } else if (left->is_double_fpu()) {
aph@7880 1956 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
aph@7880 1957 } else {
aph@7880 1958 ShouldNotReachHere();
aph@7880 1959 }
aph@7880 1960 } else if (code == lir_cmp_l2i) {
aph@7880 1961 Label done;
aph@7880 1962 __ cmp(left->as_register_lo(), right->as_register_lo());
aph@7880 1963 __ mov(dst->as_register(), (u_int64_t)-1L);
aph@7880 1964 __ br(Assembler::LT, done);
aph@7880 1965 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
aph@7880 1966 __ bind(done);
aph@7880 1967 } else {
aph@7880 1968 ShouldNotReachHere();
aph@7880 1969 }
aph@7880 1970 }
aph@7880 1971
aph@7880 1972
aph@7880 1973 void LIR_Assembler::align_call(LIR_Code code) { }
aph@7880 1974
aph@7880 1975
aph@7880 1976 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
thartmann@8794 1977 address call = __ trampoline_call(Address(op->addr(), rtype));
thartmann@8794 1978 if (call == NULL) {
thartmann@8794 1979 bailout("trampoline stub overflow");
thartmann@8794 1980 return;
thartmann@8794 1981 }
aph@7880 1982 add_call_info(code_offset(), op->info());
aph@7880 1983 }
aph@7880 1984
aph@7880 1985
aph@7880 1986 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
thartmann@8794 1987 address call = __ ic_call(op->addr());
thartmann@8794 1988 if (call == NULL) {
thartmann@8794 1989 bailout("trampoline stub overflow");
thartmann@8794 1990 return;
thartmann@8794 1991 }
aph@7880 1992 add_call_info(code_offset(), op->info());
aph@7880 1993 }
aph@7880 1994
aph@7880 1995
aph@7880 1996 /* Currently, vtable-dispatch is only enabled for sparc platforms */
aph@7880 1997 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
aph@7880 1998 ShouldNotReachHere();
aph@7880 1999 }
aph@7880 2000
aph@7880 2001
aph@7880 2002 void LIR_Assembler::emit_static_call_stub() {
aph@7880 2003 address call_pc = __ pc();
aph@7880 2004 address stub = __ start_a_stub(call_stub_size);
aph@7880 2005 if (stub == NULL) {
aph@7880 2006 bailout("static call stub overflow");
aph@7880 2007 return;
aph@7880 2008 }
aph@7880 2009
aph@7880 2010 int start = __ offset();
aph@7880 2011
aph@7880 2012 __ relocate(static_stub_Relocation::spec(call_pc));
aph@7880 2013 __ mov_metadata(rmethod, (Metadata*)NULL);
aph@7880 2014 __ movptr(rscratch1, 0);
aph@7880 2015 __ br(rscratch1);
aph@7880 2016
aph@7880 2017 assert(__ offset() - start <= call_stub_size, "stub too big");
aph@7880 2018 __ end_a_stub();
aph@7880 2019 }
aph@7880 2020
aph@7880 2021
aph@7880 2022 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
aph@7880 2023 assert(exceptionOop->as_register() == r0, "must match");
aph@7880 2024 assert(exceptionPC->as_register() == r3, "must match");
aph@7880 2025
aph@7880 2026 // exception object is not added to oop map by LinearScan
aph@7880 2027 // (LinearScan assumes that no oops are in fixed registers)
aph@7880 2028 info->add_register_oop(exceptionOop);
aph@7880 2029 Runtime1::StubID unwind_id;
aph@7880 2030
aph@7880 2031 // get current pc information
aph@7880 2032 // pc is only needed if the method has an exception handler, the unwind code does not need it.
aph@7880 2033 int pc_for_athrow_offset = __ offset();
aph@7880 2034 InternalAddress pc_for_athrow(__ pc());
aph@7880 2035 __ adr(exceptionPC->as_register(), pc_for_athrow);
aph@7880 2036 add_call_info(pc_for_athrow_offset, info); // for exception handler
aph@7880 2037
aph@7880 2038 __ verify_not_null_oop(r0);
aph@7880 2039 // search an exception handler (r0: exception oop, r3: throwing pc)
aph@7880 2040 if (compilation()->has_fpu_code()) {
aph@7880 2041 unwind_id = Runtime1::handle_exception_id;
aph@7880 2042 } else {
aph@7880 2043 unwind_id = Runtime1::handle_exception_nofpu_id;
aph@7880 2044 }
aph@7880 2045 __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
aph@7880 2046
aph@7880 2047 // FIXME: enough room for two byte trap ????
aph@7880 2048 __ nop();
aph@7880 2049 }
aph@7880 2050
aph@7880 2051
aph@7880 2052 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
aph@7880 2053 assert(exceptionOop->as_register() == r0, "must match");
aph@7880 2054
aph@7880 2055 __ b(_unwind_handler_entry);
aph@7880 2056 }
aph@7880 2057
aph@7880 2058
aph@7880 2059 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
aph@7880 2060 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
aph@7880 2061 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
aph@7880 2062
aph@7880 2063 switch (left->type()) {
aph@7880 2064 case T_INT: {
aph@7880 2065 switch (code) {
aph@7880 2066 case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break;
aph@7880 2067 case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break;
aph@7880 2068 case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
aph@7880 2069 default:
aph@7880 2070 ShouldNotReachHere();
aph@7880 2071 break;
aph@7880 2072 }
aph@7880 2073 break;
aph@7880 2074 case T_LONG:
aph@7880 2075 case T_ADDRESS:
aph@7880 2076 case T_OBJECT:
aph@7880 2077 switch (code) {
aph@7880 2078 case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;
aph@7880 2079 case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;
aph@7880 2080 case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
aph@7880 2081 default:
aph@7880 2082 ShouldNotReachHere();
aph@7880 2083 break;
aph@7880 2084 }
aph@7880 2085 break;
aph@7880 2086 default:
aph@7880 2087 ShouldNotReachHere();
aph@7880 2088 break;
aph@7880 2089 }
aph@7880 2090 }
aph@7880 2091 }
aph@7880 2092
aph@7880 2093
aph@7880 2094 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
aph@7880 2095 Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
aph@7880 2096 Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
aph@7880 2097
aph@7880 2098 switch (left->type()) {
aph@7880 2099 case T_INT: {
aph@7880 2100 switch (code) {
aph@7880 2101 case lir_shl: __ lslw (dreg, lreg, count); break;
aph@7880 2102 case lir_shr: __ asrw (dreg, lreg, count); break;
aph@7880 2103 case lir_ushr: __ lsrw (dreg, lreg, count); break;
aph@7880 2104 default:
aph@7880 2105 ShouldNotReachHere();
aph@7880 2106 break;
aph@7880 2107 }
aph@7880 2108 break;
aph@7880 2109 case T_LONG:
aph@7880 2110 case T_ADDRESS:
aph@7880 2111 case T_OBJECT:
aph@7880 2112 switch (code) {
aph@7880 2113 case lir_shl: __ lsl (dreg, lreg, count); break;
aph@7880 2114 case lir_shr: __ asr (dreg, lreg, count); break;
aph@7880 2115 case lir_ushr: __ lsr (dreg, lreg, count); break;
aph@7880 2116 default:
aph@7880 2117 ShouldNotReachHere();
aph@7880 2118 break;
aph@7880 2119 }
aph@7880 2120 break;
aph@7880 2121 default:
aph@7880 2122 ShouldNotReachHere();
aph@7880 2123 break;
aph@7880 2124 }
aph@7880 2125 }
aph@7880 2126 }
aph@7880 2127
aph@7880 2128
aph@7880 2129 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
aph@7880 2130 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
aph@7880 2131 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
aph@7880 2132 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
aph@7880 2133 __ str (r, Address(sp, offset_from_rsp_in_bytes));
aph@7880 2134 }
aph@7880 2135
aph@7880 2136
aph@7880 2137 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
aph@7880 2138 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
aph@7880 2139 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
aph@7880 2140 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
aph@7880 2141 __ mov (rscratch1, c);
aph@7880 2142 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
aph@7880 2143 }
aph@7880 2144
aph@7880 2145
aph@7880 2146 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
aph@7880 2147 ShouldNotReachHere();
aph@7880 2148 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
aph@7880 2149 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
aph@7880 2150 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
aph@7880 2151 __ lea(rscratch1, __ constant_oop_address(o));
aph@7880 2152 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
aph@7880 2153 }
aph@7880 2154
aph@7880 2155
aph@7880 2156 // This code replaces a call to arraycopy; no exception may
aph@7880 2157 // be thrown in this code, they must be thrown in the System.arraycopy
aph@7880 2158 // activation frame; we could save some checks if this would not be the case
aph@7880 2159 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
aph@7880 2160 ciArrayKlass* default_type = op->expected_type();
aph@7880 2161 Register src = op->src()->as_register();
aph@7880 2162 Register dst = op->dst()->as_register();
aph@7880 2163 Register src_pos = op->src_pos()->as_register();
aph@7880 2164 Register dst_pos = op->dst_pos()->as_register();
aph@7880 2165 Register length = op->length()->as_register();
aph@7880 2166 Register tmp = op->tmp()->as_register();
aph@7880 2167
aph@7880 2168 CodeStub* stub = op->stub();
aph@7880 2169 int flags = op->flags();
aph@7880 2170 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
aph@7880 2171 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
aph@7880 2172
aph@7880 2173 // if we don't know anything, just go through the generic arraycopy
aph@7880 2174 if (default_type == NULL // || basic_type == T_OBJECT
aph@7880 2175 ) {
aph@7880 2176 Label done;
aph@7880 2177 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
aph@7880 2178
aph@7880 2179 // Save the arguments in case the generic arraycopy fails and we
aph@7880 2180 // have to fall back to the JNI stub
aph@7880 2181 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
aph@7880 2182 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
aph@7880 2183 __ str(src, Address(sp, 4*BytesPerWord));
aph@7880 2184
aph@7880 2185 address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
aph@7880 2186 address copyfunc_addr = StubRoutines::generic_arraycopy();
aph@7880 2187
aph@7880 2188 // The arguments are in java calling convention so we shift them
aph@7880 2189 // to C convention
aph@7880 2190 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
aph@7880 2191 __ mov(c_rarg0, j_rarg0);
aph@7880 2192 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
aph@7880 2193 __ mov(c_rarg1, j_rarg1);
aph@7880 2194 assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
aph@7880 2195 __ mov(c_rarg2, j_rarg2);
aph@7880 2196 assert_different_registers(c_rarg3, j_rarg4);
aph@7880 2197 __ mov(c_rarg3, j_rarg3);
aph@7880 2198 __ mov(c_rarg4, j_rarg4);
aph@7880 2199 if (copyfunc_addr == NULL) { // Use C version if stub was not generated
aph@7880 2200 __ mov(rscratch1, RuntimeAddress(C_entry));
aph@7880 2201 __ blrt(rscratch1, 5, 0, 1);
aph@7880 2202 } else {
aph@7880 2203 #ifndef PRODUCT
aph@7880 2204 if (PrintC1Statistics) {
aph@7880 2205 __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
aph@7880 2206 }
aph@7880 2207 #endif
aph@7880 2208 __ far_call(RuntimeAddress(copyfunc_addr));
aph@7880 2209 }
aph@7880 2210
aph@7880 2211 __ cbz(r0, *stub->continuation());
aph@7880 2212
aph@7880 2213 // Reload values from the stack so they are where the stub
aph@7880 2214 // expects them.
aph@7880 2215 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
aph@7880 2216 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
aph@7880 2217 __ ldr(src, Address(sp, 4*BytesPerWord));
aph@7880 2218
aph@7880 2219 if (copyfunc_addr != NULL) {
aph@7880 2220 // r0 is -1^K where K == partial copied count
aph@7880 2221 __ eonw(rscratch1, r0, 0);
aph@7880 2222 // adjust length down and src/end pos up by partial copied count
aph@7880 2223 __ subw(length, length, rscratch1);
aph@7880 2224 __ addw(src_pos, src_pos, rscratch1);
aph@7880 2225 __ addw(dst_pos, dst_pos, rscratch1);
aph@7880 2226 }
aph@7880 2227 __ b(*stub->entry());
aph@7880 2228
aph@7880 2229 __ bind(*stub->continuation());
aph@7880 2230 return;
aph@7880 2231 }
aph@7880 2232
aph@7880 2233 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
aph@7880 2234
aph@7880 2235 int elem_size = type2aelembytes(basic_type);
aph@7880 2236 int shift_amount;
aph@7880 2237 int scale = exact_log2(elem_size);
aph@7880 2238
aph@7880 2239 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
aph@7880 2240 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
aph@7880 2241 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
aph@7880 2242 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
aph@7880 2243
aph@7880 2244 // test for NULL
aph@7880 2245 if (flags & LIR_OpArrayCopy::src_null_check) {
aph@7880 2246 __ cbz(src, *stub->entry());
aph@7880 2247 }
aph@7880 2248 if (flags & LIR_OpArrayCopy::dst_null_check) {
aph@7880 2249 __ cbz(dst, *stub->entry());
aph@7880 2250 }
aph@7880 2251
aph@7880 2252 // check if negative
aph@7880 2253 if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
aph@7880 2254 __ cmpw(src_pos, 0);
aph@7880 2255 __ br(Assembler::LT, *stub->entry());
aph@7880 2256 }
aph@7880 2257 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
aph@7880 2258 __ cmpw(dst_pos, 0);
aph@7880 2259 __ br(Assembler::LT, *stub->entry());
aph@7880 2260 }
aph@7880 2261
aph@7880 2262 if (flags & LIR_OpArrayCopy::length_positive_check) {
aph@7880 2263 __ cmpw(length, 0);
aph@7880 2264 __ br(Assembler::LT, *stub->entry());
aph@7880 2265 }
aph@7880 2266
aph@7880 2267 if (flags & LIR_OpArrayCopy::src_range_check) {
aph@7880 2268 __ addw(tmp, src_pos, length);
aph@7880 2269 __ ldrw(rscratch1, src_length_addr);
aph@7880 2270 __ cmpw(tmp, rscratch1);
aph@7880 2271 __ br(Assembler::HI, *stub->entry());
aph@7880 2272 }
aph@7880 2273 if (flags & LIR_OpArrayCopy::dst_range_check) {
aph@7880 2274 __ addw(tmp, dst_pos, length);
aph@7880 2275 __ ldrw(rscratch1, dst_length_addr);
aph@7880 2276 __ cmpw(tmp, rscratch1);
aph@7880 2277 __ br(Assembler::HI, *stub->entry());
aph@7880 2278 }
aph@7880 2279
aph@7880 2280 if (flags & LIR_OpArrayCopy::type_check) {
aph@7880 2281 // We don't know the array types are compatible
aph@7880 2282 if (basic_type != T_OBJECT) {
aph@7880 2283 // Simple test for basic type arrays
aph@7880 2284 if (UseCompressedClassPointers) {
aph@7880 2285 __ ldrw(tmp, src_klass_addr);
aph@7880 2286 __ ldrw(rscratch1, dst_klass_addr);
aph@7880 2287 __ cmpw(tmp, rscratch1);
aph@7880 2288 } else {
aph@7880 2289 __ ldr(tmp, src_klass_addr);
aph@7880 2290 __ ldr(rscratch1, dst_klass_addr);
aph@7880 2291 __ cmp(tmp, rscratch1);
aph@7880 2292 }
aph@7880 2293 __ br(Assembler::NE, *stub->entry());
aph@7880 2294 } else {
aph@7880 2295 // For object arrays, if src is a sub class of dst then we can
aph@7880 2296 // safely do the copy.
aph@7880 2297 Label cont, slow;
aph@7880 2298
aph@7880 2299 #define PUSH(r1, r2) \
aph@7880 2300 stp(r1, r2, __ pre(sp, -2 * wordSize));
aph@7880 2301
aph@7880 2302 #define POP(r1, r2) \
aph@7880 2303 ldp(r1, r2, __ post(sp, 2 * wordSize));
aph@7880 2304
aph@7880 2305 __ PUSH(src, dst);
aph@7880 2306
aph@7880 2307 __ load_klass(src, src);
aph@7880 2308 __ load_klass(dst, dst);
aph@7880 2309
aph@7880 2310 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
aph@7880 2311
aph@7880 2312 __ PUSH(src, dst);
aph@7880 2313 __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
aph@7880 2314 __ POP(src, dst);
aph@7880 2315
aph@7880 2316 __ cbnz(src, cont);
aph@7880 2317
aph@7880 2318 __ bind(slow);
aph@7880 2319 __ POP(src, dst);
aph@7880 2320
aph@7880 2321 address copyfunc_addr = StubRoutines::checkcast_arraycopy();
aph@7880 2322 if (copyfunc_addr != NULL) { // use stub if available
aph@7880 2323 // src is not a sub class of dst so we have to do a
aph@7880 2324 // per-element check.
aph@7880 2325
aph@7880 2326 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
aph@7880 2327 if ((flags & mask) != mask) {
aph@7880 2328 // Check that at least both of them object arrays.
aph@7880 2329 assert(flags & mask, "one of the two should be known to be an object array");
aph@7880 2330
aph@7880 2331 if (!(flags & LIR_OpArrayCopy::src_objarray)) {
aph@7880 2332 __ load_klass(tmp, src);
aph@7880 2333 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
aph@7880 2334 __ load_klass(tmp, dst);
aph@7880 2335 }
aph@7880 2336 int lh_offset = in_bytes(Klass::layout_helper_offset());
aph@7880 2337 Address klass_lh_addr(tmp, lh_offset);
aph@7880 2338 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
aph@7880 2339 __ ldrw(rscratch1, klass_lh_addr);
aph@7880 2340 __ mov(rscratch2, objArray_lh);
aph@7880 2341 __ eorw(rscratch1, rscratch1, rscratch2);
aph@7880 2342 __ cbnzw(rscratch1, *stub->entry());
aph@7880 2343 }
aph@7880 2344
aph@7880 2345 // Spill because stubs can use any register they like and it's
aph@7880 2346 // easier to restore just those that we care about.
aph@7880 2347 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
aph@7880 2348 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
aph@7880 2349 __ str(src, Address(sp, 4*BytesPerWord));
aph@7880 2350
aph@7880 2351 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
aph@7880 2352 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
aph@7880 2353 assert_different_registers(c_rarg0, dst, dst_pos, length);
aph@7880 2354 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
aph@7880 2355 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
aph@7880 2356 assert_different_registers(c_rarg1, dst, length);
aph@7880 2357 __ uxtw(c_rarg2, length);
aph@7880 2358 assert_different_registers(c_rarg2, dst);
aph@7880 2359
aph@7880 2360 __ load_klass(c_rarg4, dst);
aph@7880 2361 __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
aph@7880 2362 __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
aph@7880 2363 __ far_call(RuntimeAddress(copyfunc_addr));
aph@7880 2364
aph@7880 2365 #ifndef PRODUCT
aph@7880 2366 if (PrintC1Statistics) {
aph@7880 2367 Label failed;
aph@7880 2368 __ cbnz(r0, failed);
aph@7880 2369 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
aph@7880 2370 __ bind(failed);
aph@7880 2371 }
aph@7880 2372 #endif
aph@7880 2373
aph@7880 2374 __ cbz(r0, *stub->continuation());
aph@7880 2375
aph@7880 2376 #ifndef PRODUCT
aph@7880 2377 if (PrintC1Statistics) {
aph@7880 2378 __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
aph@7880 2379 }
aph@7880 2380 #endif
aph@7880 2381 assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
aph@7880 2382
aph@7880 2383 // Restore previously spilled arguments
aph@7880 2384 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
aph@7880 2385 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
aph@7880 2386 __ ldr(src, Address(sp, 4*BytesPerWord));
aph@7880 2387
aph@7880 2388 // return value is -1^K where K is partial copied count
aph@7880 2389 __ eonw(rscratch1, r0, zr);
aph@7880 2390 // adjust length down and src/end pos up by partial copied count
aph@7880 2391 __ subw(length, length, rscratch1);
aph@7880 2392 __ addw(src_pos, src_pos, rscratch1);
aph@7880 2393 __ addw(dst_pos, dst_pos, rscratch1);
aph@7880 2394 }
aph@7880 2395
aph@7880 2396 __ b(*stub->entry());
aph@7880 2397
aph@7880 2398 __ bind(cont);
aph@7880 2399 __ POP(src, dst);
aph@7880 2400 }
aph@7880 2401 }
aph@7880 2402
aph@7880 2403 #ifdef ASSERT
aph@7880 2404 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
aph@7880 2405 // Sanity check the known type with the incoming class. For the
aph@7880 2406 // primitive case the types must match exactly with src.klass and
aph@7880 2407 // dst.klass each exactly matching the default type. For the
aph@7880 2408 // object array case, if no type check is needed then either the
aph@7880 2409 // dst type is exactly the expected type and the src type is a
aph@7880 2410 // subtype which we can't check or src is the same array as dst
aph@7880 2411 // but not necessarily exactly of type default_type.
aph@7880 2412 Label known_ok, halt;
aph@7880 2413 __ mov_metadata(tmp, default_type->constant_encoding());
aph@7880 2414 if (UseCompressedClassPointers) {
aph@7880 2415 __ encode_klass_not_null(tmp);
aph@7880 2416 }
aph@7880 2417
aph@7880 2418 if (basic_type != T_OBJECT) {
aph@7880 2419
aph@7880 2420 if (UseCompressedClassPointers) {
aph@7880 2421 __ ldrw(rscratch1, dst_klass_addr);
aph@7880 2422 __ cmpw(tmp, rscratch1);
aph@7880 2423 } else {
aph@7880 2424 __ ldr(rscratch1, dst_klass_addr);
aph@7880 2425 __ cmp(tmp, rscratch1);
aph@7880 2426 }
aph@7880 2427 __ br(Assembler::NE, halt);
aph@7880 2428 if (UseCompressedClassPointers) {
aph@7880 2429 __ ldrw(rscratch1, src_klass_addr);
aph@7880 2430 __ cmpw(tmp, rscratch1);
aph@7880 2431 } else {
aph@7880 2432 __ ldr(rscratch1, src_klass_addr);
aph@7880 2433 __ cmp(tmp, rscratch1);
aph@7880 2434 }
aph@7880 2435 __ br(Assembler::EQ, known_ok);
aph@7880 2436 } else {
aph@7880 2437 if (UseCompressedClassPointers) {
aph@7880 2438 __ ldrw(rscratch1, dst_klass_addr);
aph@7880 2439 __ cmpw(tmp, rscratch1);
aph@7880 2440 } else {
aph@7880 2441 __ ldr(rscratch1, dst_klass_addr);
aph@7880 2442 __ cmp(tmp, rscratch1);
aph@7880 2443 }
aph@7880 2444 __ br(Assembler::EQ, known_ok);
aph@7880 2445 __ cmp(src, dst);
aph@7880 2446 __ br(Assembler::EQ, known_ok);
aph@7880 2447 }
aph@7880 2448 __ bind(halt);
aph@7880 2449 __ stop("incorrect type information in arraycopy");
aph@7880 2450 __ bind(known_ok);
aph@7880 2451 }
aph@7880 2452 #endif
aph@7880 2453
aph@7880 2454 #ifndef PRODUCT
aph@7880 2455 if (PrintC1Statistics) {
aph@7880 2456 __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
aph@7880 2457 }
aph@7880 2458 #endif
aph@7880 2459
aph@7880 2460 __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
aph@7880 2461 __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
aph@7880 2462 assert_different_registers(c_rarg0, dst, dst_pos, length);
aph@7880 2463 __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
aph@7880 2464 __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
aph@7880 2465 assert_different_registers(c_rarg1, dst, length);
aph@7880 2466 __ uxtw(c_rarg2, length);
aph@7880 2467 assert_different_registers(c_rarg2, dst);
aph@7880 2468
aph@7880 2469 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
aph@7880 2470 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
aph@7880 2471 const char *name;
aph@7880 2472 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
aph@7880 2473
aph@7880 2474 CodeBlob *cb = CodeCache::find_blob(entry);
aph@7880 2475 if (cb) {
aph@7880 2476 __ far_call(RuntimeAddress(entry));
aph@7880 2477 } else {
aph@7880 2478 __ call_VM_leaf(entry, 3);
aph@7880 2479 }
aph@7880 2480
aph@7880 2481 __ bind(*stub->continuation());
aph@7880 2482 }
aph@7880 2483
aph@7880 2484
aph@7880 2485
aph@7880 2486
aph@7880 2487 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
aph@7880 2488 Register obj = op->obj_opr()->as_register(); // may not be an oop
aph@7880 2489 Register hdr = op->hdr_opr()->as_register();
aph@7880 2490 Register lock = op->lock_opr()->as_register();
aph@7880 2491 if (!UseFastLocking) {
aph@7880 2492 __ b(*op->stub()->entry());
aph@7880 2493 } else if (op->code() == lir_lock) {
aph@7880 2494 Register scratch = noreg;
aph@7880 2495 if (UseBiasedLocking) {
aph@7880 2496 scratch = op->scratch_opr()->as_register();
aph@7880 2497 }
aph@7880 2498 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
aph@7880 2499 // add debug info for NullPointerException only if one is possible
aph@7880 2500 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
aph@7880 2501 if (op->info() != NULL) {
aph@7880 2502 add_debug_info_for_null_check(null_check_offset, op->info());
aph@7880 2503 }
aph@7880 2504 // done
aph@7880 2505 } else if (op->code() == lir_unlock) {
aph@7880 2506 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
aph@7880 2507 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
aph@7880 2508 } else {
aph@7880 2509 Unimplemented();
aph@7880 2510 }
aph@7880 2511 __ bind(*op->stub()->continuation());
aph@7880 2512 }
aph@7880 2513
aph@7880 2514
aph@7880 2515 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
aph@7880 2516 ciMethod* method = op->profiled_method();
aph@7880 2517 int bci = op->profiled_bci();
aph@7880 2518 ciMethod* callee = op->profiled_callee();
aph@7880 2519
aph@7880 2520 // Update counter for all call types
aph@7880 2521 ciMethodData* md = method->method_data_or_null();
aph@7880 2522 assert(md != NULL, "Sanity");
aph@7880 2523 ciProfileData* data = md->bci_to_data(bci);
aph@7880 2524 assert(data->is_CounterData(), "need CounterData for calls");
aph@7880 2525 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
aph@7880 2526 Register mdo = op->mdo()->as_register();
aph@7880 2527 __ mov_metadata(mdo, md->constant_encoding());
aph@7880 2528 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
aph@7880 2529 Bytecodes::Code bc = method->java_code_at_bci(bci);
aph@7880 2530 const bool callee_is_static = callee->is_loaded() && callee->is_static();
aph@7880 2531 // Perform additional virtual call profiling for invokevirtual and
aph@7880 2532 // invokeinterface bytecodes
aph@7880 2533 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
aph@7880 2534 !callee_is_static && // required for optimized MH invokes
aph@7880 2535 C1ProfileVirtualCalls) {
aph@7880 2536 assert(op->recv()->is_single_cpu(), "recv must be allocated");
aph@7880 2537 Register recv = op->recv()->as_register();
aph@7880 2538 assert_different_registers(mdo, recv);
aph@7880 2539 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
aph@7880 2540 ciKlass* known_klass = op->known_holder();
aph@7880 2541 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
aph@7880 2542 // We know the type that will be seen at this call site; we can
aph@7880 2543 // statically update the MethodData* rather than needing to do
aph@7880 2544 // dynamic tests on the receiver type
aph@7880 2545
aph@7880 2546 // NOTE: we should probably put a lock around this search to
aph@7880 2547 // avoid collisions by concurrent compilations
aph@7880 2548 ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
aph@7880 2549 uint i;
aph@7880 2550 for (i = 0; i < VirtualCallData::row_limit(); i++) {
aph@7880 2551 ciKlass* receiver = vc_data->receiver(i);
aph@7880 2552 if (known_klass->equals(receiver)) {
aph@7880 2553 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
aph@7880 2554 __ addptr(data_addr, DataLayout::counter_increment);
aph@7880 2555 return;
aph@7880 2556 }
aph@7880 2557 }
aph@7880 2558
aph@7880 2559 // Receiver type not found in profile data; select an empty slot
aph@7880 2560
aph@7880 2561 // Note that this is less efficient than it should be because it
aph@7880 2562 // always does a write to the receiver part of the
aph@7880 2563 // VirtualCallData rather than just the first time
aph@7880 2564 for (i = 0; i < VirtualCallData::row_limit(); i++) {
aph@7880 2565 ciKlass* receiver = vc_data->receiver(i);
aph@7880 2566 if (receiver == NULL) {
aph@7880 2567 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
aph@7880 2568 __ mov_metadata(rscratch1, known_klass->constant_encoding());
aph@7880 2569 __ lea(rscratch2, recv_addr);
aph@7880 2570 __ str(rscratch1, Address(rscratch2));
aph@7880 2571 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
aph@7880 2572 __ addptr(data_addr, DataLayout::counter_increment);
aph@7880 2573 return;
aph@7880 2574 }
aph@7880 2575 }
aph@7880 2576 } else {
aph@7880 2577 __ load_klass(recv, recv);
aph@7880 2578 Label update_done;
aph@7880 2579 type_profile_helper(mdo, md, data, recv, &update_done);
aph@7880 2580 // Receiver did not match any saved receiver and there is no empty row for it.
aph@7880 2581 // Increment total counter to indicate polymorphic case.
aph@7880 2582 __ addptr(counter_addr, DataLayout::counter_increment);
aph@7880 2583
aph@7880 2584 __ bind(update_done);
aph@7880 2585 }
aph@7880 2586 } else {
aph@7880 2587 // Static call
aph@7880 2588 __ addptr(counter_addr, DataLayout::counter_increment);
aph@7880 2589 }
aph@7880 2590 }
aph@7880 2591
aph@7880 2592
aph@7880 2593 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
aph@7880 2594 Unimplemented();
aph@7880 2595 }
aph@7880 2596
aph@7880 2597
aph@7880 2598 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
aph@7880 2599 __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
aph@7880 2600 }
aph@7880 2601
aph@7880 2602 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
aph@7880 2603 assert(op->crc()->is_single_cpu(), "crc must be register");
aph@7880 2604 assert(op->val()->is_single_cpu(), "byte value must be register");
aph@7880 2605 assert(op->result_opr()->is_single_cpu(), "result must be register");
aph@7880 2606 Register crc = op->crc()->as_register();
aph@7880 2607 Register val = op->val()->as_register();
aph@7880 2608 Register res = op->result_opr()->as_register();
aph@7880 2609
aph@7880 2610 assert_different_registers(val, crc, res);
aph@7880 2611 unsigned long offset;
aph@7880 2612 __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
aph@7880 2613 if (offset) __ add(res, res, offset);
aph@7880 2614
aph@7880 2615 __ ornw(crc, zr, crc); // ~crc
aph@7880 2616 __ update_byte_crc32(crc, val, res);
aph@7880 2617 __ ornw(res, zr, crc); // ~crc
aph@7880 2618 }
aph@7880 2619
aph@7880 2620 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
aph@7880 2621 COMMENT("emit_profile_type {");
aph@7880 2622 Register obj = op->obj()->as_register();
aph@7880 2623 Register tmp = op->tmp()->as_pointer_register();
aph@7880 2624 Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
aph@7880 2625 ciKlass* exact_klass = op->exact_klass();
aph@7880 2626 intptr_t current_klass = op->current_klass();
aph@7880 2627 bool not_null = op->not_null();
aph@7880 2628 bool no_conflict = op->no_conflict();
aph@7880 2629
aph@7880 2630 Label update, next, none;
aph@7880 2631
aph@7880 2632 bool do_null = !not_null;
aph@7880 2633 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
aph@7880 2634 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
aph@7880 2635
aph@7880 2636 assert(do_null || do_update, "why are we here?");
aph@7880 2637 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
aph@7880 2638 assert(mdo_addr.base() != rscratch1, "wrong register");
aph@7880 2639
aph@7880 2640 __ verify_oop(obj);
aph@7880 2641
aph@7880 2642 if (tmp != obj) {
aph@7880 2643 __ mov(tmp, obj);
aph@7880 2644 }
aph@7880 2645 if (do_null) {
aph@7880 2646 __ cbnz(tmp, update);
aph@7880 2647 if (!TypeEntries::was_null_seen(current_klass)) {
aph@7880 2648 __ ldr(rscratch2, mdo_addr);
aph@7880 2649 __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
aph@7880 2650 __ str(rscratch2, mdo_addr);
aph@7880 2651 }
aph@7880 2652 if (do_update) {
aph@7880 2653 #ifndef ASSERT
aph@7880 2654 __ b(next);
aph@7880 2655 }
aph@7880 2656 #else
aph@7880 2657 __ b(next);
aph@7880 2658 }
aph@7880 2659 } else {
aph@7880 2660 __ cbnz(tmp, update);
aph@7880 2661 __ stop("unexpected null obj");
aph@7880 2662 #endif
aph@7880 2663 }
aph@7880 2664
aph@7880 2665 __ bind(update);
aph@7880 2666
aph@7880 2667 if (do_update) {
aph@7880 2668 #ifdef ASSERT
aph@7880 2669 if (exact_klass != NULL) {
aph@7880 2670 Label ok;
aph@7880 2671 __ load_klass(tmp, tmp);
aph@7880 2672 __ mov_metadata(rscratch1, exact_klass->constant_encoding());
aph@7880 2673 __ eor(rscratch1, tmp, rscratch1);
aph@7880 2674 __ cbz(rscratch1, ok);
aph@7880 2675 __ stop("exact klass and actual klass differ");
aph@7880 2676 __ bind(ok);
aph@7880 2677 }
aph@7880 2678 #endif
aph@7880 2679 if (!no_conflict) {
aph@7880 2680 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
aph@7880 2681 if (exact_klass != NULL) {
aph@7880 2682 __ mov_metadata(tmp, exact_klass->constant_encoding());
aph@7880 2683 } else {
aph@7880 2684 __ load_klass(tmp, tmp);
aph@7880 2685 }
aph@7880 2686
aph@7880 2687 __ ldr(rscratch2, mdo_addr);
aph@7880 2688 __ eor(tmp, tmp, rscratch2);
aph@7880 2689 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
aph@7880 2690 // klass seen before, nothing to do. The unknown bit may have been
aph@7880 2691 // set already but no need to check.
aph@7880 2692 __ cbz(rscratch1, next);
aph@7880 2693
aph@7880 2694 __ andr(rscratch1, tmp, TypeEntries::type_unknown);
aph@7880 2695 __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
aph@7880 2696
aph@7880 2697 if (TypeEntries::is_type_none(current_klass)) {
aph@7880 2698 __ cbz(rscratch2, none);
aph@7880 2699 __ cmp(rscratch2, TypeEntries::null_seen);
aph@7880 2700 __ br(Assembler::EQ, none);
aph@7880 2701 // There is a chance that the checks above (re-reading profiling
aph@7880 2702 // data from memory) fail if another thread has just set the
aph@7880 2703 // profiling to this obj's klass
aph@7880 2704 __ dmb(Assembler::ISHLD);
aph@7880 2705 __ ldr(rscratch2, mdo_addr);
aph@7880 2706 __ eor(tmp, tmp, rscratch2);
aph@7880 2707 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
aph@7880 2708 __ cbz(rscratch1, next);
aph@7880 2709 }
aph@7880 2710 } else {
aph@7880 2711 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
aph@7880 2712 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
aph@7880 2713
aph@7880 2714 __ ldr(tmp, mdo_addr);
aph@7880 2715 __ andr(rscratch1, tmp, TypeEntries::type_unknown);
aph@7880 2716 __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
aph@7880 2717 }
aph@7880 2718
aph@7880 2719 // different than before. Cannot keep accurate profile.
aph@7880 2720 __ ldr(rscratch2, mdo_addr);
aph@7880 2721 __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
aph@7880 2722 __ str(rscratch2, mdo_addr);
aph@7880 2723
aph@7880 2724 if (TypeEntries::is_type_none(current_klass)) {
aph@7880 2725 __ b(next);
aph@7880 2726
aph@7880 2727 __ bind(none);
aph@7880 2728 // first time here. Set profile type.
aph@7880 2729 __ str(tmp, mdo_addr);
aph@7880 2730 }
aph@7880 2731 } else {
aph@7880 2732 // There's a single possible klass at this profile point
aph@7880 2733 assert(exact_klass != NULL, "should be");
aph@7880 2734 if (TypeEntries::is_type_none(current_klass)) {
aph@7880 2735 __ mov_metadata(tmp, exact_klass->constant_encoding());
aph@7880 2736 __ ldr(rscratch2, mdo_addr);
aph@7880 2737 __ eor(tmp, tmp, rscratch2);
aph@7880 2738 __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
aph@7880 2739 __ cbz(rscratch1, next);
aph@7880 2740 #ifdef ASSERT
aph@7880 2741 {
aph@7880 2742 Label ok;
aph@7880 2743 __ ldr(rscratch1, mdo_addr);
aph@7880 2744 __ cbz(rscratch1, ok);
aph@7880 2745 __ cmp(rscratch1, TypeEntries::null_seen);
aph@7880 2746 __ br(Assembler::EQ, ok);
aph@7880 2747 // may have been set by another thread
aph@7880 2748 __ dmb(Assembler::ISHLD);
aph@7880 2749 __ mov_metadata(rscratch1, exact_klass->constant_encoding());
aph@7880 2750 __ ldr(rscratch2, mdo_addr);
aph@7880 2751 __ eor(rscratch2, rscratch1, rscratch2);
aph@7880 2752 __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
aph@7880 2753 __ cbz(rscratch2, ok);
aph@7880 2754
aph@7880 2755 __ stop("unexpected profiling mismatch");
aph@7880 2756 __ bind(ok);
aph@7880 2757 }
aph@7880 2758 #endif
aph@7880 2759 // first time here. Set profile type.
aph@7880 2760 __ ldr(tmp, mdo_addr);
aph@7880 2761 } else {
aph@7880 2762 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
aph@7880 2763 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
aph@7880 2764
aph@7880 2765 __ ldr(tmp, mdo_addr);
aph@7880 2766 __ andr(rscratch1, tmp, TypeEntries::type_unknown);
aph@7880 2767 __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
aph@7880 2768
aph@7880 2769 __ orr(tmp, tmp, TypeEntries::type_unknown);
aph@7880 2770 __ str(tmp, mdo_addr);
aph@7880 2771 // FIXME: Write barrier needed here?
aph@7880 2772 }
aph@7880 2773 }
aph@7880 2774
aph@7880 2775 __ bind(next);
aph@7880 2776 }
aph@7880 2777 COMMENT("} emit_profile_type");
aph@7880 2778 }
aph@7880 2779
aph@7880 2780
aph@7880 2781 void LIR_Assembler::align_backward_branch_target() {
aph@7880 2782 }
aph@7880 2783
aph@7880 2784
aph@7880 2785 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
aph@7880 2786 if (left->is_single_cpu()) {
aph@7880 2787 assert(dest->is_single_cpu(), "expect single result reg");
aph@7880 2788 __ negw(dest->as_register(), left->as_register());
aph@7880 2789 } else if (left->is_double_cpu()) {
aph@7880 2790 assert(dest->is_double_cpu(), "expect double result reg");
aph@7880 2791 __ neg(dest->as_register_lo(), left->as_register_lo());
aph@7880 2792 } else if (left->is_single_fpu()) {
aph@7880 2793 assert(dest->is_single_fpu(), "expect single float result reg");
aph@7880 2794 __ fnegs(dest->as_float_reg(), left->as_float_reg());
aph@7880 2795 } else {
aph@7880 2796 assert(left->is_double_fpu(), "expect double float operand reg");
aph@7880 2797 assert(dest->is_double_fpu(), "expect double float result reg");
aph@7880 2798 __ fnegd(dest->as_double_reg(), left->as_double_reg());
aph@7880 2799 }
aph@7880 2800 }
aph@7880 2801
aph@7880 2802
aph@7880 2803 void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
aph@7880 2804 __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
aph@7880 2805 }
aph@7880 2806
aph@7880 2807
aph@7880 2808 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
aph@7880 2809 assert(!tmp->is_valid(), "don't need temporary");
aph@7880 2810
aph@7880 2811 CodeBlob *cb = CodeCache::find_blob(dest);
aph@7880 2812 if (cb) {
aph@7880 2813 __ far_call(RuntimeAddress(dest));
aph@7880 2814 } else {
aph@7880 2815 __ mov(rscratch1, RuntimeAddress(dest));
aph@7880 2816 int len = args->length();
aph@7880 2817 int type = 0;
aph@7880 2818 if (! result->is_illegal()) {
aph@7880 2819 switch (result->type()) {
aph@7880 2820 case T_VOID:
aph@7880 2821 type = 0;
aph@7880 2822 break;
aph@7880 2823 case T_INT:
aph@7880 2824 case T_LONG:
aph@7880 2825 case T_OBJECT:
aph@7880 2826 type = 1;
aph@7880 2827 break;
aph@7880 2828 case T_FLOAT:
aph@7880 2829 type = 2;
aph@7880 2830 break;
aph@7880 2831 case T_DOUBLE:
aph@7880 2832 type = 3;
aph@7880 2833 break;
aph@7880 2834 default:
aph@7880 2835 ShouldNotReachHere();
aph@7880 2836 break;
aph@7880 2837 }
aph@7880 2838 }
aph@7880 2839 int num_gpargs = 0;
aph@7880 2840 int num_fpargs = 0;
aph@7880 2841 for (int i = 0; i < args->length(); i++) {
aph@7880 2842 LIR_Opr arg = args->at(i);
aph@7880 2843 if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
aph@7880 2844 num_fpargs++;
aph@7880 2845 } else {
aph@7880 2846 num_gpargs++;
aph@7880 2847 }
aph@7880 2848 }
aph@7880 2849 __ blrt(rscratch1, num_gpargs, num_fpargs, type);
aph@7880 2850 }
aph@7880 2851
aph@7880 2852 if (info != NULL) {
aph@7880 2853 add_call_info_here(info);
aph@7880 2854 }
aph@7880 2855 __ maybe_isb();
aph@7880 2856 }
aph@7880 2857
aph@7880 2858 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
aph@7880 2859 if (dest->is_address() || src->is_address()) {
aph@7880 2860 move_op(src, dest, type, lir_patch_none, info,
aph@7880 2861 /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
aph@7880 2862 } else {
aph@7880 2863 ShouldNotReachHere();
aph@7880 2864 }
aph@7880 2865 }
aph@7880 2866
aph@7880 2867 #ifdef ASSERT
aph@7880 2868 // emit run-time assertion
aph@7880 2869 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
aph@7880 2870 assert(op->code() == lir_assert, "must be");
aph@7880 2871
aph@7880 2872 if (op->in_opr1()->is_valid()) {
aph@7880 2873 assert(op->in_opr2()->is_valid(), "both operands must be valid");
aph@7880 2874 comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
aph@7880 2875 } else {
aph@7880 2876 assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
aph@7880 2877 assert(op->condition() == lir_cond_always, "no other conditions allowed");
aph@7880 2878 }
aph@7880 2879
aph@7880 2880 Label ok;
aph@7880 2881 if (op->condition() != lir_cond_always) {
aph@7880 2882 Assembler::Condition acond = Assembler::AL;
aph@7880 2883 switch (op->condition()) {
aph@7880 2884 case lir_cond_equal: acond = Assembler::EQ; break;
aph@7880 2885 case lir_cond_notEqual: acond = Assembler::NE; break;
aph@7880 2886 case lir_cond_less: acond = Assembler::LT; break;
aph@7880 2887 case lir_cond_lessEqual: acond = Assembler::LE; break;
aph@7880 2888 case lir_cond_greaterEqual: acond = Assembler::GE; break;
aph@7880 2889 case lir_cond_greater: acond = Assembler::GT; break;
aph@7880 2890 case lir_cond_belowEqual: acond = Assembler::LS; break;
aph@7880 2891 case lir_cond_aboveEqual: acond = Assembler::HS; break;
aph@7880 2892 default: ShouldNotReachHere();
aph@7880 2893 }
aph@7880 2894 __ br(acond, ok);
aph@7880 2895 }
aph@7880 2896 if (op->halt()) {
aph@7880 2897 const char* str = __ code_string(op->msg());
aph@7880 2898 __ stop(str);
aph@7880 2899 } else {
aph@7880 2900 breakpoint();
aph@7880 2901 }
aph@7880 2902 __ bind(ok);
aph@7880 2903 }
aph@7880 2904 #endif
aph@7880 2905
aph@7880 2906 #ifndef PRODUCT
aph@7880 2907 #define COMMENT(x) do { __ block_comment(x); } while (0)
aph@7880 2908 #else
aph@7880 2909 #define COMMENT(x)
aph@7880 2910 #endif
aph@7880 2911
aph@7880 2912 void LIR_Assembler::membar() {
aph@7880 2913 COMMENT("membar");
aph@7880 2914 __ membar(MacroAssembler::AnyAny);
aph@7880 2915 }
aph@7880 2916
aph@7880 2917 void LIR_Assembler::membar_acquire() {
aph@7880 2918 __ membar(Assembler::LoadLoad|Assembler::LoadStore);
aph@7880 2919 }
aph@7880 2920
aph@7880 2921 void LIR_Assembler::membar_release() {
aph@7880 2922 __ membar(Assembler::LoadStore|Assembler::StoreStore);
aph@7880 2923 }
aph@7880 2924
aph@7880 2925 void LIR_Assembler::membar_loadload() {
aph@7880 2926 __ membar(Assembler::LoadLoad);
aph@7880 2927 }
aph@7880 2928
aph@7880 2929 void LIR_Assembler::membar_storestore() {
aph@7880 2930 __ membar(MacroAssembler::StoreStore);
aph@7880 2931 }
aph@7880 2932
aph@7880 2933 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
aph@7880 2934
aph@7880 2935 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
aph@7880 2936
ikrylov@10962 2937 void LIR_Assembler::on_spin_wait() {
ikrylov@10962 2938 Unimplemented();
ikrylov@10962 2939 }
ikrylov@10962 2940
aph@7880 2941 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
aph@7880 2942 __ mov(result_reg->as_register(), rthread);
aph@7880 2943 }
aph@7880 2944
aph@7880 2945
// LIR peephole pass. The entire body is compiled out with #if 0, so this
// is currently a no-op on aarch64. The disabled code is a finite-state
// machine that recognizes runs of "cmp reg, K; beq L" with consecutive
// keys K and rewrites them into a tableswitch. It references state
// (tableswitch_count, max_tableswitches, switches[]) that would have to be
// declared elsewhere before this could be re-enabled — TODO confirm those
// declarations still exist before removing the #if 0.
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  // States: start_s = looking for the first cmp; cmp_s = saw a cmp, expect
  // a beq; beq_s = saw the beq, expect the next cmp with key next_key.
  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        // A run can only start at "cmp single-cpu-reg, int-constant".
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        // The run continues only if the same register is compared against
        // exactly the next consecutive key.
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    // Whenever the automaton resets, check whether the run just ended is
    // long enough (more than ~6 consecutive keys) to be worth rewriting.
    if (state == start_s) {
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          //   for (int i = 0; i < inst->length(); i++) {
          //     inst->at(i)->print();
          //     tty->print("\n");
          //   }
          //   tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            // Each original cmp/branch pair is two instructions back;
            // bump i because the list just grew in front of the cursor.
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}
aph@7880 3074
aph@7880 3075 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
aph@7880 3076 Address addr = as_Address(src->as_address_ptr(), noreg);
aph@7880 3077 BasicType type = src->type();
aph@7880 3078 bool is_oop = type == T_OBJECT || type == T_ARRAY;
aph@7880 3079
enevill@10782 3080 void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
enevill@10782 3081 void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
aph@7880 3082
aph@7880 3083 switch(type) {
aph@7880 3084 case T_INT:
enevill@10782 3085 xchg = &MacroAssembler::atomic_xchgalw;
enevill@10782 3086 add = &MacroAssembler::atomic_addalw;
aph@7880 3087 break;
aph@7880 3088 case T_LONG:
enevill@10782 3089 xchg = &MacroAssembler::atomic_xchgal;
enevill@10782 3090 add = &MacroAssembler::atomic_addal;
aph@7880 3091 break;
aph@7880 3092 case T_OBJECT:
aph@7880 3093 case T_ARRAY:
aph@7880 3094 if (UseCompressedOops) {
enevill@10782 3095 xchg = &MacroAssembler::atomic_xchgalw;
enevill@10782 3096 add = &MacroAssembler::atomic_addalw;
aph@7880 3097 } else {
enevill@10782 3098 xchg = &MacroAssembler::atomic_xchgal;
enevill@10782 3099 add = &MacroAssembler::atomic_addal;
aph@7880 3100 }
aph@7880 3101 break;
aph@7880 3102 default:
aph@7880 3103 ShouldNotReachHere();
enevill@10782 3104 xchg = &MacroAssembler::atomic_xchgal;
enevill@10782 3105 add = &MacroAssembler::atomic_addal; // unreachable
aph@7880 3106 }
aph@7880 3107
aph@7880 3108 switch (code) {
aph@7880 3109 case lir_xadd:
aph@7880 3110 {
aph@7880 3111 RegisterOrConstant inc;
aph@7880 3112 Register tmp = as_reg(tmp_op);
aph@7880 3113 Register dst = as_reg(dest);
aph@7880 3114 if (data->is_constant()) {
aph@7880 3115 inc = RegisterOrConstant(as_long(data));
aph@7880 3116 assert_different_registers(dst, addr.base(), tmp,
aph@7880 3117 rscratch1, rscratch2);
aph@7880 3118 } else {
aph@7880 3119 inc = RegisterOrConstant(as_reg(data));
aph@7880 3120 assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
aph@7880 3121 rscratch1, rscratch2);
aph@7880 3122 }
aph@7880 3123 __ lea(tmp, addr);
enevill@10782 3124 (_masm->*add)(dst, inc, tmp);
aph@7880 3125 break;
aph@7880 3126 }
aph@7880 3127 case lir_xchg:
aph@7880 3128 {
aph@7880 3129 Register tmp = tmp_op->as_register();
aph@7880 3130 Register obj = as_reg(data);
aph@7880 3131 Register dst = as_reg(dest);
aph@7880 3132 if (is_oop && UseCompressedOops) {
enevill@10787 3133 __ encode_heap_oop(rscratch2, obj);
enevill@10787 3134 obj = rscratch2;
aph@7880 3135 }
enevill@10787 3136 assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
aph@7880 3137 __ lea(tmp, addr);
enevill@10782 3138 (_masm->*xchg)(dst, obj, tmp);
aph@7880 3139 if (is_oop && UseCompressedOops) {
aph@7880 3140 __ decode_heap_oop(dst);
aph@7880 3141 }
aph@7880 3142 }
aph@7880 3143 break;
aph@7880 3144 default:
aph@7880 3145 ShouldNotReachHere();
aph@7880 3146 }
aph@7880 3147 __ membar(__ AnyAny);
aph@7880 3148 }
aph@7880 3149
aph@7880 3150 #undef __