annotate src/cpu/sparc/vm/nativeInst_sparc.cpp @ 0:a61af66fc99e

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children 018d5b58dd4f
rev   line source
duke@0 1 /*
duke@0 2 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
duke@0 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 * have any questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25 # include "incls/_precompiled.incl"
duke@0 26 # include "incls/_nativeInst_sparc.cpp.incl"
duke@0 27
duke@0 28
duke@0 29 void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
duke@0 30 ResourceMark rm;
duke@0 31 CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
duke@0 32 MacroAssembler* _masm = new MacroAssembler(&buf);
duke@0 33 Register destreg;
duke@0 34
duke@0 35 destreg = inv_rd(*(unsigned int *)instaddr);
duke@0 36 // Generate the new sequence
duke@0 37 Address dest( destreg, (address)x );
duke@0 38 _masm->sethi( dest, true );
duke@0 39 ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
duke@0 40 }
duke@0 41
duke@0 42 void NativeInstruction::verify() {
duke@0 43 // make sure code pattern is actually an instruction address
duke@0 44 address addr = addr_at(0);
duke@0 45 if (addr == 0 || ((intptr_t)addr & 3) != 0) {
duke@0 46 fatal("not an instruction address");
duke@0 47 }
duke@0 48 }
duke@0 49
duke@0 50 void NativeInstruction::print() {
duke@0 51 tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0));
duke@0 52 }
duke@0 53
duke@0 54 void NativeInstruction::set_long_at(int offset, int i) {
duke@0 55 address addr = addr_at(offset);
duke@0 56 *(int*)addr = i;
duke@0 57 ICache::invalidate_word(addr);
duke@0 58 }
duke@0 59
duke@0 60 void NativeInstruction::set_jlong_at(int offset, jlong i) {
duke@0 61 address addr = addr_at(offset);
duke@0 62 *(jlong*)addr = i;
duke@0 63 // Don't need to invalidate 2 words here, because
duke@0 64 // the flush instruction operates on doublewords.
duke@0 65 ICache::invalidate_word(addr);
duke@0 66 }
duke@0 67
duke@0 68 void NativeInstruction::set_addr_at(int offset, address x) {
duke@0 69 address addr = addr_at(offset);
duke@0 70 assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
duke@0 71 *(uintptr_t*)addr = (uintptr_t)x;
duke@0 72 // Don't need to invalidate 2 words here in the 64-bit case,
duke@0 73 // because the flush instruction operates on doublewords.
duke@0 74 ICache::invalidate_word(addr);
duke@0 75 // The Intel code has this assertion for NativeCall::set_destination,
duke@0 76 // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
duke@0 77 // NativeJump::set_jump_destination, and NativePushImm32::set_data
duke@0 78 //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
duke@0 79 }
duke@0 80
duke@0 81 bool NativeInstruction::is_zero_test(Register &reg) {
duke@0 82 int x = long_at(0);
duke@0 83 Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
duke@0 84 if (is_op3(x, temp, Assembler::arith_op) &&
duke@0 85 inv_immed(x) && inv_rd(x) == G0) {
duke@0 86 if (inv_rs1(x) == G0) {
duke@0 87 reg = inv_rs2(x);
duke@0 88 return true;
duke@0 89 } else if (inv_rs2(x) == G0) {
duke@0 90 reg = inv_rs1(x);
duke@0 91 return true;
duke@0 92 }
duke@0 93 }
duke@0 94 return false;
duke@0 95 }
duke@0 96
duke@0 97 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
duke@0 98 int x = long_at(0);
duke@0 99 if (is_op(x, Assembler::ldst_op) &&
duke@0 100 inv_rs1(x) == reg && inv_immed(x)) {
duke@0 101 return true;
duke@0 102 }
duke@0 103 return false;
duke@0 104 }
duke@0 105
duke@0 106 void NativeCall::verify() {
duke@0 107 NativeInstruction::verify();
duke@0 108 // make sure code pattern is actually a call instruction
duke@0 109 if (!is_op(long_at(0), Assembler::call_op)) {
duke@0 110 fatal("not a call");
duke@0 111 }
duke@0 112 }
duke@0 113
duke@0 114 void NativeCall::print() {
duke@0 115 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
duke@0 116 }
duke@0 117
duke@0 118
duke@0 119 // MT-safe patching of a call instruction (and following word).
duke@0 120 // First patches the second word, and then atomically replaces
duke@0 121 // the first word with the first new instruction word.
duke@0 122 // Other processors might briefly see the old first word
duke@0 123 // followed by the new second word. This is OK if the old
duke@0 124 // second word is harmless, and the new second word may be
duke@0 125 // harmlessly executed in the delay slot of the call.
duke@0 126 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
duke@0 127 assert(Patching_lock->is_locked() ||
duke@0 128 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
duke@0 129 assert (instr_addr != NULL, "illegal address for code patching");
duke@0 130 NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call
duke@0 131 assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
duke@0 132 int i0 = ((int*)code_buffer)[0];
duke@0 133 int i1 = ((int*)code_buffer)[1];
duke@0 134 int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
duke@0 135 assert(inv_op(*contention_addr) == Assembler::arith_op ||
duke@0 136 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
duke@0 137 "must not interfere with original call");
duke@0 138 // The set_long_at calls do the ICache invalidate, so we just need to do them in reverse order
duke@0 139 n_call->set_long_at(1*BytesPerInstWord, i1);
duke@0 140 n_call->set_long_at(0*BytesPerInstWord, i0);
duke@0 141 // NOTE: It is possible that another thread T will execute
duke@0 142 // only the second patched word.
duke@0 143 // In other words, since the original instruction is this
duke@0 144 // call patching_stub; nop (NativeCall)
duke@0 145 // and the new sequence from the buffer is this:
duke@0 146 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg)
duke@0 147 // what T will execute is this:
duke@0 148 // call patching_stub; add %r, %lo(K), %r
duke@0 149 // thereby putting garbage into %r before calling the patching stub.
duke@0 150 // This is OK, because the patching stub ignores the value of %r.
duke@0 151
duke@0 152 // Make sure the first-patched instruction, which may co-exist
duke@0 153 // briefly with the call, will do something harmless.
duke@0 154 assert(inv_op(*contention_addr) == Assembler::arith_op ||
duke@0 155 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
duke@0 156 "must not interfere with original call");
duke@0 157 }
duke@0 158
duke@0 159 // Similar to replace_mt_safe, but just changes the destination. The
duke@0 160 // important thing is that free-running threads are able to execute this
duke@0 161 // call instruction at all times. Thus, the displacement field must be
duke@0 162 // instruction-word-aligned. This is always true on SPARC.
duke@0 163 //
duke@0 164 // Used in the runtime linkage of calls; see class CompiledIC.
duke@0 165 void NativeCall::set_destination_mt_safe(address dest) {
duke@0 166 assert(Patching_lock->is_locked() ||
duke@0 167 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
duke@0 168 // set_destination uses set_long_at which does the ICache::invalidate
duke@0 169 set_destination(dest);
duke@0 170 }
duke@0 171
duke@0 172 // Code for unit testing implementation of NativeCall class
duke@0 173 void NativeCall::test() {
duke@0 174 #ifdef ASSERT
duke@0 175 ResourceMark rm;
duke@0 176 CodeBuffer cb("test", 100, 100);
duke@0 177 MacroAssembler* a = new MacroAssembler(&cb);
duke@0 178 NativeCall *nc;
duke@0 179 uint idx;
duke@0 180 int offsets[] = {
duke@0 181 0x0,
duke@0 182 0xfffffff0,
duke@0 183 0x7ffffff0,
duke@0 184 0x80000000,
duke@0 185 0x20,
duke@0 186 0x4000,
duke@0 187 };
duke@0 188
duke@0 189 VM_Version::allow_all();
duke@0 190
duke@0 191 a->call( a->pc(), relocInfo::none );
duke@0 192 a->delayed()->nop();
duke@0 193 nc = nativeCall_at( cb.code_begin() );
duke@0 194 nc->print();
duke@0 195
duke@0 196 nc = nativeCall_overwriting_at( nc->next_instruction_address() );
duke@0 197 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
duke@0 198 nc->set_destination( cb.code_begin() + offsets[idx] );
duke@0 199 assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test");
duke@0 200 nc->print();
duke@0 201 }
duke@0 202
duke@0 203 nc = nativeCall_before( cb.code_begin() + 8 );
duke@0 204 nc->print();
duke@0 205
duke@0 206 VM_Version::revert();
duke@0 207 #endif
duke@0 208 }
duke@0 209 // End code for unit testing implementation of NativeCall class
duke@0 210
duke@0 211 //-------------------------------------------------------------------
duke@0 212
duke@0 213 #ifdef _LP64
duke@0 214
duke@0 215 void NativeFarCall::set_destination(address dest) {
duke@0 216 // Address materialized in the instruction stream, so nothing to do.
duke@0 217 return;
duke@0 218 #if 0 // What we'd do if we really did want to change the destination
duke@0 219 if (destination() == dest) {
duke@0 220 return;
duke@0 221 }
duke@0 222 ResourceMark rm;
duke@0 223 CodeBuffer buf(addr_at(0), instruction_size + 1);
duke@0 224 MacroAssembler* _masm = new MacroAssembler(&buf);
duke@0 225 // Generate the new sequence
duke@0 226 Address(O7, dest);
duke@0 227 _masm->jumpl_to(dest, O7);
duke@0 228 ICache::invalidate_range(addr_at(0), instruction_size );
duke@0 229 #endif
duke@0 230 }
duke@0 231
duke@0 232 void NativeFarCall::verify() {
duke@0 233 // make sure code pattern is actually a jumpl_to instruction
duke@0 234 assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
duke@0 235 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@0 236 nativeJump_at(addr_at(0))->verify();
duke@0 237 }
duke@0 238
duke@0 239 bool NativeFarCall::is_call_at(address instr) {
duke@0 240 return nativeInstruction_at(instr)->is_sethi();
duke@0 241 }
duke@0 242
duke@0 243 void NativeFarCall::print() {
duke@0 244 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination());
duke@0 245 }
duke@0 246
duke@0 247 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
duke@0 248 nmethod* callee = CodeCache::find_nmethod(destination());
duke@0 249 if (callee == NULL) {
duke@0 250 return false;
duke@0 251 } else {
duke@0 252 return destination() == callee->verified_entry_point();
duke@0 253 }
duke@0 254 }
duke@0 255
duke@0 256 // MT-safe patching of a far call.
duke@0 257 void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
duke@0 258 Unimplemented();
duke@0 259 }
duke@0 260
duke@0 261 // Code for unit testing implementation of NativeFarCall class
duke@0 262 void NativeFarCall::test() {
duke@0 263 Unimplemented();
duke@0 264 }
duke@0 265 // End code for unit testing implementation of NativeFarCall class
duke@0 266
duke@0 267 #endif // _LP64
duke@0 268
duke@0 269 //-------------------------------------------------------------------
duke@0 270
duke@0 271
duke@0 272 void NativeMovConstReg::verify() {
duke@0 273 NativeInstruction::verify();
duke@0 274 // make sure code pattern is actually a "set_oop" synthetic instruction
duke@0 275 // see MacroAssembler::set_oop()
duke@0 276 int i0 = long_at(sethi_offset);
duke@0 277 int i1 = long_at(add_offset);
duke@0 278
duke@0 279 // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg"
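// Worked example of that split (32-bit case): for imm = 0x12345678 the sethi supplies
// the upper 22 bits (0x12345678 >> 10 == 0x48d15, leaving 0x12345400 in the register)
// and the add supplies the low 10 bits (0x12345678 & 0x3ff == 0x278), reconstructing 0x12345678.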
duke@0 280 Register rd = inv_rd(i0);
duke@0 281 #ifndef _LP64
duke@0 282 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
duke@0 283 is_op3(i1, Assembler::add_op3, Assembler::arith_op) &&
duke@0 284 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
duke@0 285 rd == inv_rs1(i1) && rd == inv_rd(i1))) {
duke@0 286 fatal("not a set_oop");
duke@0 287 }
duke@0 288 #else
duke@0 289 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
duke@0 290 fatal("not a set_oop");
duke@0 291 }
duke@0 292 #endif
duke@0 293 }
duke@0 294
duke@0 295
duke@0 296 void NativeMovConstReg::print() {
duke@0 297 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
duke@0 298 }
duke@0 299
duke@0 300
duke@0 301 #ifdef _LP64
duke@0 302 intptr_t NativeMovConstReg::data() const {
duke@0 303 return data64(addr_at(sethi_offset), long_at(add_offset));
duke@0 304 }
duke@0 305 #else
duke@0 306 intptr_t NativeMovConstReg::data() const {
duke@0 307 return data32(long_at(sethi_offset), long_at(add_offset));
duke@0 308 }
duke@0 309 #endif
duke@0 310
duke@0 311
duke@0 312 void NativeMovConstReg::set_data(intptr_t x) {
duke@0 313 #ifdef _LP64
duke@0 314 set_data64_sethi(addr_at(sethi_offset), x);
duke@0 315 #else
duke@0 316 set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x));
duke@0 317 #endif
duke@0 318 set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x));
duke@0 319
duke@0 320 // also store the value into an oop_Relocation cell, if any
duke@0 321 CodeBlob* nm = CodeCache::find_blob(instruction_address());
duke@0 322 if (nm != NULL) {
duke@0 323 RelocIterator iter(nm, instruction_address(), next_instruction_address());
duke@0 324 oop* oop_addr = NULL;
duke@0 325 while (iter.next()) {
duke@0 326 if (iter.type() == relocInfo::oop_type) {
duke@0 327 oop_Relocation *r = iter.oop_reloc();
duke@0 328 if (oop_addr == NULL) {
duke@0 329 oop_addr = r->oop_addr();
duke@0 330 *oop_addr = (oop)x;
duke@0 331 } else {
duke@0 332 assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
duke@0 333 }
duke@0 334 }
duke@0 335 }
duke@0 336 }
duke@0 337 }
duke@0 338
duke@0 339
duke@0 340 // Code for unit testing implementation of NativeMovConstReg class
duke@0 341 void NativeMovConstReg::test() {
duke@0 342 #ifdef ASSERT
duke@0 343 ResourceMark rm;
duke@0 344 CodeBuffer cb("test", 100, 100);
duke@0 345 MacroAssembler* a = new MacroAssembler(&cb);
duke@0 346 NativeMovConstReg* nm;
duke@0 347 uint idx;
duke@0 348 int offsets[] = {
duke@0 349 0x0,
duke@0 350 0x7fffffff,
duke@0 351 0x80000000,
duke@0 352 0xffffffff,
duke@0 353 0x20,
duke@0 354 4096,
duke@0 355 4097,
duke@0 356 };
duke@0 357
duke@0 358 VM_Version::allow_all();
duke@0 359
duke@0 360 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none);
duke@0 361 a->add(I3, low10(0xaaaabbbb), I3);
duke@0 362 a->sethi(0xccccdddd, O2, true, RelocationHolder::none);
duke@0 363 a->add(O2, low10(0xccccdddd), O2);
duke@0 364
duke@0 365 nm = nativeMovConstReg_at( cb.code_begin() );
duke@0 366 nm->print();
duke@0 367
duke@0 368 nm = nativeMovConstReg_at( nm->next_instruction_address() );
duke@0 369 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
duke@0 370 nm->set_data( offsets[idx] );
duke@0 371 assert(nm->data() == offsets[idx], "check unit test");
duke@0 372 }
duke@0 373 nm->print();
duke@0 374
duke@0 375 VM_Version::revert();
duke@0 376 #endif
duke@0 377 }
duke@0 378 // End code for unit testing implementation of NativeMovConstReg class
duke@0 379
duke@0 380 //-------------------------------------------------------------------
duke@0 381
duke@0 382 void NativeMovConstRegPatching::verify() {
duke@0 383 NativeInstruction::verify();
duke@0 384 // Make sure code pattern is sethi/nop/add.
duke@0 385 int i0 = long_at(sethi_offset);
duke@0 386 int i1 = long_at(nop_offset);
duke@0 387 int i2 = long_at(add_offset);
duke@0 388 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@0 389
duke@0 390 // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
duke@0 391 // The casual reader should note that on Sparc a nop is a special case of sethi
duke@0 392 // in which the destination register is %g0.
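// (For instance, the canonical nop encodes as "sethi 0, %g0", i.e. the
// instruction word 0x01000000.)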
duke@0 393 Register rd0 = inv_rd(i0);
duke@0 394 Register rd1 = inv_rd(i1);
duke@0 395 if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
duke@0 396 is_op2(i1, Assembler::sethi_op2) && rd1 == G0 && // nop is a special case of sethi
duke@0 397 is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
duke@0 398 inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
duke@0 399 rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
duke@0 400 fatal("not a set_oop");
duke@0 401 }
duke@0 402 }
duke@0 403
duke@0 404
duke@0 405 void NativeMovConstRegPatching::print() {
duke@0 406 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data());
duke@0 407 }
duke@0 408
duke@0 409
duke@0 410 int NativeMovConstRegPatching::data() const {
duke@0 411 #ifdef _LP64
duke@0 412 return data64(addr_at(sethi_offset), long_at(add_offset));
duke@0 413 #else
duke@0 414 return data32(long_at(sethi_offset), long_at(add_offset));
duke@0 415 #endif
duke@0 416 }
duke@0 417
duke@0 418
duke@0 419 void NativeMovConstRegPatching::set_data(int x) {
duke@0 420 #ifdef _LP64
duke@0 421 set_data64_sethi(addr_at(sethi_offset), x);
duke@0 422 #else
duke@0 423 set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x));
duke@0 424 #endif
duke@0 425 set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));
duke@0 426
duke@0 427 // also store the value into an oop_Relocation cell, if any
duke@0 428 CodeBlob* nm = CodeCache::find_blob(instruction_address());
duke@0 429 if (nm != NULL) {
duke@0 430 RelocIterator iter(nm, instruction_address(), next_instruction_address());
duke@0 431 oop* oop_addr = NULL;
duke@0 432 while (iter.next()) {
duke@0 433 if (iter.type() == relocInfo::oop_type) {
duke@0 434 oop_Relocation *r = iter.oop_reloc();
duke@0 435 if (oop_addr == NULL) {
duke@0 436 oop_addr = r->oop_addr();
duke@0 437 *oop_addr = (oop)x;
duke@0 438 } else {
duke@0 439 assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
duke@0 440 }
duke@0 441 }
duke@0 442 }
duke@0 443 }
duke@0 444 }
duke@0 445
duke@0 446
duke@0 447 // Code for unit testing implementation of NativeMovConstRegPatching class
duke@0 448 void NativeMovConstRegPatching::test() {
duke@0 449 #ifdef ASSERT
duke@0 450 ResourceMark rm;
duke@0 451 CodeBuffer cb("test", 100, 100);
duke@0 452 MacroAssembler* a = new MacroAssembler(&cb);
duke@0 453 NativeMovConstRegPatching* nm;
duke@0 454 uint idx;
duke@0 455 int offsets[] = {
duke@0 456 0x0,
duke@0 457 0x7fffffff,
duke@0 458 0x80000000,
duke@0 459 0xffffffff,
duke@0 460 0x20,
duke@0 461 4096,
duke@0 462 4097,
duke@0 463 };
duke@0 464
duke@0 465 VM_Version::allow_all();
duke@0 466
duke@0 467 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none);
duke@0 468 a->nop();
duke@0 469 a->add(I3, low10(0xaaaabbbb), I3);
duke@0 470 a->sethi(0xccccdddd, O2, true, RelocationHolder::none);
duke@0 471 a->nop();
duke@0 472 a->add(O2, low10(0xccccdddd), O2);
duke@0 473
duke@0 474 nm = nativeMovConstRegPatching_at( cb.code_begin() );
duke@0 475 nm->print();
duke@0 476
duke@0 477 nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
duke@0 478 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
duke@0 479 nm->set_data( offsets[idx] );
duke@0 480 assert(nm->data() == offsets[idx], "check unit test");
duke@0 481 }
duke@0 482 nm->print();
duke@0 483
duke@0 484 VM_Version::revert();
duke@0 485 #endif // ASSERT
duke@0 486 }
duke@0 487 // End code for unit testing implementation of NativeMovConstRegPatching class
duke@0 488
duke@0 489
duke@0 490 //-------------------------------------------------------------------
duke@0 491
duke@0 492
duke@0 493 void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
duke@0 494 Untested("copy_instruction_to");
duke@0 495 int instruction_size = next_instruction_address() - instruction_address();
duke@0 496 for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
duke@0 497 *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
duke@0 498 }
duke@0 499 }
duke@0 500
duke@0 501
duke@0 502 void NativeMovRegMem::verify() {
duke@0 503 NativeInstruction::verify();
duke@0 504 // make sure code pattern is actually a "ld" or "st" of some sort.
duke@0 505 int i0 = long_at(0);
duke@0 506 int op3 = inv_op3(i0);
duke@0 507
duke@0 508 assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");
duke@0 509
duke@0 510 if (!(is_op(i0, Assembler::ldst_op) &&
duke@0 511 inv_immed(i0) &&
duke@0 512 0 != (op3 < op3_ldst_int_limit
duke@0 513 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@0 514 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
duke@0 515 {
duke@0 516 int i1 = long_at(ldst_offset);
duke@0 517 Register rd = inv_rd(i0);
duke@0 518
duke@0 519 op3 = inv_op3(i1);
duke@0 520 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
duke@0 521 0 != (op3 < op3_ldst_int_limit
duke@0 522 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@0 523 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
duke@0 524 fatal("not a ld* or st* op");
duke@0 525 }
duke@0 526 }
duke@0 527 }
duke@0 528
duke@0 529
duke@0 530 void NativeMovRegMem::print() {
duke@0 531 if (is_immediate()) {
duke@0 532 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
duke@0 533 } else {
duke@0 534 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
duke@0 535 }
duke@0 536 }
duke@0 537
duke@0 538
duke@0 539 // Code for unit testing implementation of NativeMovRegMem class
duke@0 540 void NativeMovRegMem::test() {
duke@0 541 #ifdef ASSERT
duke@0 542 ResourceMark rm;
duke@0 543 CodeBuffer cb("test", 1000, 1000);
duke@0 544 MacroAssembler* a = new MacroAssembler(&cb);
duke@0 545 NativeMovRegMem* nm;
duke@0 546 uint idx = 0;
duke@0 547 uint idx1;
duke@0 548 int offsets[] = {
duke@0 549 0x0,
duke@0 550 0xffffffff,
duke@0 551 0x7fffffff,
duke@0 552 0x80000000,
duke@0 553 4096,
duke@0 554 4097,
duke@0 555 0x20,
duke@0 556 0x4000,
duke@0 557 };
duke@0 558
duke@0 559 VM_Version::allow_all();
duke@0 560
duke@0 561 a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
duke@0 562 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 563 a->ldsw( G5, I3, G4 ); idx++;
duke@0 564 a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
duke@0 565 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 566 a->ldsb( G5, I3, G4 ); idx++;
duke@0 567 a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
duke@0 568 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 569 a->ldsh( G5, I3, G4 ); idx++;
duke@0 570 a->lduw( G5, low10(0xffffffff), G4 ); idx++;
duke@0 571 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 572 a->lduw( G5, I3, G4 ); idx++;
duke@0 573 a->ldub( G5, low10(0xffffffff), G4 ); idx++;
duke@0 574 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 575 a->ldub( G5, I3, G4 ); idx++;
duke@0 576 a->lduh( G5, low10(0xffffffff), G4 ); idx++;
duke@0 577 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 578 a->lduh( G5, I3, G4 ); idx++;
duke@0 579 a->ldx( G5, low10(0xffffffff), G4 ); idx++;
duke@0 580 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 581 a->ldx( G5, I3, G4 ); idx++;
duke@0 582 a->ldd( G5, low10(0xffffffff), G4 ); idx++;
duke@0 583 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 584 a->ldd( G5, I3, G4 ); idx++;
duke@0 585 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
duke@0 586 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 587 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
duke@0 588
duke@0 589 a->stw( G5, G4, low10(0xffffffff) ); idx++;
duke@0 590 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 591 a->stw( G5, G4, I3 ); idx++;
duke@0 592 a->stb( G5, G4, low10(0xffffffff) ); idx++;
duke@0 593 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 594 a->stb( G5, G4, I3 ); idx++;
duke@0 595 a->sth( G5, G4, low10(0xffffffff) ); idx++;
duke@0 596 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 597 a->sth( G5, G4, I3 ); idx++;
duke@0 598 a->stx( G5, G4, low10(0xffffffff) ); idx++;
duke@0 599 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 600 a->stx( G5, G4, I3 ); idx++;
duke@0 601 a->std( G5, G4, low10(0xffffffff) ); idx++;
duke@0 602 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 603 a->std( G5, G4, I3 ); idx++;
duke@0 604 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
duke@0 605 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 606 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
duke@0 607
duke@0 608 nm = nativeMovRegMem_at( cb.code_begin() );
duke@0 609 nm->print();
duke@0 610 nm->set_offset( low10(0) );
duke@0 611 nm->print();
duke@0 612 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@0 613 nm->print();
duke@0 614
duke@0 615 while (--idx) {
duke@0 616 nm = nativeMovRegMem_at( nm->next_instruction_address() );
duke@0 617 nm->print();
duke@0 618 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
duke@0 619 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
duke@0 620 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
duke@0 621 "check unit test");
duke@0 622 nm->print();
duke@0 623 }
duke@0 624 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@0 625 nm->print();
duke@0 626 }
duke@0 627
duke@0 628 VM_Version::revert();
duke@0 629 #endif // ASSERT
duke@0 630 }
duke@0 631
duke@0 632 // End code for unit testing implementation of NativeMovRegMem class
duke@0 633
duke@0 634 //--------------------------------------------------------------------------------
duke@0 635
duke@0 636
duke@0 637 void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) {
duke@0 638 Untested("copy_instruction_to");
duke@0 639 int instruction_size = next_instruction_address() - instruction_address();
duke@0 640 for (int i = 0; i < instruction_size; i += wordSize) {
duke@0 641 *(long*)(new_instruction_address + i) = *(long*)(address(this) + i);
duke@0 642 }
duke@0 643 }
duke@0 644
duke@0 645
duke@0 646 void NativeMovRegMemPatching::verify() {
duke@0 647 NativeInstruction::verify();
duke@0 648 // make sure code pattern is actually a "ld" or "st" of some sort.
duke@0 649 int i0 = long_at(0);
duke@0 650 int op3 = inv_op3(i0);
duke@0 651
duke@0 652 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@0 653
duke@0 654 if (!(is_op(i0, Assembler::ldst_op) &&
duke@0 655 inv_immed(i0) &&
duke@0 656 0 != (op3 < op3_ldst_int_limit
duke@0 657 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@0 658 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) {
duke@0 659 int i1 = long_at(ldst_offset);
duke@0 660 Register rd = inv_rd(i0);
duke@0 661
duke@0 662 op3 = inv_op3(i1);
duke@0 663 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
duke@0 664 0 != (op3 < op3_ldst_int_limit
duke@0 665 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st)
duke@0 666 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
duke@0 667 fatal("not a ld* or st* op");
duke@0 668 }
duke@0 669 }
duke@0 670 }
duke@0 671
duke@0 672
duke@0 673 void NativeMovRegMemPatching::print() {
duke@0 674 if (is_immediate()) {
duke@0 675 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset());
duke@0 676 } else {
duke@0 677 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address());
duke@0 678 }
duke@0 679 }
duke@0 680
duke@0 681
duke@0 682 // Code for unit testing implementation of NativeMovRegMemPatching class
duke@0 683 void NativeMovRegMemPatching::test() {
duke@0 684 #ifdef ASSERT
duke@0 685 ResourceMark rm;
duke@0 686 CodeBuffer cb("test", 1000, 1000);
duke@0 687 MacroAssembler* a = new MacroAssembler(&cb);
duke@0 688 NativeMovRegMemPatching* nm;
duke@0 689 uint idx = 0;
duke@0 690 uint idx1;
duke@0 691 int offsets[] = {
duke@0 692 0x0,
duke@0 693 0xffffffff,
duke@0 694 0x7fffffff,
duke@0 695 0x80000000,
duke@0 696 4096,
duke@0 697 4097,
duke@0 698 0x20,
duke@0 699 0x4000,
duke@0 700 };
duke@0 701
duke@0 702 VM_Version::allow_all();
duke@0 703
duke@0 704 a->ldsw( G5, low10(0xffffffff), G4 ); idx++;
duke@0 705 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 706 a->ldsw( G5, I3, G4 ); idx++;
duke@0 707 a->ldsb( G5, low10(0xffffffff), G4 ); idx++;
duke@0 708 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 709 a->ldsb( G5, I3, G4 ); idx++;
duke@0 710 a->ldsh( G5, low10(0xffffffff), G4 ); idx++;
duke@0 711 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 712 a->ldsh( G5, I3, G4 ); idx++;
duke@0 713 a->lduw( G5, low10(0xffffffff), G4 ); idx++;
duke@0 714 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 715 a->lduw( G5, I3, G4 ); idx++;
duke@0 716 a->ldub( G5, low10(0xffffffff), G4 ); idx++;
duke@0 717 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 718 a->ldub( G5, I3, G4 ); idx++;
duke@0 719 a->lduh( G5, low10(0xffffffff), G4 ); idx++;
duke@0 720 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 721 a->lduh( G5, I3, G4 ); idx++;
duke@0 722 a->ldx( G5, low10(0xffffffff), G4 ); idx++;
duke@0 723 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 724 a->ldx( G5, I3, G4 ); idx++;
duke@0 725 a->ldd( G5, low10(0xffffffff), G4 ); idx++;
duke@0 726 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 727 a->ldd( G5, I3, G4 ); idx++;
duke@0 728 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
duke@0 729 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 730 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;
duke@0 731
duke@0 732 a->stw( G5, G4, low10(0xffffffff) ); idx++;
duke@0 733 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 734 a->stw( G5, G4, I3 ); idx++;
duke@0 735 a->stb( G5, G4, low10(0xffffffff) ); idx++;
duke@0 736 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 737 a->stb( G5, G4, I3 ); idx++;
duke@0 738 a->sth( G5, G4, low10(0xffffffff) ); idx++;
duke@0 739 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 740 a->sth( G5, G4, I3 ); idx++;
duke@0 741 a->stx( G5, G4, low10(0xffffffff) ); idx++;
duke@0 742 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 743 a->stx( G5, G4, I3 ); idx++;
duke@0 744 a->std( G5, G4, low10(0xffffffff) ); idx++;
duke@0 745 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 746 a->std( G5, G4, I3 ); idx++;
duke@0 747 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
duke@0 748 a->sethi(0xaaaabbbb, I3, true, RelocationHolder::none); a->nop(); a->add(I3, low10(0xaaaabbbb), I3);
duke@0 749 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;
duke@0 750
duke@0 751 nm = nativeMovRegMemPatching_at( cb.code_begin() );
duke@0 752 nm->print();
duke@0 753 nm->set_offset( low10(0) );
duke@0 754 nm->print();
duke@0 755 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@0 756 nm->print();
duke@0 757
duke@0 758 while (--idx) {
duke@0 759 nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
duke@0 760 nm->print();
duke@0 761 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
duke@0 762 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
duke@0 763 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
duke@0 764 "check unit test");
duke@0 765 nm->print();
duke@0 766 }
duke@0 767 nm->add_offset_in_bytes( low10(0xbb) * wordSize );
duke@0 768 nm->print();
duke@0 769 }
duke@0 770
duke@0 771 VM_Version::revert();
duke@0 772 #endif // ASSERT
duke@0 773 }
duke@0 774 // End code for unit testing implementation of NativeMovRegMemPatching class
duke@0 775
duke@0 776
duke@0 777 //--------------------------------------------------------------------------------
duke@0 778
duke@0 779
duke@0 780 void NativeJump::verify() {
duke@0 781 NativeInstruction::verify();
duke@0 782 int i0 = long_at(sethi_offset);
duke@0 783 int i1 = long_at(jmpl_offset);
duke@0 784 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
duke@0 785 // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
duke@0 786 Register rd = inv_rd(i0);
duke@0 787 #ifndef _LP64
duke@0 788 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
duke@0 789 (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
duke@0 790 (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
duke@0 791 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
duke@0 792 rd == inv_rs1(i1))) {
duke@0 793 fatal("not a jump_to instruction");
duke@0 794 }
duke@0 795 #else
duke@0 796 // In LP64, the jump instruction location varies for non-relocatable
duke@0 797 // jumps; for example, it could be sethi, xor, jmp instead of the
duke@0 798 // 7 instructions for sethi. So let's check sethi only.
duke@0 799 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
duke@0 800 fatal("not a jump_to instruction");
duke@0 801 }
duke@0 802 #endif
duke@0 803 }
duke@0 804
duke@0 805
duke@0 806 void NativeJump::print() {
duke@0 807 tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination());
duke@0 808 }
duke@0 809
duke@0 810
duke@0 811 // Code for unit testing implementation of NativeJump class
duke@0 812 void NativeJump::test() {
duke@0 813 #ifdef ASSERT
duke@0 814 ResourceMark rm;
duke@0 815 CodeBuffer cb("test", 100, 100);
duke@0 816 MacroAssembler* a = new MacroAssembler(&cb);
duke@0 817 NativeJump* nj;
duke@0 818 uint idx;
duke@0 819 int offsets[] = {
duke@0 820 0x0,
duke@0 821 0xffffffff,
duke@0 822 0x7fffffff,
duke@0 823 0x80000000,
duke@0 824 4096,
duke@0 825 4097,
duke@0 826 0x20,
duke@0 827 0x4000,
duke@0 828 };
duke@0 829
duke@0 830 VM_Version::allow_all();
duke@0 831
duke@0 832 a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
duke@0 833 a->jmpl(I3, low10(0x7fffbbbb), G0, RelocationHolder::none);
duke@0 834 a->delayed()->nop();
duke@0 835 a->sethi(0x7fffbbbb, I3, true, RelocationHolder::none);
duke@0 836 a->jmpl(I3, low10(0x7fffbbbb), L3, RelocationHolder::none);
duke@0 837 a->delayed()->nop();
duke@0 838
duke@0 839 nj = nativeJump_at( cb.code_begin() );
duke@0 840 nj->print();
duke@0 841
duke@0 842 nj = nativeJump_at( nj->next_instruction_address() );
duke@0 843 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
duke@0 844 nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
duke@0 845 assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
duke@0 846 nj->print();
duke@0 847 }
duke@0 848
duke@0 849 VM_Version::revert();
duke@0 850 #endif // ASSERT
duke@0 851 }
duke@0 852 // End code for unit testing implementation of NativeJump class
duke@0 853
duke@0 854
duke@0 855 void NativeJump::insert(address code_pos, address entry) {
duke@0 856 Unimplemented();
duke@0 857 }
duke@0 858
duke@0 859 // MT-safe insertion of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
duke@0 860 // The problem: jump_to <dest> is a 3-word instruction sequence (including its delay slot),
duke@0 861 // but an atomic write can only cover 1 word.
duke@0 862 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
duke@0 863 // Here's one way to do it: Pre-allocate a three-word jump sequence somewhere
duke@0 864 // in the header of the nmethod, within a short branch's span of the patch point.
duke@0 865 // Set up the jump sequence using NativeJump::insert, and then use an annulled
duke@0 866 // unconditional branch at the target site (an atomic 1-word update).
duke@0 867 // Limitations: You can only patch nmethods, with any given nmethod patched at
duke@0 868 // most once, and the patch must be in the nmethod's header.
duke@0 869 // It's messy, but you can ask the CodeCache for the nmethod containing the
duke@0 870 // target address.
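// A minimal sketch of the scheme described above, for illustration only:
// "jump_slot" is a hypothetical pre-reserved three-word area (no such slot is
// reserved today, so the address below is just a placeholder), and
// NativeJump::insert is still Unimplemented() in this file.
#if 0
  nmethod* nm = CodeCache::find_nmethod(verified_entry);       // locate the nmethod being patched
  address jump_slot = verified_entry - 3 * BytesPerInstWord;   // placeholder for the assumed header slot
  NativeJump::insert(jump_slot, dest);                         // build the full jump_to <dest> sequence
  // Atomic one-word update at the verified entry: an annulled, unconditional
  // branch to the jump slot (same encoding helpers as insert_unconditional below).
  int br = Assembler::op2(Assembler::br_op2) | Assembler::annul(true) |
           Assembler::cond(Assembler::always) |
           Assembler::wdisp((intptr_t)jump_slot, (intptr_t)verified_entry, 22);
  nativeInstruction_at(verified_entry)->set_long_at(0, br);
#endif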
duke@0 871
duke@0 872 // %%%%% For now, do something MT-stupid:
duke@0 873 ResourceMark rm;
duke@0 874 int code_size = 1 * BytesPerInstWord;
duke@0 875 CodeBuffer cb(verified_entry, code_size + 1);
duke@0 876 MacroAssembler* a = new MacroAssembler(&cb);
duke@0 877 if (VM_Version::v9_instructions_work()) {
duke@0 878 a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
duke@0 879 } else {
duke@0 880 a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
duke@0 881 }
duke@0 882 ICache::invalidate_range(verified_entry, code_size);
duke@0 883 }
duke@0 884
duke@0 885
duke@0 886 void NativeIllegalInstruction::insert(address code_pos) {
duke@0 887 NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
duke@0 888 nii->set_long_at(0, illegal_instruction());
duke@0 889 }
duke@0 890
duke@0 891 static int illegal_instruction_bits = 0;
duke@0 892
duke@0 893 int NativeInstruction::illegal_instruction() {
duke@0 894 if (illegal_instruction_bits == 0) {
duke@0 895 ResourceMark rm;
duke@0 896 char buf[40];
duke@0 897 CodeBuffer cbuf((address)&buf[0], 20);
duke@0 898 MacroAssembler* a = new MacroAssembler(&cbuf);
duke@0 899 address ia = a->pc();
duke@0 900 a->trap(ST_RESERVED_FOR_USER_0 + 1);
duke@0 901 int bits = *(int*)ia;
duke@0 902 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
duke@0 903 illegal_instruction_bits = bits;
duke@0 904 assert(illegal_instruction_bits != 0, "oops");
duke@0 905 }
duke@0 906 return illegal_instruction_bits;
duke@0 907 }
duke@0 908
duke@0 909 static int ic_miss_trap_bits = 0;
duke@0 910
duke@0 911 bool NativeInstruction::is_ic_miss_trap() {
duke@0 912 if (ic_miss_trap_bits == 0) {
duke@0 913 ResourceMark rm;
duke@0 914 char buf[40];
duke@0 915 CodeBuffer cbuf((address)&buf[0], 20);
duke@0 916 MacroAssembler* a = new MacroAssembler(&cbuf);
duke@0 917 address ia = a->pc();
duke@0 918 a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
duke@0 919 int bits = *(int*)ia;
duke@0 920 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
duke@0 921 ic_miss_trap_bits = bits;
duke@0 922 assert(ic_miss_trap_bits != 0, "oops");
duke@0 923 }
duke@0 924 return long_at(0) == ic_miss_trap_bits;
duke@0 925 }
duke@0 926
duke@0 927
duke@0 928 bool NativeInstruction::is_illegal() {
duke@0 929 if (illegal_instruction_bits == 0) {
duke@0 930 return false;
duke@0 931 }
duke@0 932 return long_at(0) == illegal_instruction_bits;
duke@0 933 }
duke@0 934
duke@0 935
duke@0 936 void NativeGeneralJump::verify() {
duke@0 937 assert(((NativeInstruction *)this)->is_jump() ||
duke@0 938 ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
duke@0 939 }
duke@0 940
duke@0 941
duke@0 942 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
duke@0 943 Assembler::Condition condition = Assembler::always;
duke@0 944 int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
duke@0 945 Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
duke@0 946 NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
duke@0 947 ni->set_long_at(0, x);
duke@0 948 }
duke@0 949
duke@0 950
duke@0 951 // MT-safe patching of a jmp instruction (and following word).
duke@0 952 // First patches the second word, and then atomically replaces
duke@0 953 // the first word with the first new instruction word.
duke@0 954 // Other processors might briefly see the old first word
duke@0 955 // followed by the new second word. This is OK if the old
duke@0 956 // second word is harmless, and the new second word may be
duke@0 957 // harmlessly executed in the delay slot of the call.
duke@0 958 void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
duke@0 959 assert(Patching_lock->is_locked() ||
duke@0 960 SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
duke@0 961 assert (instr_addr != NULL, "illegal address for code patching");
duke@0 962 NativeGeneralJump* h_jump = nativeGeneralJump_at (instr_addr); // checking that it is a call
duke@0 963 assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
duke@0 964 int i0 = ((int*)code_buffer)[0];
duke@0 965 int i1 = ((int*)code_buffer)[1];
duke@0 966 int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
duke@0 967 assert(inv_op(*contention_addr) == Assembler::arith_op ||
duke@0 968 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
duke@0 969 "must not interfere with original call");
duke@0 970 // The set_long_at calls do the ICache invalidate, so we just need to do them in reverse order
duke@0 971 h_jump->set_long_at(1*BytesPerInstWord, i1);
duke@0 972 h_jump->set_long_at(0*BytesPerInstWord, i0);
duke@0 973 // NOTE: It is possible that another thread T will execute
duke@0 974 // only the second patched word.
duke@0 975 // In other words, since the original instruction is this
duke@0 976 // jmp patching_stub; nop (NativeGeneralJump)
duke@0 977 // and the new sequence from the buffer is this:
duke@0 978 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg)
duke@0 979 // what T will execute is this:
duke@0 980 // jmp patching_stub; add %r, %lo(K), %r
duke@0 981 // thereby putting garbage into %r before calling the patching stub.
duke@0 982 // This is OK, because the patching stub ignores the value of %r.
duke@0 983
duke@0 984 // Make sure the first-patched instruction, which may co-exist
duke@0 985 // briefly with the call, will do something harmless.
duke@0 986 assert(inv_op(*contention_addr) == Assembler::arith_op ||
duke@0 987 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
duke@0 988 "must not interfere with original call");
duke@0 989 }