annotate src/cpu/x86/vm/nativeInst_x86.hpp @ 13274:c044f8d03932

8182299: Enable disabled clang warnings, build on OSX 10 + Xcode 8 8182656: Make the required changes in GC code to build on OSX 10 + Xcode 8 8182657: Make the required changes in Runtime code to build on OSX 10 + Xcode 8 8182658: Make the required changes in Compiler code to build on OSX 10 + Xcode 8 Reviewed-by: jwilhelm, ehelin, phh Contributed-by: phh <hohensee@amazon.com>, jwilhelm <jesper.wilhelmsson@oracle.com>
author jwilhelm
date Thu, 06 Jul 2017 01:50:26 +0200
parents 28e7bb59323e
children
rev   line source
duke@0 1 /*
jwilhelm@13274 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #ifndef CPU_X86_VM_NATIVEINST_X86_HPP
stefank@1879 26 #define CPU_X86_VM_NATIVEINST_X86_HPP
stefank@1879 27
stefank@1879 28 #include "asm/assembler.hpp"
stefank@1879 29 #include "memory/allocation.hpp"
stefank@1879 30 #include "runtime/icache.hpp"
stefank@1879 31 #include "runtime/os.hpp"
stefank@1879 32
duke@0 33 // We have interfaces for the following instructions:
duke@0 34 // - NativeInstruction
duke@0 35 // - - NativeCall
duke@0 36 // - - NativeMovConstReg
duke@0 37 // - - NativeMovConstRegPatching
duke@0 38 // - - NativeMovRegMem
duke@0 39 // - - NativeMovRegMemPatching
duke@0 40 // - - NativeJump
kvn@12408 41 // - - NativeFarJump
duke@0 42 // - - NativeIllegalOpCode
duke@0 43 // - - NativeGeneralJump
duke@0 44 // - - NativeReturn
duke@0 45 // - - NativeReturnX (return with argument)
duke@0 46 // - - NativePushConst
duke@0 47 // - - NativeTstRegMem
duke@0 48
duke@0 49 // The base class for different kinds of native instruction abstractions.
duke@0 50 // Provides the primitive operations to manipulate code relative to this.
duke@0 51
// A NativeInstruction is a zero-size view overlaid directly on instruction
// bytes in the code cache: addr_at() treats "this" as the address of the
// instruction's first byte, so instances are never constructed normally --
// they are obtained by casting a code address (see nativeInstruction_at).
class NativeInstruction VALUE_OBJ_CLASS_SPEC {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,  // one-byte x86 NOP opcode
    nop_instruction_size        =    1
  };

  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_jump_reg();
  inline bool is_far_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  // Address of this instruction plus a byte offset; all raw readers and
  // writers below are expressed in terms of it.
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  // All writers call wrote() afterwards so a port can hook cache
  // invalidation in one place.
  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint  i)        { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t  ptr) { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop  o)        { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};
duke@0 102
duke@0 103 inline NativeInstruction* nativeInstruction_at(address address) {
duke@0 104 NativeInstruction* inst = (NativeInstruction*)address;
duke@0 105 #ifdef ASSERT
duke@0 106 //inst->verify();
duke@0 107 #endif
duke@0 108 return inst;
duke@0 109 }
duke@0 110
// A 5-byte "call rel32" (opcode 0xE8) whose target is a PLT entry that
// indirects through the GOT (see the plt_* accessors; implementations live
// in the .cpp file).  Layout is identical to NativeCall below.
class NativePltCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code           = 0xE8,  // call rel32 opcode
    instruction_size           =    5,  // 1 opcode byte + 4 displacement bytes
    instruction_offset         =    0,
    displacement_offset        =    1,  // rel32 immediately follows the opcode
    return_address_offset      =    5   // first byte after the call
  };
  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  address displacement_address() const { return addr_at(displacement_offset); }
  int displacement() const { return (jint) int_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;
  // Pieces of the PLT stub this call targets (resolved in the .cpp file).
  address plt_entry() const;
  address plt_jump() const;
  address plt_load_got() const;
  address plt_resolve_call() const;
  address plt_c2i_stub() const;
  void set_stub_to_clean();

  void reset_to_plt_resolve_call();
  void set_destination_mt_safe(address dest);

  void verify() const;
};
kvn@12408 138
kvn@12408 139 inline NativePltCall* nativePltCall_at(address address) {
kvn@12408 140 NativePltCall* call = (NativePltCall*) address;
kvn@12408 141 #ifdef ASSERT
kvn@12408 142 call->verify();
kvn@12408 143 #endif
kvn@12408 144 return call;
kvn@12408 145 }
kvn@12408 146
kvn@12408 147 inline NativePltCall* nativePltCall_before(address addr) {
kvn@12408 148 address at = addr - NativePltCall::instruction_size;
kvn@12408 149 return nativePltCall_at(at);
kvn@12408 150 }
kvn@12408 151
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
// Encoding: 0xE8 followed by a 32-bit displacement relative to the first byte
// after the instruction (the return address).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,  // call rel32 opcode
    instruction_size            =    5,
    instruction_offset          =    0,
    displacement_offset         =    1,  // rel32 immediately follows the opcode
    return_address_offset       =    5
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  // Rewrite the rel32 so the call targets 'dest'.  On AMD64 the target must
  // be within 32-bit displacement range of the return address.
  void  set_destination(address dest)       {
#ifdef AMD64
    intptr_t disp = dest - return_address();
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  // The displacement must be int-aligned for an atomic 4-byte patch.
  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

#if INCLUDE_AOT
  // True if 'target' is out of rel32 range of a call at 'instr' (the rel32 is
  // relative to the end of the 4-byte displacement).
  static bool is_far_call(address instr, address target) {
    intptr_t disp = target - (instr + sizeof(int32_t));
    return !Assembler::is_simm32(disp);
  }
#endif

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};
duke@0 216
duke@0 217 inline NativeCall* nativeCall_at(address address) {
duke@0 218 NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
duke@0 219 #ifdef ASSERT
duke@0 220 call->verify();
duke@0 221 #endif
duke@0 222 return call;
duke@0 223 }
duke@0 224
duke@0 225 inline NativeCall* nativeCall_before(address return_address) {
duke@0 226 NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
duke@0 227 #ifdef ASSERT
duke@0 228 call->verify();
duke@0 229 #endif
duke@0 230 return call;
duke@0 231 }
duke@0 232
twisti@9111 233 class NativeCallReg: public NativeInstruction {
twisti@9111 234 public:
twisti@9111 235 enum Intel_specific_constants {
twisti@9111 236 instruction_code = 0xFF,
twisti@9111 237 instruction_offset = 0,
twisti@9111 238 return_address_offset_norex = 2,
twisti@9111 239 return_address_offset_rex = 3
twisti@9111 240 };
twisti@9111 241
twisti@9111 242 int next_instruction_offset() const {
twisti@9111 243 if (ubyte_at(0) == NativeCallReg::instruction_code) {
twisti@9111 244 return return_address_offset_norex;
twisti@9111 245 } else {
twisti@9111 246 return return_address_offset_rex;
twisti@9111 247 }
twisti@9111 248 }
twisti@9111 249 };
twisti@9111 250
duke@0 251 // An interface for accessing/manipulating native mov reg, imm32 instructions.
duke@0 252 // (used to manipulate inlined 32bit data dll calls, etc.)
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;   // REX.W prefix precedes the opcode on x86_64
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,  // mov reg, imm; register is encoded in the low opcode bits (see register_mask)
    instruction_size            =    1 + rex_size + wordSize,
    instruction_offset          =    0,
    data_offset                 =    1 + rex_size,  // word-sized immediate follows opcode (and REX, if any)
    next_instruction_offset     =    instruction_size,
    register_mask               = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  // Read/patch the word-sized immediate operand.
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};
duke@0 286
duke@0 287 inline NativeMovConstReg* nativeMovConstReg_at(address address) {
duke@0 288 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
duke@0 289 #ifdef ASSERT
duke@0 290 test->verify();
duke@0 291 #endif
duke@0 292 return test;
duke@0 293 }
duke@0 294
duke@0 295 inline NativeMovConstReg* nativeMovConstReg_before(address address) {
duke@0 296 NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
duke@0 297 #ifdef ASSERT
duke@0 298 test->verify();
duke@0 299 #endif
duke@0 300 return test;
duke@0 301 }
duke@0 302
// Same layout and behavior as NativeMovConstReg; a distinct type so call
// sites can signal patching intent (adds no state of its own).
class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  // Overlay accessor; verify()s the encoding in debug builds.
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
duke@0 313
duke@0 314 // An interface for accessing/manipulating native moves of the form:
never@304 315 // mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem)
never@304 316 // mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg
never@304 317 // mov[s/z]x[w/b/q] [reg + offset], reg
duke@0 318 // fld_s [reg+offset]
duke@0 319 // fld_d [reg+offset]
duke@0 320 // fstp_s [reg + offset]
duke@0 321 // fstp_d [reg + offset]
never@304 322 // mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
duke@0 323 //
duke@0 324 // Warning: These routines must be able to handle any instruction sequences
duke@0 325 // that are generated as a result of the load/store byte,word,long
duke@0 326 // macros. For example: The load_unsigned_byte instruction generates
duke@0 327 // an xor reg,reg inst prior to generating the movb instruction. This
duke@0 328 // class must skip the xor instruction.
duke@0 329
// Accessor for reg<->mem moves (see the instruction-form list above).  The
// enum collects every opcode/prefix byte the decoder in the .cpp file must
// recognize, including the xor-before-movb idiom warned about above.
class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide_lo          = Assembler::REX,
    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
    instruction_code_xor                = 0x33,  // xor reg,reg emitted before some byte loads
    instruction_extended_prefix         = 0x0F,  // two-byte opcode escape
    instruction_code_mem2reg_movslq     = 0x63,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    instruction_code_reg2mem            = 0x89,
    instruction_code_mem2reg            = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_float_s            = 0xd9,  // fld_s / fstp_s group
    instruction_code_float_d            = 0xdd,  // fld_d / fstp_d group
    instruction_code_long_volatile      = 0xdf,
    instruction_code_xmm_ss_prefix      = 0xf3,  // scalar-single prefix
    instruction_code_xmm_sd_prefix      = 0xf2,  // scalar-double prefix
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_VEX_prefix_2bytes       = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,
    instruction_EVEX_prefix_4bytes      = Assembler::EVEX_4bytes,

    instruction_size                    = 4,
    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset             = 4
  };

  // helper: locates the real instruction start (skipping prefixes etc.);
  // defined in the .cpp file.
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  // The memory-operand displacement; both defined in the .cpp file.
  int   offset() const;

  void  set_offset(int x);

  void  add_offset_in_bytes(int add_offset)     { set_offset ( ( offset() + add_offset ) ); }

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};
duke@0 389
duke@0 390 inline NativeMovRegMem* nativeMovRegMem_at (address address) {
duke@0 391 NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
duke@0 392 #ifdef ASSERT
duke@0 393 test->verify();
duke@0 394 #endif
duke@0 395 return test;
duke@0 396 }
duke@0 397
duke@0 398
duke@0 399 // An interface for accessing/manipulating native leal instruction of form:
duke@0 400 // leal reg, [reg + offset]
duke@0 401
// Accessor for "leal reg, [reg + offset]" (and the mov64 form on x86_64);
// inherits the mem-operand decoding from NativeMovRegMem.
class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide             = Assembler::REX_W,
    instruction_prefix_wide_extended    = Assembler::REX_WB,
    lea_instruction_code                = 0x8D,
    mov64_instruction_code              = 0xB8
  };

  void verify();
  void print ();

  // unit test stuff
  static void test() {}

 private:
  // Overlay accessor; verify()s the encoding in debug builds.
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
    #ifdef ASSERT
      test->verify();
    #endif
    return test;
  }
};
duke@0 433
// A RIP-relative load from the GOT:
//   mov rbx|rax, [rip + offset]
// The GOT slot address is the end of the instruction plus the signed 32-bit
// RIP offset; set_data() stores through that slot, not into the code.
class NativeLoadGot: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif
public:
  enum Intel_specific_constants {
    rex_prefix = 0x48,
    instruction_code = 0x8b,   // mov r, r/m
    modrm_rbx_code = 0x1d,     // ModRM for rbx, [rip+disp32]
    modrm_rax_code = 0x05,     // ModRM for rax, [rip+disp32]
    instruction_length = 6 + rex_size,
    offset_offset = 2 + rex_size  // disp32 follows (REX,) opcode, ModRM
  };

  address instruction_address() const { return addr_at(0); }
  address rip_offset_address() const { return addr_at(offset_offset); }
  int rip_offset() const { return int_at(offset_offset); }
  address return_address() const { return addr_at(instruction_length); }
  // RIP-relative addressing: slot = end of instruction + disp32.
  address got_address() const { return return_address() + rip_offset(); }
  address next_instruction_address() const { return return_address(); }
  intptr_t data() const;
  // Writes the GOT slot this instruction loads from (not the code itself).
  void set_data(intptr_t data) {
    intptr_t *addr = (intptr_t *) got_address();
    *addr = data;
  }

  void verify() const;
private:
  void report_and_fail() const;
};
kvn@12408 470
kvn@12408 471 inline NativeLoadGot* nativeLoadGot_at(address addr) {
kvn@12408 472 NativeLoadGot* load = (NativeLoadGot*) addr;
kvn@12408 473 #ifdef ASSERT
kvn@12408 474 load->verify();
kvn@12408 475 #endif
kvn@12408 476 return load;
kvn@12408 477 }
kvn@12408 478
duke@0 479 // jump rel32off
duke@0 480
// A 5-byte "jmp rel32" (opcode 0xE9).  A jump-to-self is the sentinel for an
// unresolved destination; the accessors below translate it to/from -1.
class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,  // jmp rel32 opcode
    instruction_size            =    5,
    instruction_offset          =    0,
    data_offset                 =    1,  // rel32 immediately follows the opcode
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
     // 32bit used to encode unresolved jmp as jmp -1
     // 64bit can't produce this so it used jump to self.
     // Now 32bit and 64bit use jump to self as the unresolved address
     // which the inline cache code (and relocs) know about

     // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  // Patch the rel32 to target 'dest'; a dest of -1 is encoded as the
  // jump-to-self sentinel (rel32 == -5, i.e. back to the opcode byte).
  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
#ifdef AMD64
    assert((labs(val)  & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};
duke@0 530
duke@0 531 inline NativeJump* nativeJump_at(address address) {
duke@0 532 NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
duke@0 533 #ifdef ASSERT
duke@0 534 jump->verify();
duke@0 535 #endif
duke@0 536 return jump;
duke@0 537 }
duke@0 538
kvn@12408 539 // far jump reg
// A far (out of rel32 range) jump: materializes the 64-bit target in a
// register and jumps through it (recognized via is_mov_literal64, see
// NativeInstruction::is_far_jump below).
class NativeFarJump: public NativeInstruction {
 public:
  address jump_destination() const;

  // Creation
  inline friend NativeFarJump* nativeFarJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

};
kvn@12408 553
kvn@12408 554 inline NativeFarJump* nativeFarJump_at(address address) {
kvn@12408 555 NativeFarJump* jump = (NativeFarJump*)(address);
kvn@12408 556 #ifdef ASSERT
kvn@12408 557 jump->verify();
kvn@12408 558 #endif
kvn@12408 559 return jump;
kvn@12408 560 }
kvn@12408 561
duke@0 562 // Handles all kinds of jump on Intel. Long/far, conditional/unconditional
// Covers both short (rel8) and long (rel32) jumps, conditional and
// unconditional; the actual form is decoded at run time, so fixed offsets
// mostly do not apply here.
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Constants does not apply, since the lengths and offsets depends on the actual jump
    // used
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination()    const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};
duke@0 588
duke@0 589 inline NativeGeneralJump* nativeGeneralJump_at(address address) {
duke@0 590 NativeGeneralJump* jump = (NativeGeneralJump*)(address);
duke@0 591 debug_only(jump->verify();)
duke@0 592 return jump;
duke@0 593 }
duke@0 594
// An indirect jump through a GOT slot: "jmp [rip + offset]" (opcode 0xff).
// Patching the destination writes the GOT slot, not the instruction.
class NativeGotJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code = 0xff,  // jmp r/m
    instruction_offset = 0,
    instruction_size = 6,
    rip_offset = 2            // disp32 follows opcode and ModRM byte
  };

  void verify() const;
  address instruction_address() const { return addr_at(instruction_offset); }
  address destination() const;
  address return_address() const { return addr_at(instruction_size); }
  int got_offset() const { return (jint) int_at(rip_offset); }
  // RIP-relative: slot = end of instruction + disp32.
  address got_address() const { return return_address() + got_offset(); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  bool is_GotJump() const { return ubyte_at(0) == instruction_code; }

  // Redirect the jump by storing 'dest' into the GOT slot.
  void set_jump_destination(address dest)  {
    address *got_entry = (address *) got_address();
    *got_entry = dest;
  }
};
kvn@12408 618
kvn@12408 619 inline NativeGotJump* nativeGotJump_at(address addr) {
kvn@12408 620 NativeGotJump* jump = (NativeGotJump*)(addr);
kvn@12408 621 debug_only(jump->verify());
kvn@12408 622 return jump;
kvn@12408 623 }
kvn@12408 624
// A one-byte "pop reg" (opcode 0x58 plus register encoding).
class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};
duke@0 638
duke@0 639
// The two-byte UD2-style trap used to force an illegal-instruction fault.
class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F, // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode as specific address
  static void insert(address code_pos);
};
duke@0 652
duke@0 653 // return instruction that does not pop values of the stack
// return instruction that does not pop values of the stack: "ret" (0xC3).
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};
duke@0 663
duke@0 664 // return instruction that does pop values of the stack
// return instruction that does pop values of the stack: "ret imm16" (0xC2).
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};
duke@0 674
duke@0 675 // Simple test vs memory
// Simple test vs memory: "test rax, [mem]" (opcode 0x85), optionally
// REX-prefixed.  Used to recognize safepoint polls (see is_safepoint_poll).
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,
    instruction_rex_prefix      = Assembler::REX,
    instruction_code_memXregl   = 0x85,
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};
duke@0 686
// Cheap instruction classification by matching leading opcode bytes against
// the encodings declared above.  These inspect raw code-cache bytes, so they
// can misfire on byte sequences that merely look like the given opcodes.
inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
// call through register: 0xFF, possibly preceded by a REX or REX_B prefix.
inline bool NativeInstruction::is_call_reg()     { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                          (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                           (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
// jmp through register: 0xFF with a ModRM byte in the 0xE0 range, possibly
// preceded by a REX_B prefix.
inline bool NativeInstruction::is_jump_reg()     {
  int pos = 0;
  if (ubyte_at(0) == Assembler::REX_B) pos = 1;
  return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
}
// A far jump starts by materializing the target with a mov_literal64
// (see NativeFarJump).
inline bool NativeInstruction::is_far_jump()     { return is_mov_literal64(); }
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
// Recognizes a safepoint poll: a "test rax, [poll page]" instruction.  On
// AMD64 there are two encodings -- a near (RIP-relative) form, for which the
// actual fault address is computed and checked against the polling page, and
// a far form (poll-page address in a register), matched with or without a
// REX prefix.  On 32-bit, the poll address is an absolute disp32 operand.
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  // Try decoding a near safepoint first:
  if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
      ubyte_at(1) == 0x05) { // 00 rax 101
    // RIP-relative operand: fault address = end of instruction + disp32.
    address fault = addr_at(6) + int_at(2);
    NOT_JVMCI(assert(!Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return os::is_poll_address(fault);
  }
  // Now try decoding a far safepoint:
  // two cases, depending on the choice of the base register in the address.
  if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
       ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
      (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg)) {
    NOT_JVMCI(assert(Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return true;
  }
  return false;
#else
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
           (ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
           (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}
duke@0 731
// True for a REX.W-prefixed "mov reg, imm64" (the NativeMovConstReg
// encoding: 0xB8 + register in the low opcode bits).  AMD64-only; the
// instruction does not exist on 32-bit x86.
inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}
stefank@1879 740
stefank@1879 741 #endif // CPU_X86_VM_NATIVEINST_X86_HPP