annotate src/cpu/x86/vm/x86_64.ad @ 1137:97125851f396

6829187: compiler optimizations required for JSR 292
Summary: C2 implementation for invokedynamic support.
Reviewed-by: kvn, never
author twisti
date Tue, 05 Jan 2010 13:05:58 +0100
parents 148e5441d916
children 2883969d09e7
rev   line source
duke@0 1 //
twisti@624 2 // Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 //
duke@0 5 // This code is free software; you can redistribute it and/or modify it
duke@0 6 // under the terms of the GNU General Public License version 2 only, as
duke@0 7 // published by the Free Software Foundation.
duke@0 8 //
duke@0 9 // This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 // version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 // accompanied this code).
duke@0 14 //
duke@0 15 // You should have received a copy of the GNU General Public License version
duke@0 16 // 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 //
duke@0 19 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 // CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 // have any questions.
duke@0 22 //
duke@0 23 //
duke@0 24
duke@0 25 // AMD64 Architecture Description File
duke@0 26
duke@0 27 //----------REGISTER DEFINITION BLOCK------------------------------------------
duke@0 28 // This information is used by the matcher and the register allocator to
duke@0 29 // describe individual registers and classes of registers within the target
duke@0 30 // architecture.
duke@0 31
duke@0 32 register %{
duke@0 33 //----------Architecture Description Register Definitions----------------------
duke@0 34 // General Registers
duke@0 35 // "reg_def" name ( register save type, C convention save type,
duke@0 36 // ideal register type, encoding );
duke@0 37 // Register Save Types:
duke@0 38 //
duke@0 39 // NS = No-Save: The register allocator assumes that these registers
duke@0 40 // can be used without saving upon entry to the method, &
duke@0 41 // that they do not need to be saved at call sites.
duke@0 42 //
duke@0 43 // SOC = Save-On-Call: The register allocator assumes that these registers
duke@0 44 // can be used without saving upon entry to the method,
duke@0 45 // but that they must be saved at call sites.
duke@0 46 //
duke@0 47 // SOE = Save-On-Entry: The register allocator assumes that these registers
duke@0 48 // must be saved before using them upon entry to the
duke@0 49 // method, but they do not need to be saved at call
duke@0 50 // sites.
duke@0 51 //
duke@0 52 // AS = Always-Save: The register allocator assumes that these registers
duke@0 53 // must be saved before using them upon entry to the
duke@0 54 // method, & that they must be saved at call sites.
duke@0 55 //
duke@0 56 // Ideal Register Type is used to determine how to save & restore a
duke@0 57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
duke@0 58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
duke@0 59 //
duke@0 60 // The encoding number is the actual bit-pattern placed into the opcodes.
duke@0 61
duke@0 62 // General Registers
duke@0 63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
duke@0 64 // used as byte registers)
duke@0 65
duke@0 66 // RBX, RSI, and RDI were previously set as save-on-entry for Java code.
duke@0 67 // SOE was turned off in Java code due to frequent use of uncommon traps.
duke@0 68 // Now that the allocator is better, RSI and RDI are SOE registers again.
duke@0 69
duke@0 70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
duke@0 71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
duke@0 72
duke@0 73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
duke@0 74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
duke@0 75
duke@0 76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
duke@0 77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
duke@0 78
duke@0 79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
duke@0 80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
duke@0 81
duke@0 82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
duke@0 83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
duke@0 84
duke@0 85 // Now that adapter frames are gone, RBP is always saved and restored by the prolog/epilog code.
duke@0 86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
duke@0 87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
duke@0 88
duke@0 89 #ifdef _WIN64
duke@0 90
duke@0 91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
duke@0 92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 93
duke@0 94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
duke@0 95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 96
duke@0 97 #else
duke@0 98
duke@0 99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
duke@0 100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 101
duke@0 102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
duke@0 103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 104
duke@0 105 #endif
duke@0 106
duke@0 107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
duke@0 108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
duke@0 109
duke@0 110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
duke@0 111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
duke@0 112
duke@0 113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
duke@0 114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
duke@0 115
duke@0 116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
duke@0 117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
duke@0 118
duke@0 119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
duke@0 120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
duke@0 121
duke@0 122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
duke@0 123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
duke@0 124
duke@0 125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
duke@0 126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
duke@0 127
duke@0 128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
duke@0 129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
duke@0 130
duke@0 131
duke@0 132 // Floating Point Registers
duke@0 133
duke@0 134 // XMM registers. 128-bit registers of 4 words each, labeled a-d.
duke@0 135 // Word a in each register holds a Float, words ab hold a Double. We
duke@0 136 // currently do not use the SIMD capabilities, so words c and d are
duke@0 137 // unused at the moment.
duke@0 138 // XMM8-XMM15 must be encoded with REX.
duke@0 139 // Linux ABI: No registers are preserved across function calls
duke@0 140 // XMM0-XMM7 might hold parameters
duke@0 141 // Windows ABI: XMM6-XMM15 preserved across function calls
duke@0 142 // XMM0-XMM3 might hold parameters
duke@0 143
duke@0 144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
duke@0 145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
duke@0 146
duke@0 147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
duke@0 148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
duke@0 149
duke@0 150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
duke@0 151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
duke@0 152
duke@0 153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
duke@0 154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
duke@0 155
duke@0 156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
duke@0 157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
duke@0 158
duke@0 159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
duke@0 160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
duke@0 161
duke@0 162 #ifdef _WIN64
duke@0 163
duke@0 164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
duke@0 165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 166
duke@0 167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
duke@0 168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 169
duke@0 170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
duke@0 171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 172
duke@0 173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
duke@0 174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 175
duke@0 176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
duke@0 177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 178
duke@0 179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
duke@0 180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 181
duke@0 182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
duke@0 183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 184
duke@0 185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
duke@0 186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 187
duke@0 188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
duke@0 189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 190
duke@0 191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
duke@0 192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 193
duke@0 194 #else
duke@0 195
duke@0 196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
duke@0 197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 198
duke@0 199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
duke@0 200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 201
duke@0 202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
duke@0 203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 204
duke@0 205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
duke@0 206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 207
duke@0 208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
duke@0 209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 210
duke@0 211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
duke@0 212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 213
duke@0 214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
duke@0 215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 216
duke@0 217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
duke@0 218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 219
duke@0 220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
duke@0 221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 222
duke@0 223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
duke@0 224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 225
duke@0 226 #endif // _WIN64
duke@0 227
duke@0 228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
duke@0 229
duke@0 230 // Specify priority of register selection within phases of register
duke@0 231 // allocation. Highest priority is first. A useful heuristic is to
duke@0 232 // give registers a low priority when they are required by machine
duke@0 233 // instructions, like EAX and EDX on I486, and choose no-save registers
duke@0 234 // before save-on-call, & save-on-call before save-on-entry. Registers
duke@0 235 // which participate in fixed calling sequences should come last.
duke@0 236 // Registers which are used as pairs must fall on an even boundary.
duke@0 237
duke@0 238 alloc_class chunk0(R10, R10_H,
duke@0 239 R11, R11_H,
duke@0 240 R8, R8_H,
duke@0 241 R9, R9_H,
duke@0 242 R12, R12_H,
duke@0 243 RCX, RCX_H,
duke@0 244 RBX, RBX_H,
duke@0 245 RDI, RDI_H,
duke@0 246 RDX, RDX_H,
duke@0 247 RSI, RSI_H,
duke@0 248 RAX, RAX_H,
duke@0 249 RBP, RBP_H,
duke@0 250 R13, R13_H,
duke@0 251 R14, R14_H,
duke@0 252 R15, R15_H,
duke@0 253 RSP, RSP_H);
duke@0 254
duke@0 255 // XXX probably use 8-15 first on Linux
duke@0 256 alloc_class chunk1(XMM0, XMM0_H,
duke@0 257 XMM1, XMM1_H,
duke@0 258 XMM2, XMM2_H,
duke@0 259 XMM3, XMM3_H,
duke@0 260 XMM4, XMM4_H,
duke@0 261 XMM5, XMM5_H,
duke@0 262 XMM6, XMM6_H,
duke@0 263 XMM7, XMM7_H,
duke@0 264 XMM8, XMM8_H,
duke@0 265 XMM9, XMM9_H,
duke@0 266 XMM10, XMM10_H,
duke@0 267 XMM11, XMM11_H,
duke@0 268 XMM12, XMM12_H,
duke@0 269 XMM13, XMM13_H,
duke@0 270 XMM14, XMM14_H,
duke@0 271 XMM15, XMM15_H);
duke@0 272
duke@0 273 alloc_class chunk2(RFLAGS);
duke@0 274
duke@0 275
duke@0 276 //----------Architecture Description Register Classes--------------------------
duke@0 277 // Several register classes are automatically defined based upon information in
duke@0 278 // this architecture description.
duke@0 279 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
duke@0 280 // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
duke@0 281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
duke@0 282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
duke@0 283 //
duke@0 284
duke@0 285 // Class for all pointer registers (including RSP)
duke@0 286 reg_class any_reg(RAX, RAX_H,
duke@0 287 RDX, RDX_H,
duke@0 288 RBP, RBP_H,
duke@0 289 RDI, RDI_H,
duke@0 290 RSI, RSI_H,
duke@0 291 RCX, RCX_H,
duke@0 292 RBX, RBX_H,
duke@0 293 RSP, RSP_H,
duke@0 294 R8, R8_H,
duke@0 295 R9, R9_H,
duke@0 296 R10, R10_H,
duke@0 297 R11, R11_H,
duke@0 298 R12, R12_H,
duke@0 299 R13, R13_H,
duke@0 300 R14, R14_H,
duke@0 301 R15, R15_H);
duke@0 302
duke@0 303 // Class for all pointer registers except RSP
duke@0 304 reg_class ptr_reg(RAX, RAX_H,
duke@0 305 RDX, RDX_H,
duke@0 306 RBP, RBP_H,
duke@0 307 RDI, RDI_H,
duke@0 308 RSI, RSI_H,
duke@0 309 RCX, RCX_H,
duke@0 310 RBX, RBX_H,
duke@0 311 R8, R8_H,
duke@0 312 R9, R9_H,
duke@0 313 R10, R10_H,
duke@0 314 R11, R11_H,
duke@0 315 R13, R13_H,
duke@0 316 R14, R14_H);
duke@0 317
duke@0 318 // Class for all pointer registers except RAX and RSP
duke@0 319 reg_class ptr_no_rax_reg(RDX, RDX_H,
duke@0 320 RBP, RBP_H,
duke@0 321 RDI, RDI_H,
duke@0 322 RSI, RSI_H,
duke@0 323 RCX, RCX_H,
duke@0 324 RBX, RBX_H,
duke@0 325 R8, R8_H,
duke@0 326 R9, R9_H,
duke@0 327 R10, R10_H,
duke@0 328 R11, R11_H,
duke@0 329 R13, R13_H,
duke@0 330 R14, R14_H);
duke@0 331
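// Class for all pointer registers except RBP and RSP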
duke@0 332 reg_class ptr_no_rbp_reg(RDX, RDX_H,
duke@0 333 RAX, RAX_H,
duke@0 334 RDI, RDI_H,
duke@0 335 RSI, RSI_H,
duke@0 336 RCX, RCX_H,
duke@0 337 RBX, RBX_H,
duke@0 338 R8, R8_H,
duke@0 339 R9, R9_H,
duke@0 340 R10, R10_H,
duke@0 341 R11, R11_H,
duke@0 342 R13, R13_H,
duke@0 343 R14, R14_H);
duke@0 344
duke@0 345 // Class for all pointer registers except RAX, RBX and RSP
duke@0 346 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
duke@0 347 RBP, RBP_H,
duke@0 348 RDI, RDI_H,
duke@0 349 RSI, RSI_H,
duke@0 350 RCX, RCX_H,
duke@0 351 R8, R8_H,
duke@0 352 R9, R9_H,
duke@0 353 R10, R10_H,
duke@0 354 R11, R11_H,
duke@0 355 R13, R13_H,
duke@0 356 R14, R14_H);
duke@0 357
duke@0 358 // Singleton class for RAX pointer register
duke@0 359 reg_class ptr_rax_reg(RAX, RAX_H);
duke@0 360
duke@0 361 // Singleton class for RBX pointer register
duke@0 362 reg_class ptr_rbx_reg(RBX, RBX_H);
duke@0 363
duke@0 364 // Singleton class for RSI pointer register
duke@0 365 reg_class ptr_rsi_reg(RSI, RSI_H);
duke@0 366
duke@0 367 // Singleton class for RDI pointer register
duke@0 368 reg_class ptr_rdi_reg(RDI, RDI_H);
duke@0 369
duke@0 370 // Singleton class for RBP pointer register
duke@0 371 reg_class ptr_rbp_reg(RBP, RBP_H);
duke@0 372
duke@0 373 // Singleton class for stack pointer
duke@0 374 reg_class ptr_rsp_reg(RSP, RSP_H);
duke@0 375
duke@0 376 // Singleton class for TLS pointer
duke@0 377 reg_class ptr_r15_reg(R15, R15_H);
duke@0 378
duke@0 379 // Class for all long registers (except RSP)
duke@0 380 reg_class long_reg(RAX, RAX_H,
duke@0 381 RDX, RDX_H,
duke@0 382 RBP, RBP_H,
duke@0 383 RDI, RDI_H,
duke@0 384 RSI, RSI_H,
duke@0 385 RCX, RCX_H,
duke@0 386 RBX, RBX_H,
duke@0 387 R8, R8_H,
duke@0 388 R9, R9_H,
duke@0 389 R10, R10_H,
duke@0 390 R11, R11_H,
duke@0 391 R13, R13_H,
duke@0 392 R14, R14_H);
duke@0 393
duke@0 394 // Class for all long registers except RAX, RDX (and RSP)
duke@0 395 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
duke@0 396 RDI, RDI_H,
duke@0 397 RSI, RSI_H,
duke@0 398 RCX, RCX_H,
duke@0 399 RBX, RBX_H,
duke@0 400 R8, R8_H,
duke@0 401 R9, R9_H,
duke@0 402 R10, R10_H,
duke@0 403 R11, R11_H,
duke@0 404 R13, R13_H,
duke@0 405 R14, R14_H);
duke@0 406
duke@0 407 // Class for all long registers except RCX (and RSP)
duke@0 408 reg_class long_no_rcx_reg(RBP, RBP_H,
duke@0 409 RDI, RDI_H,
duke@0 410 RSI, RSI_H,
duke@0 411 RAX, RAX_H,
duke@0 412 RDX, RDX_H,
duke@0 413 RBX, RBX_H,
duke@0 414 R8, R8_H,
duke@0 415 R9, R9_H,
duke@0 416 R10, R10_H,
duke@0 417 R11, R11_H,
duke@0 418 R13, R13_H,
duke@0 419 R14, R14_H);
duke@0 420
duke@0 421 // Class for all long registers except RAX (and RSP)
duke@0 422 reg_class long_no_rax_reg(RBP, RBP_H,
duke@0 423 RDX, RDX_H,
duke@0 424 RDI, RDI_H,
duke@0 425 RSI, RSI_H,
duke@0 426 RCX, RCX_H,
duke@0 427 RBX, RBX_H,
duke@0 428 R8, R8_H,
duke@0 429 R9, R9_H,
duke@0 430 R10, R10_H,
duke@0 431 R11, R11_H,
duke@0 432 R13, R13_H,
duke@0 433 R14, R14_H);
duke@0 434
duke@0 435 // Singleton class for RAX long register
duke@0 436 reg_class long_rax_reg(RAX, RAX_H);
duke@0 437
duke@0 438 // Singleton class for RCX long register
duke@0 439 reg_class long_rcx_reg(RCX, RCX_H);
duke@0 440
duke@0 441 // Singleton class for RDX long register
duke@0 442 reg_class long_rdx_reg(RDX, RDX_H);
duke@0 443
duke@0 444 // Class for all int registers (except RSP)
duke@0 445 reg_class int_reg(RAX,
duke@0 446 RDX,
duke@0 447 RBP,
duke@0 448 RDI,
duke@0 449 RSI,
duke@0 450 RCX,
duke@0 451 RBX,
duke@0 452 R8,
duke@0 453 R9,
duke@0 454 R10,
duke@0 455 R11,
duke@0 456 R13,
duke@0 457 R14);
duke@0 458
duke@0 459 // Class for all int registers except RCX (and RSP)
duke@0 460 reg_class int_no_rcx_reg(RAX,
duke@0 461 RDX,
duke@0 462 RBP,
duke@0 463 RDI,
duke@0 464 RSI,
duke@0 465 RBX,
duke@0 466 R8,
duke@0 467 R9,
duke@0 468 R10,
duke@0 469 R11,
duke@0 470 R13,
duke@0 471 R14);
duke@0 472
duke@0 473 // Class for all int registers except RAX, RDX (and RSP)
duke@0 474 reg_class int_no_rax_rdx_reg(RBP,
never@304 475 RDI,
duke@0 476 RSI,
duke@0 477 RCX,
duke@0 478 RBX,
duke@0 479 R8,
duke@0 480 R9,
duke@0 481 R10,
duke@0 482 R11,
duke@0 483 R13,
duke@0 484 R14);
duke@0 485
duke@0 486 // Singleton class for RAX int register
duke@0 487 reg_class int_rax_reg(RAX);
duke@0 488
duke@0 489 // Singleton class for RBX int register
duke@0 490 reg_class int_rbx_reg(RBX);
duke@0 491
duke@0 492 // Singleton class for RCX int register
duke@0 493 reg_class int_rcx_reg(RCX);
duke@0 494
duke@0 495 // Singleton class for RDX int register
duke@0 496 reg_class int_rdx_reg(RDX);
duke@0 497
duke@0 498 // Singleton class for RDI int register
duke@0 499 reg_class int_rdi_reg(RDI);
duke@0 500
duke@0 501 // Singleton class for instruction pointer
duke@0 502 // reg_class ip_reg(RIP);
duke@0 503
duke@0 504 // Singleton class for condition codes
duke@0 505 reg_class int_flags(RFLAGS);
duke@0 506
duke@0 507 // Class for all float registers
duke@0 508 reg_class float_reg(XMM0,
duke@0 509 XMM1,
duke@0 510 XMM2,
duke@0 511 XMM3,
duke@0 512 XMM4,
duke@0 513 XMM5,
duke@0 514 XMM6,
duke@0 515 XMM7,
duke@0 516 XMM8,
duke@0 517 XMM9,
duke@0 518 XMM10,
duke@0 519 XMM11,
duke@0 520 XMM12,
duke@0 521 XMM13,
duke@0 522 XMM14,
duke@0 523 XMM15);
duke@0 524
duke@0 525 // Class for all double registers
duke@0 526 reg_class double_reg(XMM0, XMM0_H,
duke@0 527 XMM1, XMM1_H,
duke@0 528 XMM2, XMM2_H,
duke@0 529 XMM3, XMM3_H,
duke@0 530 XMM4, XMM4_H,
duke@0 531 XMM5, XMM5_H,
duke@0 532 XMM6, XMM6_H,
duke@0 533 XMM7, XMM7_H,
duke@0 534 XMM8, XMM8_H,
duke@0 535 XMM9, XMM9_H,
duke@0 536 XMM10, XMM10_H,
duke@0 537 XMM11, XMM11_H,
duke@0 538 XMM12, XMM12_H,
duke@0 539 XMM13, XMM13_H,
duke@0 540 XMM14, XMM14_H,
duke@0 541 XMM15, XMM15_H);
duke@0 542 %}
duke@0 543
duke@0 544
duke@0 545 //----------SOURCE BLOCK-------------------------------------------------------
duke@0 546 // This is a block of C++ code which provides values, functions, and
duke@0 547 // definitions necessary in the rest of the architecture description
duke@0 548 source %{
never@304 549 #define RELOC_IMM64 Assembler::imm_operand
duke@0 550 #define RELOC_DISP32 Assembler::disp32_operand
duke@0 551
duke@0 552 #define __ _masm.
duke@0 553
twisti@1137 554 static int preserve_SP_size() {
twisti@1137 555 return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
twisti@1137 556 }
twisti@1137 557
duke@0 558 // !!!!! Special hack to get all types of calls to specify the byte offset
duke@0 559 // from the start of the call to the point where the return address
duke@0 560 // will point.
duke@0 561 int MachCallStaticJavaNode::ret_addr_offset()
duke@0 562 {
twisti@1137 563 int offset = 5; // 5 bytes from start of call to where return address points
twisti@1137 564 if (_method_handle_invoke)
twisti@1137 565 offset += preserve_SP_size();
twisti@1137 566 return offset;
duke@0 567 }
duke@0 568
duke@0 569 int MachCallDynamicJavaNode::ret_addr_offset()
duke@0 570 {
duke@0 571 return 15; // 15 bytes from start of call to where return address points
duke@0 572 }
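The offsets above follow from standard x86-64 instruction lengths: a call rel32 is 5 bytes, the movq rbp, rsp counted by preserve_SP_size() is 3, and the movq of the inline-cache value before a dynamic call is 10 (which is also why CallDynamicJavaDirectNode::compute_padding below skips 11 bytes). A minimal standalone C++ sketch of that arithmetic, not part of the .ad file:

  #include <cstdio>

  int main() {
    const int call_rel32    = 1 + 4;      // E8 opcode + 32-bit displacement
    const int mov_rbp_rsp   = 1 + 1 + 1;  // REX.W + opcode + ModRM, i.e. preserve_SP_size()
    const int mov_reg_imm64 = 1 + 1 + 8;  // REX.W + B8+rd + 64-bit immediate

    printf("static call ret_addr_offset:        %d\n", call_rel32);                  // 5
    printf("method-handle call ret_addr_offset: %d\n", mov_rbp_rsp + call_rel32);    // 8
    printf("dynamic (IC) call ret_addr_offset:  %d\n", mov_reg_imm64 + call_rel32);  // 15
    return 0;
  }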
duke@0 573
duke@0 574 // In os_cpu .ad file
duke@0 575 // int MachCallRuntimeNode::ret_addr_offset()
duke@0 576
duke@0 577 // Indicate if the safepoint node needs the polling page as an input.
duke@0 578 // Since amd64 uses RIP-relative rather than absolute addressing and
duke@0 579 // the polling page is within 2G, it doesn't.
duke@0 580 bool SafePointNode::needs_polling_address_input()
duke@0 581 {
duke@0 582 return false;
duke@0 583 }
duke@0 584
duke@0 585 //
duke@0 586 // Compute padding required for nodes which need alignment
duke@0 587 //
duke@0 588
duke@0 589 // The address of the call instruction needs to be 4-byte aligned to
duke@0 590 // ensure that it does not span a cache line so that it can be patched.
duke@0 591 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
duke@0 592 {
duke@0 593 current_offset += 1; // skip call opcode byte
duke@0 594 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 595 }
duke@0 596
duke@0 597 // The address of the call instruction needs to be 4-byte aligned to
duke@0 598 // ensure that it does not span a cache line so that it can be patched.
twisti@1137 599 int CallStaticJavaHandleNode::compute_padding(int current_offset) const
twisti@1137 600 {
twisti@1137 601 current_offset += preserve_SP_size(); // skip mov rbp, rsp
twisti@1137 602 current_offset += 1; // skip call opcode byte
twisti@1137 603 return round_to(current_offset, alignment_required()) - current_offset;
twisti@1137 604 }
twisti@1137 605
twisti@1137 606 // The address of the call instruction needs to be 4-byte aligned to
twisti@1137 607 // ensure that it does not span a cache line so that it can be patched.
duke@0 608 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
duke@0 609 {
duke@0 610 current_offset += 11; // skip movq instruction + call opcode byte
duke@0 611 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 612 }
duke@0 613
duke@0 614 #ifndef PRODUCT
duke@0 615 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 616 {
duke@0 617 st->print("INT3");
duke@0 618 }
duke@0 619 #endif
duke@0 620
duke@0 621 // EMIT_RM()
duke@0 622 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
duke@0 623 {
duke@0 624 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
duke@0 625 *(cbuf.code_end()) = c;
duke@0 626 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 627 }
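emit_rm packs the three ModRM fields in the hardware layout: mod in bits 7-6, reg in bits 5-3, r/m in bits 2-0. A minimal standalone C++ sketch (not part of the .ad file) showing the packing for a register-to-register move:

  #include <cstdio>

  // Same packing as emit_rm: (mod << 6) | (reg << 3) | rm.
  static unsigned char modrm(int mod, int reg, int rm) {
    return (unsigned char)((mod << 6) | (reg << 3) | rm);
  }

  int main() {
    // mod = 0x3 (register-direct), reg = RAX (0), rm = RBX (3) gives 0xC3,
    // so the byte sequence REX.W 0x8B 0xC3 encodes "movq rax, rbx".
    printf("ModRM = 0x%02X\n", modrm(0x3, 0, 3));  // prints 0xC3
    return 0;
  }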
duke@0 628
duke@0 629 // EMIT_CC()
duke@0 630 void emit_cc(CodeBuffer &cbuf, int f1, int f2)
duke@0 631 {
duke@0 632 unsigned char c = (unsigned char) (f1 | f2);
duke@0 633 *(cbuf.code_end()) = c;
duke@0 634 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 635 }
duke@0 636
duke@0 637 // EMIT_OPCODE()
duke@0 638 void emit_opcode(CodeBuffer &cbuf, int code)
duke@0 639 {
duke@0 640 *(cbuf.code_end()) = (unsigned char) code;
duke@0 641 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 642 }
duke@0 643
duke@0 644 // EMIT_OPCODE() w/ relocation information
duke@0 645 void emit_opcode(CodeBuffer &cbuf,
duke@0 646 int code, relocInfo::relocType reloc, int offset, int format)
duke@0 647 {
duke@0 648 cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
duke@0 649 emit_opcode(cbuf, code);
duke@0 650 }
duke@0 651
duke@0 652 // EMIT_D8()
duke@0 653 void emit_d8(CodeBuffer &cbuf, int d8)
duke@0 654 {
duke@0 655 *(cbuf.code_end()) = (unsigned char) d8;
duke@0 656 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 657 }
duke@0 658
duke@0 659 // EMIT_D16()
duke@0 660 void emit_d16(CodeBuffer &cbuf, int d16)
duke@0 661 {
duke@0 662 *((short *)(cbuf.code_end())) = d16;
duke@0 663 cbuf.set_code_end(cbuf.code_end() + 2);
duke@0 664 }
duke@0 665
duke@0 666 // EMIT_D32()
duke@0 667 void emit_d32(CodeBuffer &cbuf, int d32)
duke@0 668 {
duke@0 669 *((int *)(cbuf.code_end())) = d32;
duke@0 670 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 671 }
duke@0 672
duke@0 673 // EMIT_D64()
duke@0 674 void emit_d64(CodeBuffer &cbuf, int64_t d64)
duke@0 675 {
duke@0 676 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 677 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 678 }
duke@0 679
duke@0 680 // emit 32 bit value and construct relocation entry from relocInfo::relocType
duke@0 681 void emit_d32_reloc(CodeBuffer& cbuf,
duke@0 682 int d32,
duke@0 683 relocInfo::relocType reloc,
duke@0 684 int format)
duke@0 685 {
duke@0 686 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
duke@0 687 cbuf.relocate(cbuf.inst_mark(), reloc, format);
duke@0 688
duke@0 689 *((int*) (cbuf.code_end())) = d32;
duke@0 690 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 691 }
duke@0 692
duke@0 693 // emit 32 bit value and construct relocation entry from RelocationHolder
duke@0 694 void emit_d32_reloc(CodeBuffer& cbuf,
duke@0 695 int d32,
duke@0 696 RelocationHolder const& rspec,
duke@0 697 int format)
duke@0 698 {
duke@0 699 #ifdef ASSERT
duke@0 700 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 701 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
jrose@989 702 assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
duke@0 703 }
duke@0 704 #endif
duke@0 705 cbuf.relocate(cbuf.inst_mark(), rspec, format);
duke@0 706
duke@0 707 *((int* )(cbuf.code_end())) = d32;
duke@0 708 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 709 }
duke@0 710
duke@0 711 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
duke@0 712 address next_ip = cbuf.code_end() + 4;
duke@0 713 emit_d32_reloc(cbuf, (int) (addr - next_ip),
duke@0 714 external_word_Relocation::spec(addr),
duke@0 715 RELOC_DISP32);
duke@0 716 }
duke@0 717
duke@0 718
duke@0 719 // emit 64 bit value and construct relocation entry from relocInfo::relocType
duke@0 720 void emit_d64_reloc(CodeBuffer& cbuf,
duke@0 721 int64_t d64,
duke@0 722 relocInfo::relocType reloc,
duke@0 723 int format)
duke@0 724 {
duke@0 725 cbuf.relocate(cbuf.inst_mark(), reloc, format);
duke@0 726
duke@0 727 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 728 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 729 }
duke@0 730
duke@0 731 // emit 64 bit value and construct relocation entry from RelocationHolder
duke@0 732 void emit_d64_reloc(CodeBuffer& cbuf,
duke@0 733 int64_t d64,
duke@0 734 RelocationHolder const& rspec,
duke@0 735 int format)
duke@0 736 {
duke@0 737 #ifdef ASSERT
duke@0 738 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 739 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
jrose@989 740 assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
jrose@989 741 "cannot embed scavengable oops in code");
duke@0 742 }
duke@0 743 #endif
duke@0 744 cbuf.relocate(cbuf.inst_mark(), rspec, format);
duke@0 745
duke@0 746 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 747 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 748 }
duke@0 749
duke@0 750 // Access stack slot for load or store
duke@0 751 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
duke@0 752 {
duke@0 753 emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src])
duke@0 754 if (-0x80 <= disp && disp < 0x80) {
duke@0 755 emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
duke@0 756 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 757 emit_d8(cbuf, disp); // Displacement // R/M byte
duke@0 758 } else {
duke@0 759 emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
duke@0 760 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 761 emit_d32(cbuf, disp); // Displacement // R/M byte
duke@0 762 }
duke@0 763 }
duke@0 764
duke@0 765 // Encode a register-memory operand pair (rRegI ereg, memory mem) -- emit_reg_mem
duke@0 766 void encode_RegMem(CodeBuffer &cbuf,
duke@0 767 int reg,
duke@0 768 int base, int index, int scale, int disp, bool disp_is_oop)
duke@0 769 {
duke@0 770 assert(!disp_is_oop, "cannot have disp");
duke@0 771 int regenc = reg & 7;
duke@0 772 int baseenc = base & 7;
duke@0 773 int indexenc = index & 7;
duke@0 774
duke@0 775 // There is no index & no scale, use form without SIB byte
duke@0 776 if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
duke@0 777 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 778 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 779 emit_rm(cbuf, 0x0, regenc, baseenc); // *
duke@0 780 } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 781 // If 8-bit displacement, mode 0x1
duke@0 782 emit_rm(cbuf, 0x1, regenc, baseenc); // *
duke@0 783 emit_d8(cbuf, disp);
duke@0 784 } else {
duke@0 785 // If 32-bit displacement
duke@0 786 if (base == -1) { // Special flag for absolute address
duke@0 787 emit_rm(cbuf, 0x0, regenc, 0x5); // *
duke@0 788 if (disp_is_oop) {
duke@0 789 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 790 } else {
duke@0 791 emit_d32(cbuf, disp);
duke@0 792 }
duke@0 793 } else {
duke@0 794 // Normal base + offset
duke@0 795 emit_rm(cbuf, 0x2, regenc, baseenc); // *
duke@0 796 if (disp_is_oop) {
duke@0 797 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 798 } else {
duke@0 799 emit_d32(cbuf, disp);
duke@0 800 }
duke@0 801 }
duke@0 802 }
duke@0 803 } else {
duke@0 804 // Else, encode with the SIB byte
duke@0 805 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 806 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 807 // If no displacement
duke@0 808 emit_rm(cbuf, 0x0, regenc, 0x4); // *
duke@0 809 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 810 } else {
duke@0 811 if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 812 // If 8-bit displacement, mode 0x1
duke@0 813 emit_rm(cbuf, 0x1, regenc, 0x4); // *
duke@0 814 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 815 emit_d8(cbuf, disp);
duke@0 816 } else {
duke@0 817 // If 32-bit displacement
duke@0 818 if (base == 0x04 ) {
duke@0 819 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 820 emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
duke@0 821 } else {
duke@0 822 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 823 emit_rm(cbuf, scale, indexenc, baseenc); // *
duke@0 824 }
duke@0 825 if (disp_is_oop) {
duke@0 826 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 827 } else {
duke@0 828 emit_d32(cbuf, disp);
duke@0 829 }
duke@0 830 }
duke@0 831 }
duke@0 832 }
duke@0 833 }
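A standalone C++ sketch (not part of the .ad file) of the byte patterns encode_RegMem produces for two common addressing forms, following the ModRM/SIB rules it implements; the register encodings are the ones defined in the register block above:

  #include <cstdio>

  int main() {
    // reg = eax (0), base = rbx (3), no index, disp8 = 0x10: mod = 01, so the
    // helper emits ModRM 0x43 then the disp8. With a preceding 0x8B opcode:
    // 8B 43 10  ==  movl eax, [rbx + 0x10]
    const unsigned char form1[] = { 0x8B, 0x43, 0x10 };

    // reg = eax, base = rsp (4): RSP/R12 as base always require a SIB byte,
    // which is why the helper tests base != RSP_enc && base != R12_enc.
    // 8B 44 24 10  ==  movl eax, [rsp + 0x10]
    const unsigned char form2[] = { 0x8B, 0x44, 0x24, 0x10 };

    for (unsigned char b : form1) printf("%02X ", b);
    printf("\n");
    for (unsigned char b : form2) printf("%02X ", b);
    printf("\n");
    return 0;
  }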
duke@0 834
duke@0 835 void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
duke@0 836 {
duke@0 837 if (dstenc != srcenc) {
duke@0 838 if (dstenc < 8) {
duke@0 839 if (srcenc >= 8) {
duke@0 840 emit_opcode(cbuf, Assembler::REX_B);
duke@0 841 srcenc -= 8;
duke@0 842 }
duke@0 843 } else {
duke@0 844 if (srcenc < 8) {
duke@0 845 emit_opcode(cbuf, Assembler::REX_R);
duke@0 846 } else {
duke@0 847 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 848 srcenc -= 8;
duke@0 849 }
duke@0 850 dstenc -= 8;
duke@0 851 }
duke@0 852
duke@0 853 emit_opcode(cbuf, 0x8B);
duke@0 854 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 855 }
duke@0 856 }
duke@0 857
duke@0 858 void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
duke@0 859 if( dst_encoding == src_encoding ) {
duke@0 860 // reg-reg copy, use an empty encoding
duke@0 861 } else {
duke@0 862 MacroAssembler _masm(&cbuf);
duke@0 863
duke@0 864 __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
duke@0 865 }
duke@0 866 }
duke@0 867
duke@0 868
duke@0 869 //=============================================================================
duke@0 870 #ifndef PRODUCT
duke@0 871 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 872 {
duke@0 873 Compile* C = ra_->C;
duke@0 874
duke@0 875 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 876 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 877 // Remove wordSize for return adr already pushed
duke@0 878 // and another for the RBP we are going to save
duke@0 879 framesize -= 2*wordSize;
duke@0 880 bool need_nop = true;
duke@0 881
duke@0 882 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 883 // We require that their callers bang for them. But be
duke@0 884 // careful, because some VM calls (such as call site linkage) can
duke@0 885 // use several kilobytes of stack. But the stack safety zone should
duke@0 886 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 887 if (C->need_stack_bang(framesize)) {
duke@0 888 st->print_cr("# stack bang"); st->print("\t");
duke@0 889 need_nop = false;
duke@0 890 }
duke@0 891 st->print_cr("pushq rbp"); st->print("\t");
duke@0 892
duke@0 893 if (VerifyStackAtCalls) {
duke@0 894 // Majik cookie to verify stack depth
duke@0 895 st->print_cr("pushq 0xffffffffbadb100d"
duke@0 896 "\t# Majik cookie for stack depth check");
duke@0 897 st->print("\t");
duke@0 898 framesize -= wordSize; // Remove 2 for cookie
duke@0 899 need_nop = false;
duke@0 900 }
duke@0 901
duke@0 902 if (framesize) {
duke@0 903 st->print("subq rsp, #%d\t# Create frame", framesize);
duke@0 904 if (framesize < 0x80 && need_nop) {
duke@0 905 st->print("\n\tnop\t# nop for patch_verified_entry");
duke@0 906 }
duke@0 907 }
duke@0 908 }
duke@0 909 #endif
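The framesize arithmetic in format() (and repeated in emit() below) works in 4-byte frame slots and subtracts the two words already on the stack: the return address and the RBP the prolog pushes. A standalone C++ sketch with a hypothetical slot count, not taken from this file:

  #include <cstdio>

  int main() {
    const int LogBytesPerInt = 2;  // a frame slot is 4 bytes
    const int wordSize       = 8;  // 64-bit word
    int frame_slots = 8;           // hypothetical value of C->frame_slots()

    int framesize = frame_slots << LogBytesPerInt;  // 32 bytes, 16-byte aligned
    framesize -= 2 * wordSize;  // return address and saved RBP already on the stack
    printf("subq rsp, #%d\t# Create frame\n", framesize);  // prints 16
    return 0;
  }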
duke@0 910
duke@0 911 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 912 {
duke@0 913 Compile* C = ra_->C;
duke@0 914
duke@0 915 // WARNING: Initial instruction MUST be 5 bytes or longer so that
duke@0 916 // NativeJump::patch_verified_entry will be able to patch out the entry
duke@0 917 // code safely. The fldcw is ok at 6 bytes, the push to verify stack
duke@0 918 // depth is ok at 5 bytes, the frame allocation can be either 3 or
duke@0 919 // 6 bytes. So if we don't do the fldcw or the push then we must
duke@0 920 // use the 6 byte frame allocation even if we have no frame. :-(
duke@0 921 // If method sets FPU control word do it now
duke@0 922
duke@0 923 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 924 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 925 // Remove wordSize for return adr already pushed
duke@0 926 // and another for the RBP we are going to save
duke@0 927 framesize -= 2*wordSize;
duke@0 928 bool need_nop = true;
duke@0 929
duke@0 930 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 931 // We require that their callers bang for them. But be
duke@0 932 // careful, because some VM calls (such as call site linkage) can
duke@0 933 // use several kilobytes of stack. But the stack safety zone should
duke@0 934 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 935 if (C->need_stack_bang(framesize)) {
duke@0 936 MacroAssembler masm(&cbuf);
duke@0 937 masm.generate_stack_overflow_check(framesize);
duke@0 938 need_nop = false;
duke@0 939 }
duke@0 940
duke@0 941 // We always push rbp so that on return to interpreter rbp will be
duke@0 942 // restored correctly and we can correct the stack.
duke@0 943 emit_opcode(cbuf, 0x50 | RBP_enc);
duke@0 944
duke@0 945 if (VerifyStackAtCalls) {
duke@0 946 // Majik cookie to verify stack depth
duke@0 947 emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
duke@0 948 emit_d32(cbuf, 0xbadb100d);
duke@0 949 framesize -= wordSize; // Remove 2 for cookie
duke@0 950 need_nop = false;
duke@0 951 }
duke@0 952
duke@0 953 if (framesize) {
duke@0 954 emit_opcode(cbuf, Assembler::REX_W);
duke@0 955 if (framesize < 0x80) {
duke@0 956 emit_opcode(cbuf, 0x83); // sub SP,#framesize
duke@0 957 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 958 emit_d8(cbuf, framesize);
duke@0 959 if (need_nop) {
duke@0 960 emit_opcode(cbuf, 0x90); // nop
duke@0 961 }
duke@0 962 } else {
duke@0 963 emit_opcode(cbuf, 0x81); // sub SP,#framesize
duke@0 964 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 965 emit_d32(cbuf, framesize);
duke@0 966 }
duke@0 967 }
duke@0 968
duke@0 969 C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
duke@0 970
duke@0 971 #ifdef ASSERT
duke@0 972 if (VerifyStackAtCalls) {
duke@0 973 Label L;
duke@0 974 MacroAssembler masm(&cbuf);
never@304 975 masm.push(rax);
never@304 976 masm.mov(rax, rsp);
never@304 977 masm.andptr(rax, StackAlignmentInBytes-1);
never@304 978 masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
never@304 979 masm.pop(rax);
duke@0 980 masm.jcc(Assembler::equal, L);
duke@0 981 masm.stop("Stack is not properly aligned!");
duke@0 982 masm.bind(L);
duke@0 983 }
duke@0 984 #endif
duke@0 985 }
duke@0 986
duke@0 987 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
duke@0 988 {
duke@0 989 return MachNode::size(ra_); // too many variables; just compute it
duke@0 990 // the hard way
duke@0 991 }
duke@0 992
duke@0 993 int MachPrologNode::reloc() const
duke@0 994 {
duke@0 995 return 0; // a large enough number
duke@0 996 }
duke@0 997
duke@0 998 //=============================================================================
duke@0 999 #ifndef PRODUCT
duke@0 1000 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1001 {
duke@0 1002 Compile* C = ra_->C;
duke@0 1003 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1004 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1005 // Remove word for return adr already pushed
duke@0 1006 // and RBP
duke@0 1007 framesize -= 2*wordSize;
duke@0 1008
duke@0 1009 if (framesize) {
duke@0 1010 st->print_cr("addq\trsp, %d\t# Destroy frame", framesize);
duke@0 1011 st->print("\t");
duke@0 1012 }
duke@0 1013
duke@0 1014 st->print_cr("popq\trbp");
duke@0 1015 if (do_polling() && C->is_method_compilation()) {
duke@0 1016 st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t"
duke@0 1017 "# Safepoint: poll for GC");
duke@0 1018 st->print("\t");
duke@0 1019 }
duke@0 1020 }
duke@0 1021 #endif
duke@0 1022
duke@0 1023 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1024 {
duke@0 1025 Compile* C = ra_->C;
duke@0 1026 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1027 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1028 // Remove word for return adr already pushed
duke@0 1029 // and RBP
duke@0 1030 framesize -= 2*wordSize;
duke@0 1031
duke@0 1032 // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
duke@0 1033
duke@0 1034 if (framesize) {
duke@0 1035 emit_opcode(cbuf, Assembler::REX_W);
duke@0 1036 if (framesize < 0x80) {
duke@0 1037 emit_opcode(cbuf, 0x83); // addq rsp, #framesize
duke@0 1038 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1039 emit_d8(cbuf, framesize);
duke@0 1040 } else {
duke@0 1041 emit_opcode(cbuf, 0x81); // addq rsp, #framesize
duke@0 1042 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1043 emit_d32(cbuf, framesize);
duke@0 1044 }
duke@0 1045 }
duke@0 1046
duke@0 1047 // popq rbp
duke@0 1048 emit_opcode(cbuf, 0x58 | RBP_enc);
duke@0 1049
duke@0 1050 if (do_polling() && C->is_method_compilation()) {
duke@0 1051 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
duke@0 1052 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 1053 cbuf.set_inst_mark();
duke@0 1054 cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
duke@0 1055 emit_opcode(cbuf, 0x85); // testl
duke@0 1056 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
duke@0 1057 // cbuf.inst_mark() is beginning of instruction
duke@0 1058 emit_d32_reloc(cbuf, os::get_polling_page());
duke@0 1059 // relocInfo::poll_return_type,
duke@0 1060 }
duke@0 1061 }
duke@0 1062
duke@0 1063 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
duke@0 1064 {
duke@0 1065 Compile* C = ra_->C;
duke@0 1066 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1067 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1068 // Remove word for return adr already pushed
duke@0 1069 // and RBP
duke@0 1070 framesize -= 2*wordSize;
duke@0 1071
duke@0 1072 uint size = 0;
duke@0 1073
duke@0 1074 if (do_polling() && C->is_method_compilation()) {
duke@0 1075 size += 6;
duke@0 1076 }
duke@0 1077
duke@0 1078 // count popq rbp
duke@0 1079 size++;
duke@0 1080
duke@0 1081 if (framesize) {
duke@0 1082 if (framesize < 0x80) {
duke@0 1083 size += 4;
duke@0 1084 } else if (framesize) {
duke@0 1085 size += 7;
duke@0 1086 }
duke@0 1087 }
duke@0 1088
duke@0 1089 return size;
duke@0 1090 }
duke@0 1091
duke@0 1092 int MachEpilogNode::reloc() const
duke@0 1093 {
duke@0 1094 return 2; // a large enough number
duke@0 1095 }
duke@0 1096
duke@0 1097 const Pipeline* MachEpilogNode::pipeline() const
duke@0 1098 {
duke@0 1099 return MachNode::pipeline_class();
duke@0 1100 }
duke@0 1101
duke@0 1102 int MachEpilogNode::safepoint_offset() const
duke@0 1103 {
duke@0 1104 return 0;
duke@0 1105 }
duke@0 1106
duke@0 1107 //=============================================================================
duke@0 1108
duke@0 1109 enum RC {
duke@0 1110 rc_bad,
duke@0 1111 rc_int,
duke@0 1112 rc_float,
duke@0 1113 rc_stack
duke@0 1114 };
duke@0 1115
duke@0 1116 static enum RC rc_class(OptoReg::Name reg)
duke@0 1117 {
duke@0 1118 if( !OptoReg::is_valid(reg) ) return rc_bad;
duke@0 1119
duke@0 1120 if (OptoReg::is_stack(reg)) return rc_stack;
duke@0 1121
duke@0 1122 VMReg r = OptoReg::as_VMReg(reg);
duke@0 1123
duke@0 1124 if (r->is_Register()) return rc_int;
duke@0 1125
duke@0 1126 assert(r->is_XMMRegister(), "must be");
duke@0 1127 return rc_float;
duke@0 1128 }
duke@0 1129
duke@0 1130 uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
duke@0 1131 PhaseRegAlloc* ra_,
duke@0 1132 bool do_size,
duke@0 1133 outputStream* st) const
duke@0 1134 {
duke@0 1135
duke@0 1136 // Get registers to move
duke@0 1137 OptoReg::Name src_second = ra_->get_reg_second(in(1));
duke@0 1138 OptoReg::Name src_first = ra_->get_reg_first(in(1));
duke@0 1139 OptoReg::Name dst_second = ra_->get_reg_second(this);
duke@0 1140 OptoReg::Name dst_first = ra_->get_reg_first(this);
duke@0 1141
duke@0 1142 enum RC src_second_rc = rc_class(src_second);
duke@0 1143 enum RC src_first_rc = rc_class(src_first);
duke@0 1144 enum RC dst_second_rc = rc_class(dst_second);
duke@0 1145 enum RC dst_first_rc = rc_class(dst_first);
duke@0 1146
duke@0 1147 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
duke@0 1148 "must move at least 1 register" );
duke@0 1149
duke@0 1150 if (src_first == dst_first && src_second == dst_second) {
duke@0 1151 // Self copy, no move
duke@0 1152 return 0;
duke@0 1153 } else if (src_first_rc == rc_stack) {
duke@0 1154 // mem ->
duke@0 1155 if (dst_first_rc == rc_stack) {
duke@0 1156 // mem -> mem
duke@0 1157 assert(src_second != dst_first, "overlap");
duke@0 1158 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1159 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1160 // 64-bit
duke@0 1161 int src_offset = ra_->reg2offset(src_first);
duke@0 1162 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1163 if (cbuf) {
duke@0 1164 emit_opcode(*cbuf, 0xFF);
duke@0 1165 encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);
duke@0 1166
duke@0 1167 emit_opcode(*cbuf, 0x8F);
duke@0 1168 encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);
duke@0 1169
duke@0 1170 #ifndef PRODUCT
duke@0 1171 } else if (!do_size) {
duke@0 1172 st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
duke@0 1173 "popq [rsp + #%d]",
duke@0 1174 src_offset,
duke@0 1175 dst_offset);
duke@0 1176 #endif
duke@0 1177 }
duke@0 1178 return
duke@0 1179 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
duke@0 1180 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
duke@0 1181 } else {
duke@0 1182 // 32-bit
duke@0 1183 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1184 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1185 // No pushl/popl, so:
duke@0 1186 int src_offset = ra_->reg2offset(src_first);
duke@0 1187 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1188 if (cbuf) {
duke@0 1189 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1190 emit_opcode(*cbuf, 0x89);
duke@0 1191 emit_opcode(*cbuf, 0x44);
duke@0 1192 emit_opcode(*cbuf, 0x24);
duke@0 1193 emit_opcode(*cbuf, 0xF8);
duke@0 1194
duke@0 1195 emit_opcode(*cbuf, 0x8B);
duke@0 1196 encode_RegMem(*cbuf,
duke@0 1197 RAX_enc,
duke@0 1198 RSP_enc, 0x4, 0, src_offset,
duke@0 1199 false);
duke@0 1200
duke@0 1201 emit_opcode(*cbuf, 0x89);
duke@0 1202 encode_RegMem(*cbuf,
duke@0 1203 RAX_enc,
duke@0 1204 RSP_enc, 0x4, 0, dst_offset,
duke@0 1205 false);
duke@0 1206
duke@0 1207 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1208 emit_opcode(*cbuf, 0x8B);
duke@0 1209 emit_opcode(*cbuf, 0x44);
duke@0 1210 emit_opcode(*cbuf, 0x24);
duke@0 1211 emit_opcode(*cbuf, 0xF8);
duke@0 1212
duke@0 1213 #ifndef PRODUCT
duke@0 1214 } else if (!do_size) {
duke@0 1215 st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
duke@0 1216 "movl rax, [rsp + #%d]\n\t"
duke@0 1217 "movl [rsp + #%d], rax\n\t"
duke@0 1218 "movq rax, [rsp - #8]",
duke@0 1219 src_offset,
duke@0 1220 dst_offset);
duke@0 1221 #endif
duke@0 1222 }
duke@0 1223 return
duke@0 1224 5 + // movq
duke@0 1225 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1226 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1227 5; // movq
duke@0 1228 }
duke@0 1229 } else if (dst_first_rc == rc_int) {
duke@0 1230 // mem -> gpr
duke@0 1231 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1232 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1233 // 64-bit
duke@0 1234 int offset = ra_->reg2offset(src_first);
duke@0 1235 if (cbuf) {
duke@0 1236 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1237 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1238 } else {
duke@0 1239 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1240 }
duke@0 1241 emit_opcode(*cbuf, 0x8B);
duke@0 1242 encode_RegMem(*cbuf,
duke@0 1243 Matcher::_regEncode[dst_first],
duke@0 1244 RSP_enc, 0x4, 0, offset,
duke@0 1245 false);
duke@0 1246 #ifndef PRODUCT
duke@0 1247 } else if (!do_size) {
duke@0 1248 st->print("movq %s, [rsp + #%d]\t# spill",
duke@0 1249 Matcher::regName[dst_first],
duke@0 1250 offset);
duke@0 1251 #endif
duke@0 1252 }
duke@0 1253 return
duke@0 1254 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1255 } else {
duke@0 1256 // 32-bit
duke@0 1257 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1258 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1259 int offset = ra_->reg2offset(src_first);
duke@0 1260 if (cbuf) {
duke@0 1261 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1262 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1263 }
duke@0 1264 emit_opcode(*cbuf, 0x8B);
duke@0 1265 encode_RegMem(*cbuf,
duke@0 1266 Matcher::_regEncode[dst_first],
duke@0 1267 RSP_enc, 0x4, 0, offset,
duke@0 1268 false);
duke@0 1269 #ifndef PRODUCT
duke@0 1270 } else if (!do_size) {
duke@0 1271 st->print("movl %s, [rsp + #%d]\t# spill",
duke@0 1272 Matcher::regName[dst_first],
duke@0 1273 offset);
duke@0 1274 #endif
duke@0 1275 }
duke@0 1276 return
duke@0 1277 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1278 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1279 ? 3
duke@0 1280 : 4); // REX
duke@0 1281 }
duke@0 1282 } else if (dst_first_rc == rc_float) {
duke@0 1283 // mem-> xmm
duke@0 1284 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1285 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1286 // 64-bit
duke@0 1287 int offset = ra_->reg2offset(src_first);
duke@0 1288 if (cbuf) {
duke@0 1289 emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 1290 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1291 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1292 }
duke@0 1293 emit_opcode(*cbuf, 0x0F);
duke@0 1294 emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
duke@0 1295 encode_RegMem(*cbuf,
duke@0 1296 Matcher::_regEncode[dst_first],
duke@0 1297 RSP_enc, 0x4, 0, offset,
duke@0 1298 false);
duke@0 1299 #ifndef PRODUCT
duke@0 1300 } else if (!do_size) {
duke@0 1301 st->print("%s %s, [rsp + #%d]\t# spill",
duke@0 1302 UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
duke@0 1303 Matcher::regName[dst_first],
duke@0 1304 offset);
duke@0 1305 #endif
duke@0 1306 }
duke@0 1307 return
duke@0 1308 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1309 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1310 ? 5
duke@0 1311 : 6); // REX
duke@0 1312 } else {
duke@0 1313 // 32-bit
duke@0 1314 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1315 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1316 int offset = ra_->reg2offset(src_first);
duke@0 1317 if (cbuf) {
duke@0 1318 emit_opcode(*cbuf, 0xF3);
duke@0 1319 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1320 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1321 }
duke@0 1322 emit_opcode(*cbuf, 0x0F);
duke@0 1323 emit_opcode(*cbuf, 0x10);
duke@0 1324 encode_RegMem(*cbuf,
duke@0 1325 Matcher::_regEncode[dst_first],
duke@0 1326 RSP_enc, 0x4, 0, offset,
duke@0 1327 false);
duke@0 1328 #ifndef PRODUCT
duke@0 1329 } else if (!do_size) {
duke@0 1330 st->print("movss %s, [rsp + #%d]\t# spill",
duke@0 1331 Matcher::regName[dst_first],
duke@0 1332 offset);
duke@0 1333 #endif
duke@0 1334 }
duke@0 1335 return
duke@0 1336 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1337 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1338 ? 5
duke@0 1339 : 6); // REX
duke@0 1340 }
duke@0 1341 }
duke@0 1342 } else if (src_first_rc == rc_int) {
duke@0 1343 // gpr ->
duke@0 1344 if (dst_first_rc == rc_stack) {
duke@0 1345 // gpr -> mem
duke@0 1346 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1347 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1348 // 64-bit
duke@0 1349 int offset = ra_->reg2offset(dst_first);
duke@0 1350 if (cbuf) {
duke@0 1351 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1352 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1353 } else {
duke@0 1354 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1355 }
duke@0 1356 emit_opcode(*cbuf, 0x89);
duke@0 1357 encode_RegMem(*cbuf,
duke@0 1358 Matcher::_regEncode[src_first],
duke@0 1359 RSP_enc, 0x4, 0, offset,
duke@0 1360 false);
duke@0 1361 #ifndef PRODUCT
duke@0 1362 } else if (!do_size) {
duke@0 1363 st->print("movq [rsp + #%d], %s\t# spill",
duke@0 1364 offset,
duke@0 1365 Matcher::regName[src_first]);
duke@0 1366 #endif
duke@0 1367 }
duke@0 1368 return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1369 } else {
duke@0 1370 // 32-bit
duke@0 1371 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1372 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1373 int offset = ra_->reg2offset(dst_first);
duke@0 1374 if (cbuf) {
duke@0 1375 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1376 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1377 }
duke@0 1378 emit_opcode(*cbuf, 0x89);
duke@0 1379 encode_RegMem(*cbuf,
duke@0 1380 Matcher::_regEncode[src_first],
duke@0 1381 RSP_enc, 0x4, 0, offset,
duke@0 1382 false);
duke@0 1383 #ifndef PRODUCT
duke@0 1384 } else if (!do_size) {
duke@0 1385 st->print("movl [rsp + #%d], %s\t# spill",
duke@0 1386 offset,
duke@0 1387 Matcher::regName[src_first]);
duke@0 1388 #endif
duke@0 1389 }
duke@0 1390 return
duke@0 1391 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1392 ((Matcher::_regEncode[src_first] < 8)
duke@0 1393 ? 3
duke@0 1394 : 4); // REX
duke@0 1395 }
duke@0 1396 } else if (dst_first_rc == rc_int) {
duke@0 1397 // gpr -> gpr
duke@0 1398 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1399 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1400 // 64-bit
duke@0 1401 if (cbuf) {
duke@0 1402 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1403 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1404 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1405 } else {
duke@0 1406 emit_opcode(*cbuf, Assembler::REX_WB);
duke@0 1407 }
duke@0 1408 } else {
duke@0 1409 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1410 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1411 } else {
duke@0 1412 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1413 }
duke@0 1414 }
duke@0 1415 emit_opcode(*cbuf, 0x8B);
duke@0 1416 emit_rm(*cbuf, 0x3,
duke@0 1417 Matcher::_regEncode[dst_first] & 7,
duke@0 1418 Matcher::_regEncode[src_first] & 7);
duke@0 1419 #ifndef PRODUCT
duke@0 1420 } else if (!do_size) {
duke@0 1421 st->print("movq %s, %s\t# spill",
duke@0 1422 Matcher::regName[dst_first],
duke@0 1423 Matcher::regName[src_first]);
duke@0 1424 #endif
duke@0 1425 }
duke@0 1426 return 3; // REX
duke@0 1427 } else {
duke@0 1428 // 32-bit
duke@0 1429 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1430 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1431 if (cbuf) {
duke@0 1432 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1433 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1434 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1435 }
duke@0 1436 } else {
duke@0 1437 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1438 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1439 } else {
duke@0 1440 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1441 }
duke@0 1442 }
duke@0 1443 emit_opcode(*cbuf, 0x8B);
duke@0 1444 emit_rm(*cbuf, 0x3,
duke@0 1445 Matcher::_regEncode[dst_first] & 7,
duke@0 1446 Matcher::_regEncode[src_first] & 7);
duke@0 1447 #ifndef PRODUCT
duke@0 1448 } else if (!do_size) {
duke@0 1449 st->print("movl %s, %s\t# spill",
duke@0 1450 Matcher::regName[dst_first],
duke@0 1451 Matcher::regName[src_first]);
duke@0 1452 #endif
duke@0 1453 }
duke@0 1454 return
duke@0 1455 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1456 ? 2
duke@0 1457 : 3; // REX
duke@0 1458 }
duke@0 1459 } else if (dst_first_rc == rc_float) {
duke@0 1460 // gpr -> xmm
duke@0 1461 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1462 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1463 // 64-bit
duke@0 1464 if (cbuf) {
duke@0 1465 emit_opcode(*cbuf, 0x66);
duke@0 1466 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1467 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1468 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1469 } else {
duke@0 1470 emit_opcode(*cbuf, Assembler::REX_WB);
duke@0 1471 }
duke@0 1472 } else {
duke@0 1473 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1474 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1475 } else {
duke@0 1476 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1477 }
duke@0 1478 }
duke@0 1479 emit_opcode(*cbuf, 0x0F);
duke@0 1480 emit_opcode(*cbuf, 0x6E);
duke@0 1481 emit_rm(*cbuf, 0x3,
duke@0 1482 Matcher::_regEncode[dst_first] & 7,
duke@0 1483 Matcher::_regEncode[src_first] & 7);
duke@0 1484 #ifndef PRODUCT
duke@0 1485 } else if (!do_size) {
duke@0 1486 st->print("movdq %s, %s\t# spill",
duke@0 1487 Matcher::regName[dst_first],
duke@0 1488 Matcher::regName[src_first]);
duke@0 1489 #endif
duke@0 1490 }
duke@0 1491 return 5; // REX
duke@0 1492 } else {
duke@0 1493 // 32-bit
duke@0 1494 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1495 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1496 if (cbuf) {
duke@0 1497 emit_opcode(*cbuf, 0x66);
duke@0 1498 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1499 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1500 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1501 }
duke@0 1502 } else {
duke@0 1503 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1504 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1505 } else {
duke@0 1506 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1507 }
duke@0 1508 }
duke@0 1509 emit_opcode(*cbuf, 0x0F);
duke@0 1510 emit_opcode(*cbuf, 0x6E);
duke@0 1511 emit_rm(*cbuf, 0x3,
duke@0 1512 Matcher::_regEncode[dst_first] & 7,
duke@0 1513 Matcher::_regEncode[src_first] & 7);
duke@0 1514 #ifndef PRODUCT
duke@0 1515 } else if (!do_size) {
duke@0 1516 st->print("movdl %s, %s\t# spill",
duke@0 1517 Matcher::regName[dst_first],
duke@0 1518 Matcher::regName[src_first]);
duke@0 1519 #endif
duke@0 1520 }
duke@0 1521 return
duke@0 1522 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1523 ? 4
duke@0 1524 : 5; // REX
duke@0 1525 }
duke@0 1526 }
duke@0 1527 } else if (src_first_rc == rc_float) {
duke@0 1528 // xmm ->
duke@0 1529 if (dst_first_rc == rc_stack) {
duke@0 1530 // xmm -> mem
duke@0 1531 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1532 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1533 // 64-bit
duke@0 1534 int offset = ra_->reg2offset(dst_first);
duke@0 1535 if (cbuf) {
duke@0 1536 emit_opcode(*cbuf, 0xF2);
duke@0 1537 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1538 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1539 }
duke@0 1540 emit_opcode(*cbuf, 0x0F);
duke@0 1541 emit_opcode(*cbuf, 0x11);
duke@0 1542 encode_RegMem(*cbuf,
duke@0 1543 Matcher::_regEncode[src_first],
duke@0 1544 RSP_enc, 0x4, 0, offset,
duke@0 1545 false);
duke@0 1546 #ifndef PRODUCT
duke@0 1547 } else if (!do_size) {
duke@0 1548 st->print("movsd [rsp + #%d], %s\t# spill",
duke@0 1549 offset,
duke@0 1550 Matcher::regName[src_first]);
duke@0 1551 #endif
duke@0 1552 }
duke@0 1553 return
duke@0 1554 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1555 ((Matcher::_regEncode[src_first] < 8)
duke@0 1556 ? 5
duke@0 1557 : 6); // REX
duke@0 1558 } else {
duke@0 1559 // 32-bit
duke@0 1560 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1561 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1562 int offset = ra_->reg2offset(dst_first);
duke@0 1563 if (cbuf) {
duke@0 1564 emit_opcode(*cbuf, 0xF3);
duke@0 1565 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1566 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1567 }
duke@0 1568 emit_opcode(*cbuf, 0x0F);
duke@0 1569 emit_opcode(*cbuf, 0x11);
duke@0 1570 encode_RegMem(*cbuf,
duke@0 1571 Matcher::_regEncode[src_first],
duke@0 1572 RSP_enc, 0x4, 0, offset,
duke@0 1573 false);
duke@0 1574 #ifndef PRODUCT
duke@0 1575 } else if (!do_size) {
duke@0 1576 st->print("movss [rsp + #%d], %s\t# spill",
duke@0 1577 offset,
duke@0 1578 Matcher::regName[src_first]);
duke@0 1579 #endif
duke@0 1580 }
duke@0 1581 return
duke@0 1582 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1583 ((Matcher::_regEncode[src_first] < 8)
duke@0 1584 ? 5
duke@0 1585 : 6); // REX
duke@0 1586 }
duke@0 1587 } else if (dst_first_rc == rc_int) {
duke@0 1588 // xmm -> gpr
duke@0 1589 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1590 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1591 // 64-bit
duke@0 1592 if (cbuf) {
duke@0 1593 emit_opcode(*cbuf, 0x66);
duke@0 1594 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1595 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1596 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1597 } else {
duke@0 1598 emit_opcode(*cbuf, Assembler::REX_WR); // attention!
duke@0 1599 }
duke@0 1600 } else {
duke@0 1601 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1602 emit_opcode(*cbuf, Assembler::REX_WB); // attention!
duke@0 1603 } else {
duke@0 1604 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1605 }
duke@0 1606 }
duke@0 1607 emit_opcode(*cbuf, 0x0F);
duke@0 1608 emit_opcode(*cbuf, 0x7E);
duke@0 1609 emit_rm(*cbuf, 0x3,
duke@0 1610 Matcher::_regEncode[dst_first] & 7,
duke@0 1611 Matcher::_regEncode[src_first] & 7);
duke@0 1612 #ifndef PRODUCT
duke@0 1613 } else if (!do_size) {
duke@0 1614 st->print("movdq %s, %s\t# spill",
duke@0 1615 Matcher::regName[dst_first],
duke@0 1616 Matcher::regName[src_first]);
duke@0 1617 #endif
duke@0 1618 }
duke@0 1619 return 5; // REX
duke@0 1620 } else {
duke@0 1621 // 32-bit
duke@0 1622 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1623 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1624 if (cbuf) {
duke@0 1625 emit_opcode(*cbuf, 0x66);
duke@0 1626 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1627 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1628 emit_opcode(*cbuf, Assembler::REX_R); // attention!
duke@0 1629 }
duke@0 1630 } else {
duke@0 1631 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1632 emit_opcode(*cbuf, Assembler::REX_B); // attention!
duke@0 1633 } else {
duke@0 1634 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1635 }
duke@0 1636 }
duke@0 1637 emit_opcode(*cbuf, 0x0F);
duke@0 1638 emit_opcode(*cbuf, 0x7E);
duke@0 1639 emit_rm(*cbuf, 0x3,
duke@0 1640 Matcher::_regEncode[dst_first] & 7,
duke@0 1641 Matcher::_regEncode[src_first] & 7);
duke@0 1642 #ifndef PRODUCT
duke@0 1643 } else if (!do_size) {
duke@0 1644 st->print("movdl %s, %s\t# spill",
duke@0 1645 Matcher::regName[dst_first],
duke@0 1646 Matcher::regName[src_first]);
duke@0 1647 #endif
duke@0 1648 }
duke@0 1649 return
duke@0 1650 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1651 ? 4
duke@0 1652 : 5; // REX
duke@0 1653 }
duke@0 1654 } else if (dst_first_rc == rc_float) {
duke@0 1655 // xmm -> xmm
duke@0 1656 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1657 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1658 // 64-bit
duke@0 1659 if (cbuf) {
duke@0 1660 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
duke@0 1661 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1662 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1663 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1664 }
duke@0 1665 } else {
duke@0 1666 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1667 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1668 } else {
duke@0 1669 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1670 }
duke@0 1671 }
duke@0 1672 emit_opcode(*cbuf, 0x0F);
duke@0 1673 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 1674 emit_rm(*cbuf, 0x3,
duke@0 1675 Matcher::_regEncode[dst_first] & 7,
duke@0 1676 Matcher::_regEncode[src_first] & 7);
duke@0 1677 #ifndef PRODUCT
duke@0 1678 } else if (!do_size) {
duke@0 1679 st->print("%s %s, %s\t# spill",
duke@0 1680 UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
duke@0 1681 Matcher::regName[dst_first],
duke@0 1682 Matcher::regName[src_first]);
duke@0 1683 #endif
duke@0 1684 }
duke@0 1685 return
duke@0 1686 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1687 ? 4
duke@0 1688 : 5; // REX
duke@0 1689 } else {
duke@0 1690 // 32-bit
duke@0 1691 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1692 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1693 if (cbuf) {
duke@0 1694 if (!UseXmmRegToRegMoveAll)
duke@0 1695 emit_opcode(*cbuf, 0xF3);
duke@0 1696 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1697 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1698 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1699 }
duke@0 1700 } else {
duke@0 1701 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1702 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1703 } else {
duke@0 1704 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1705 }
duke@0 1706 }
duke@0 1707 emit_opcode(*cbuf, 0x0F);
duke@0 1708 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 1709 emit_rm(*cbuf, 0x3,
duke@0 1710 Matcher::_regEncode[dst_first] & 7,
duke@0 1711 Matcher::_regEncode[src_first] & 7);
duke@0 1712 #ifndef PRODUCT
duke@0 1713 } else if (!do_size) {
duke@0 1714 st->print("%s %s, %s\t# spill",
duke@0 1715 UseXmmRegToRegMoveAll ? "movaps" : "movss ",
duke@0 1716 Matcher::regName[dst_first],
duke@0 1717 Matcher::regName[src_first]);
duke@0 1718 #endif
duke@0 1719 }
duke@0 1720 return
duke@0 1721 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1722 ? (UseXmmRegToRegMoveAll ? 3 : 4)
duke@0 1723 : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
duke@0 1724 }
duke@0 1725 }
duke@0 1726 }
duke@0 1727
duke@0 1728   assert(0, "unhandled spill copy register class combination");
duke@0 1729 Unimplemented();
duke@0 1730
duke@0 1731 return 0;
duke@0 1732 }
duke@0 1733
duke@0 1734 #ifndef PRODUCT
duke@0 1735 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
duke@0 1736 {
duke@0 1737 implementation(NULL, ra_, false, st);
duke@0 1738 }
duke@0 1739 #endif
duke@0 1740
duke@0 1741 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 1742 {
duke@0 1743 implementation(&cbuf, ra_, false, NULL);
duke@0 1744 }
duke@0 1745
duke@0 1746 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
duke@0 1747 {
duke@0 1748 return implementation(NULL, ra_, true, NULL);
duke@0 1749 }
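// Note on the byte counts returned by implementation() above: when called for
// size() (cbuf == NULL, do_size == true) it must return exactly the number of
// bytes the copy would occupy.  For example, the 64-bit gpr -> gpr case emits
// a REX.W-family prefix, the 0x8B MOV opcode and one ModRM byte ("return 3"),
// while the 32-bit gpr -> gpr case needs a REX prefix only when one of the
// registers is R8..R15, giving the 2-or-3 byte result.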
duke@0 1750
duke@0 1751 //=============================================================================
duke@0 1752 #ifndef PRODUCT
duke@0 1753 void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 1754 {
duke@0 1755 st->print("nop \t# %d bytes pad for loops and calls", _count);
duke@0 1756 }
duke@0 1757 #endif
duke@0 1758
duke@0 1759 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
duke@0 1760 {
duke@0 1761 MacroAssembler _masm(&cbuf);
duke@0 1762 __ nop(_count);
duke@0 1763 }
duke@0 1764
duke@0 1765 uint MachNopNode::size(PhaseRegAlloc*) const
duke@0 1766 {
duke@0 1767 return _count;
duke@0 1768 }
duke@0 1769
duke@0 1770
duke@0 1771 //=============================================================================
duke@0 1772 #ifndef PRODUCT
duke@0 1773 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1774 {
duke@0 1775 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1776 int reg = ra_->get_reg_first(this);
duke@0 1777 st->print("leaq %s, [rsp + #%d]\t# box lock",
duke@0 1778 Matcher::regName[reg], offset);
duke@0 1779 }
duke@0 1780 #endif
duke@0 1781
duke@0 1782 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1783 {
duke@0 1784 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1785 int reg = ra_->get_encode(this);
duke@0 1786 if (offset >= 0x80) {
duke@0 1787 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1788 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1789 emit_rm(cbuf, 0x2, reg & 7, 0x04);
duke@0 1790 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1791 emit_d32(cbuf, offset);
duke@0 1792 } else {
duke@0 1793 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1794 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1795 emit_rm(cbuf, 0x1, reg & 7, 0x04);
duke@0 1796 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1797 emit_d8(cbuf, offset);
duke@0 1798 }
duke@0 1799 }
duke@0 1800
duke@0 1801 uint BoxLockNode::size(PhaseRegAlloc *ra_) const
duke@0 1802 {
duke@0 1803 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1804 return (offset < 0x80) ? 5 : 8; // REX
duke@0 1805 }
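// Size sketch for the LEA emitted above: REX prefix (1) + 0x8D opcode (1) +
// ModRM (1) + SIB (1) + displacement, i.e. 5 bytes with an 8-bit displacement
// and 8 bytes once a 32-bit displacement is required.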
duke@0 1806
duke@0 1807 //=============================================================================
duke@0 1808
duke@0 1809 // emit call stub, compiled java to interpreter
duke@0 1810 void emit_java_to_interp(CodeBuffer& cbuf)
duke@0 1811 {
duke@0 1812 // Stub is fixed up when the corresponding call is converted from
duke@0 1813 // calling compiled code to calling interpreted code.
duke@0 1814 // movq rbx, 0
duke@0 1815 // jmp -5 # to self
duke@0 1816
duke@0 1817 address mark = cbuf.inst_mark(); // get mark within main instrs section
duke@0 1818
duke@0 1819 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1820 // That's why we must use the macroassembler to generate a stub.
duke@0 1821 MacroAssembler _masm(&cbuf);
duke@0 1822
duke@0 1823 address base =
duke@0 1824 __ start_a_stub(Compile::MAX_stubs_size);
duke@0 1825 if (base == NULL) return; // CodeBuffer::expand failed
duke@0 1826 // static stub relocation stores the instruction address of the call
duke@0 1827 __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
duke@0 1828 // static stub relocation also tags the methodOop in the code-stream.
duke@0 1829 __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
never@304 1830 // This is recognized as unresolved by relocs/nativeinst/ic code
duke@0 1831 __ jump(RuntimeAddress(__ pc()));
duke@0 1832
duke@0 1833 // Update current stubs pointer and restore code_end.
duke@0 1834 __ end_a_stub();
duke@0 1835 }
duke@0 1836
duke@0 1837 // size of call stub, compiled java to interpreter
duke@0 1838 uint size_java_to_interp()
duke@0 1839 {
duke@0 1840 return 15; // movq (1+1+8); jmp (1+4)
duke@0 1841 }
duke@0 1842
duke@0 1843 // relocation entries for call stub, compiled java to interpreter
duke@0 1844 uint reloc_java_to_interp()
duke@0 1845 {
duke@0 1846 return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
duke@0 1847 }
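// The count of 3 appears to correspond to the relocations produced in
// emit_java_to_interp() above: the static_stub_Relocation, the oop relocation
// from movoop(), and the runtime address used by the jump; Java_Static_Call
// (in the encode block below) contributes the fourth.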
duke@0 1848
duke@0 1849 //=============================================================================
duke@0 1850 #ifndef PRODUCT
duke@0 1851 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1852 {
coleenp@113 1853 if (UseCompressedOops) {
coleenp@113 1854 st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
kvn@642 1855 if (Universe::narrow_oop_shift() != 0) {
kvn@642 1856 st->print_cr("leaq rscratch1, [r12_heapbase, r, Address::times_8, 0]");
kvn@642 1857 }
coleenp@113 1858 st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
coleenp@113 1859 } else {
coleenp@113 1860 st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
coleenp@113 1861 "# Inline cache check", oopDesc::klass_offset_in_bytes());
coleenp@113 1862 }
duke@0 1863 st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
duke@0 1864 st->print_cr("\tnop");
duke@0 1865 if (!OptoBreakpoint) {
duke@0 1866 st->print_cr("\tnop");
duke@0 1867 }
duke@0 1868 }
duke@0 1869 #endif
duke@0 1870
duke@0 1871 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1872 {
duke@0 1873 MacroAssembler masm(&cbuf);
duke@0 1874 #ifdef ASSERT
duke@0 1875 uint code_size = cbuf.code_size();
duke@0 1876 #endif
coleenp@113 1877 if (UseCompressedOops) {
coleenp@113 1878 masm.load_klass(rscratch1, j_rarg0);
never@304 1879 masm.cmpptr(rax, rscratch1);
coleenp@113 1880 } else {
never@304 1881 masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
coleenp@113 1882 }
duke@0 1883
duke@0 1884 masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
duke@0 1885
duke@0 1886 /* WARNING: these NOPs are critical so that the verified entry point is
duke@0 1887 properly aligned for patching by NativeJump::patch_verified_entry() */
duke@0 1888 int nops_cnt = 1;
duke@0 1889 if (!OptoBreakpoint) {
duke@0 1890 // Leave space for int3
duke@0 1891 nops_cnt += 1;
duke@0 1892 }
coleenp@113 1893 if (UseCompressedOops) {
coleenp@113 1894 // ??? divisible by 4 is aligned?
coleenp@113 1895 nops_cnt += 1;
coleenp@113 1896 }
duke@0 1897 masm.nop(nops_cnt);
duke@0 1898
duke@0 1899 assert(cbuf.code_size() - code_size == size(ra_),
duke@0 1900 "checking code size of inline cache node");
duke@0 1901 }
duke@0 1902
duke@0 1903 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
duke@0 1904 {
coleenp@113 1905 if (UseCompressedOops) {
kvn@642 1906 if (Universe::narrow_oop_shift() == 0) {
kvn@642 1907 return OptoBreakpoint ? 15 : 16;
kvn@642 1908 } else {
kvn@642 1909 return OptoBreakpoint ? 19 : 20;
kvn@642 1910 }
coleenp@113 1911 } else {
coleenp@113 1912 return OptoBreakpoint ? 11 : 12;
coleenp@113 1913 }
duke@0 1914 }
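// The size variants above mirror which pieces emit() produces: skipping the
// OptoBreakpoint nop saves one byte, UseCompressedOops adds the load_klass
// path plus an extra alignment nop, and a non-zero narrow_oop_shift adds the
// 4-byte decoding lea, which accounts for 19/20 versus 15/16.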
duke@0 1915
duke@0 1916
duke@0 1917 //=============================================================================
duke@0 1918 uint size_exception_handler()
duke@0 1919 {
duke@0 1920 // NativeCall instruction size is the same as NativeJump.
duke@0 1921 // Note that this value is also credited (in output.cpp) to
duke@0 1922 // the size of the code section.
duke@0 1923 return NativeJump::instruction_size;
duke@0 1924 }
duke@0 1925
duke@0 1926 // Emit exception handler code.
duke@0 1927 int emit_exception_handler(CodeBuffer& cbuf)
duke@0 1928 {
duke@0 1929
duke@0 1930 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1931 // That's why we must use the macroassembler to generate a handler.
duke@0 1932 MacroAssembler _masm(&cbuf);
duke@0 1933 address base =
duke@0 1934 __ start_a_stub(size_exception_handler());
duke@0 1935 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1936 int offset = __ offset();
duke@0 1937 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
duke@0 1938 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
duke@0 1939 __ end_a_stub();
duke@0 1940 return offset;
duke@0 1941 }
duke@0 1942
duke@0 1943 uint size_deopt_handler()
duke@0 1944 {
duke@0 1945 // three 5-byte instructions
duke@0 1946 return 15;
duke@0 1947 }
duke@0 1948
duke@0 1949 // Emit deopt handler code.
duke@0 1950 int emit_deopt_handler(CodeBuffer& cbuf)
duke@0 1951 {
duke@0 1952
duke@0 1953 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1954 // That's why we must use the macroassembler to generate a handler.
duke@0 1955 MacroAssembler _masm(&cbuf);
duke@0 1956 address base =
duke@0 1957 __ start_a_stub(size_deopt_handler());
duke@0 1958 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1959 int offset = __ offset();
duke@0 1960 address the_pc = (address) __ pc();
duke@0 1961 Label next;
duke@0 1962 // push "the_pc" on the stack without destroying any registers
duke@0 1963 // as they all may be live.
duke@0 1964
duke@0 1965 // push address of "next"
duke@0 1966 __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
duke@0 1967 __ bind(next);
duke@0 1968 // adjust it so it matches "the_pc"
never@304 1969 __ subptr(Address(rsp, 0), __ offset() - offset);
duke@0 1970 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
duke@0 1971 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
duke@0 1972 __ end_a_stub();
duke@0 1973 return offset;
duke@0 1974 }
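// Layout sketch of the handler above: call(next) pushes the address of the
// label, subptr(Address(rsp, 0), ...) then rewinds that pushed value so it
// equals the_pc (the handler's own start), and the final jump transfers to
// the deopt blob.  Each piece -- call rel32, subq [rsp] imm8, jmp rel32 --
// encodes in 5 bytes, matching size_deopt_handler().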
duke@0 1975
duke@0 1976 static void emit_double_constant(CodeBuffer& cbuf, double x) {
duke@0 1977 int mark = cbuf.insts()->mark_off();
duke@0 1978 MacroAssembler _masm(&cbuf);
duke@0 1979 address double_address = __ double_constant(x);
duke@0 1980 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
duke@0 1981 emit_d32_reloc(cbuf,
duke@0 1982 (int) (double_address - cbuf.code_end() - 4),
duke@0 1983 internal_word_Relocation::spec(double_address),
duke@0 1984 RELOC_DISP32);
duke@0 1985 }
duke@0 1986
duke@0 1987 static void emit_float_constant(CodeBuffer& cbuf, float x) {
duke@0 1988 int mark = cbuf.insts()->mark_off();
duke@0 1989 MacroAssembler _masm(&cbuf);
duke@0 1990 address float_address = __ float_constant(x);
duke@0 1991 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
duke@0 1992 emit_d32_reloc(cbuf,
duke@0 1993 (int) (float_address - cbuf.code_end() - 4),
duke@0 1994 internal_word_Relocation::spec(float_address),
duke@0 1995 RELOC_DISP32);
duke@0 1996 }
duke@0 1997
duke@0 1998
twisti@775 1999 const bool Matcher::match_rule_supported(int opcode) {
twisti@775 2000 if (!has_match_rule(opcode))
twisti@775 2001 return false;
twisti@775 2002
twisti@775 2003 return true; // Per default match rules are supported.
twisti@775 2004 }
twisti@775 2005
duke@0 2006 int Matcher::regnum_to_fpu_offset(int regnum)
duke@0 2007 {
duke@0 2008 return regnum - 32; // The FP registers are in the second chunk
duke@0 2009 }
duke@0 2010
duke@0 2011 // This is UltraSparc-specific; returning true just means we have fast l2f conversion
duke@0 2012 const bool Matcher::convL2FSupported(void) {
duke@0 2013 return true;
duke@0 2014 }
duke@0 2015
duke@0 2016 // Vector width in bytes
duke@0 2017 const uint Matcher::vector_width_in_bytes(void) {
duke@0 2018 return 8;
duke@0 2019 }
duke@0 2020
duke@0 2021 // Vector ideal reg
duke@0 2022 const uint Matcher::vector_ideal_reg(void) {
duke@0 2023 return Op_RegD;
duke@0 2024 }
duke@0 2025
duke@0 2026 // Is this branch offset short enough that a short branch can be used?
duke@0 2027 //
duke@0 2028 // NOTE: If the platform does not provide any short branch variants, then
duke@0 2029 // this method should return false for offset 0.
never@415 2030 bool Matcher::is_short_branch_offset(int rule, int offset) {
never@415 2031 // the short version of jmpConUCF2 contains multiple branches,
never@415 2032 // making the reach slightly shorter
never@415 2033 if (rule == jmpConUCF2_rule)
never@415 2034 return (-126 <= offset && offset <= 125);
never@415 2035 return (-128 <= offset && offset <= 127);
duke@0 2036 }
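// A short Jcc/JMP takes an 8-bit displacement measured from the end of the
// 2-byte instruction, hence the usual [-128, 127] reach.  jmpConUCF2 expands
// into more than one short branch, so the extra bytes ahead of the final one
// shrink the usable range to [-126, 125].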
duke@0 2037
duke@0 2038 const bool Matcher::isSimpleConstant64(jlong value) {
duke@0 2039 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
duke@0 2040 //return value == (int) value; // Cf. storeImmL and immL32.
duke@0 2041
duke@0 2042 // Probably always true, even if a temp register is required.
duke@0 2043 return true;
duke@0 2044 }
duke@0 2045
duke@0 2046 // The rcx parameter to rep stosq for the ClearArray node is in words.
duke@0 2047 const bool Matcher::init_array_count_is_in_bytes = false;
duke@0 2048
duke@0 2049 // Threshold size for cleararray.
duke@0 2050 const int Matcher::init_array_short_size = 8 * BytesPerLong;
duke@0 2051
duke@0 2052 // Should the Matcher clone shifts on addressing modes, expecting them
duke@0 2053 // to be subsumed into complex addressing expressions or compute them
duke@0 2054 // into registers? True for Intel but false for most RISCs
duke@0 2055 const bool Matcher::clone_shift_expressions = true;
duke@0 2056
duke@0 2057 // Is it better to copy float constants, or load them directly from
duke@0 2058 // memory? Intel can load a float constant from a direct address,
duke@0 2059 // requiring no extra registers. Most RISCs will have to materialize
duke@0 2060 // an address into a register first, so they would do better to copy
duke@0 2061 // the constant from stack.
duke@0 2062 const bool Matcher::rematerialize_float_constants = true; // XXX
duke@0 2063
duke@0 2064 // If CPU can load and store mis-aligned doubles directly then no
duke@0 2065 // fixup is needed. Else we split the double into 2 integer pieces
duke@0 2066 // and move it piece-by-piece. Only happens when passing doubles into
duke@0 2067 // C code as the Java calling convention forces doubles to be aligned.
duke@0 2068 const bool Matcher::misaligned_doubles_ok = true;
duke@0 2069
duke@0 2070 // No-op on amd64
duke@0 2071 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
duke@0 2072
duke@0 2073 // Advertise here if the CPU requires explicit rounding operations to
duke@0 2074 // implement the UseStrictFP mode.
duke@0 2075 const bool Matcher::strict_fp_requires_explicit_rounding = true;
duke@0 2076
duke@0 2077 // Do floats take an entire double register or just half?
duke@0 2078 const bool Matcher::float_in_double = true;
duke@0 2079 // Do ints take an entire long register or just half?
duke@0 2080 const bool Matcher::int_in_long = true;
duke@0 2081
duke@0 2082 // Return whether or not this register is ever used as an argument.
duke@0 2083 // This function is used on startup to build the trampoline stubs in
duke@0 2084 // generateOptoStub. Registers not mentioned will be killed by the VM
duke@0 2085 // call in the trampoline, and arguments in those registers will not be
duke@0 2086 // available to the callee.
duke@0 2087 bool Matcher::can_be_java_arg(int reg)
duke@0 2088 {
duke@0 2089 return
duke@0 2090 reg == RDI_num || reg == RDI_H_num ||
duke@0 2091 reg == RSI_num || reg == RSI_H_num ||
duke@0 2092 reg == RDX_num || reg == RDX_H_num ||
duke@0 2093 reg == RCX_num || reg == RCX_H_num ||
duke@0 2094 reg == R8_num || reg == R8_H_num ||
duke@0 2095 reg == R9_num || reg == R9_H_num ||
coleenp@113 2096 reg == R12_num || reg == R12_H_num ||
duke@0 2097 reg == XMM0_num || reg == XMM0_H_num ||
duke@0 2098 reg == XMM1_num || reg == XMM1_H_num ||
duke@0 2099 reg == XMM2_num || reg == XMM2_H_num ||
duke@0 2100 reg == XMM3_num || reg == XMM3_H_num ||
duke@0 2101 reg == XMM4_num || reg == XMM4_H_num ||
duke@0 2102 reg == XMM5_num || reg == XMM5_H_num ||
duke@0 2103 reg == XMM6_num || reg == XMM6_H_num ||
duke@0 2104 reg == XMM7_num || reg == XMM7_H_num;
duke@0 2105 }
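// These are the six integer argument registers of the AMD64 Java calling
// convention (rdi, rsi, rdx, rcx, r8, r9) plus xmm0-xmm7 for floating-point
// arguments; r12 is listed as well, presumably so the compressed-oop heap
// base survives the trampoline stubs described above.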
duke@0 2106
duke@0 2107 bool Matcher::is_spillable_arg(int reg)
duke@0 2108 {
duke@0 2109 return can_be_java_arg(reg);
duke@0 2110 }
duke@0 2111
duke@0 2112 // Register for DIVI projection of divmodI
duke@0 2113 RegMask Matcher::divI_proj_mask() {
duke@0 2114 return INT_RAX_REG_mask;
duke@0 2115 }
duke@0 2116
duke@0 2117 // Register for MODI projection of divmodI
duke@0 2118 RegMask Matcher::modI_proj_mask() {
duke@0 2119 return INT_RDX_REG_mask;
duke@0 2120 }
duke@0 2121
duke@0 2122 // Register for DIVL projection of divmodL
duke@0 2123 RegMask Matcher::divL_proj_mask() {
duke@0 2124 return LONG_RAX_REG_mask;
duke@0 2125 }
duke@0 2126
duke@0 2127 // Register for MODL projection of divmodL
duke@0 2128 RegMask Matcher::modL_proj_mask() {
duke@0 2129 return LONG_RDX_REG_mask;
duke@0 2130 }
duke@0 2131
twisti@1137 2132 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
twisti@1137 2133 return PTR_RBP_REG_mask;
twisti@1137 2134 }
twisti@1137 2135
coleenp@113 2136 static Address build_address(int b, int i, int s, int d) {
coleenp@113 2137 Register index = as_Register(i);
coleenp@113 2138 Address::ScaleFactor scale = (Address::ScaleFactor)s;
coleenp@113 2139 if (index == rsp) {
coleenp@113 2140 index = noreg;
coleenp@113 2141 scale = Address::no_scale;
coleenp@113 2142 }
coleenp@113 2143 Address addr(as_Register(b), index, scale, d);
coleenp@113 2144 return addr;
coleenp@113 2145 }
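// rsp cannot serve as an index register on x86 (its encoding in the SIB
// index field means "no index"), so the helper above downgrades an rsp index
// to noreg / no_scale before building the Address.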
coleenp@113 2146
duke@0 2147 %}
duke@0 2148
duke@0 2149 //----------ENCODING BLOCK-----------------------------------------------------
duke@0 2150 // This block specifies the encoding classes used by the compiler to
duke@0 2151 // output byte streams. Encoding classes are parameterized macros
duke@0 2152 // used by Machine Instruction Nodes in order to generate the bit
duke@0 2153 // encoding of the instruction. Operands specify their base encoding
duke@0 2154 // interface with the interface keyword. There are currently four
duke@0 2155 // supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, &
duke@0 2156 // COND_INTER. REG_INTER causes an operand to generate a function
duke@0 2157 // which returns its register number when queried. CONST_INTER causes
duke@0 2158 // an operand to generate a function which returns the value of the
duke@0 2159 // constant when queried. MEMORY_INTER causes an operand to generate
duke@0 2160 // four functions which return the Base Register, the Index Register,
duke@0 2161 // the Scale Value, and the Offset Value of the operand when queried.
duke@0 2162 // COND_INTER causes an operand to generate six functions which return
duke@0 2163 // the encoding code (ie - encoding bits for the instruction)
duke@0 2164 // associated with each basic boolean condition for a conditional
duke@0 2165 // instruction.
duke@0 2166 //
duke@0 2167 // Instructions specify two basic values for encoding. Again, a
duke@0 2168 // function is available to check if the constant displacement is an
duke@0 2169 // oop. They use the ins_encode keyword to specify their encoding
duke@0 2170 // classes (which must be a sequence of enc_class names, and their
duke@0 2171 // parameters, specified in the encoding block), and they use the
duke@0 2172 // opcode keyword to specify, in order, their primary, secondary, and
duke@0 2173 // tertiary opcode. Only the opcode sections which a particular
duke@0 2174 // instruction needs for encoding need to be specified.
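// As an illustrative sketch only (the register names and exact enc_class list
// are not quoted from any particular instruct), an instruction definition
// later in this file typically ties these pieces together roughly as:
//
//   opcode(0x03);                                   // primary opcode
//   ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
//
// i.e. the enc_class sequence emits the REX prefix, then the primary opcode
// via OpcP, then the ModRM byte built by reg_reg below.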
duke@0 2175 encode %{
duke@0 2176 // Build emit functions for each basic byte or larger field in the
duke@0 2177 // intel encoding scheme (opcode, rm, sib, immediate), and call them
duke@0 2178 // from C++ code in the enc_class source block. Emit functions will
duke@0 2179 // live in the main source block for now. In future, we can
duke@0 2180 // generalize this by adding a syntax that specifies the sizes of
duke@0 2181 // fields in an order, so that the adlc can build the emit functions
duke@0 2182 // automagically
duke@0 2183
duke@0 2184 // Emit primary opcode
duke@0 2185 enc_class OpcP
duke@0 2186 %{
duke@0 2187 emit_opcode(cbuf, $primary);
duke@0 2188 %}
duke@0 2189
duke@0 2190 // Emit secondary opcode
duke@0 2191 enc_class OpcS
duke@0 2192 %{
duke@0 2193 emit_opcode(cbuf, $secondary);
duke@0 2194 %}
duke@0 2195
duke@0 2196 // Emit tertiary opcode
duke@0 2197 enc_class OpcT
duke@0 2198 %{
duke@0 2199 emit_opcode(cbuf, $tertiary);
duke@0 2200 %}
duke@0 2201
duke@0 2202 // Emit opcode directly
duke@0 2203 enc_class Opcode(immI d8)
duke@0 2204 %{
duke@0 2205 emit_opcode(cbuf, $d8$$constant);
duke@0 2206 %}
duke@0 2207
duke@0 2208 // Emit size prefix
duke@0 2209 enc_class SizePrefix
duke@0 2210 %{
duke@0 2211 emit_opcode(cbuf, 0x66);
duke@0 2212 %}
duke@0 2213
duke@0 2214 enc_class reg(rRegI reg)
duke@0 2215 %{
duke@0 2216 emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
duke@0 2217 %}
duke@0 2218
duke@0 2219 enc_class reg_reg(rRegI dst, rRegI src)
duke@0 2220 %{
duke@0 2221 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2222 %}
duke@0 2223
duke@0 2224 enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
duke@0 2225 %{
duke@0 2226 emit_opcode(cbuf, $opcode$$constant);
duke@0 2227 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2228 %}
duke@0 2229
duke@0 2230 enc_class cmpfp_fixup()
duke@0 2231 %{
duke@0 2232 // jnp,s exit
duke@0 2233 emit_opcode(cbuf, 0x7B);
duke@0 2234 emit_d8(cbuf, 0x0A);
duke@0 2235
duke@0 2236 // pushfq
duke@0 2237 emit_opcode(cbuf, 0x9C);
duke@0 2238
duke@0 2239 // andq $0xffffff2b, (%rsp)
duke@0 2240 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2241 emit_opcode(cbuf, 0x81);
duke@0 2242 emit_opcode(cbuf, 0x24);
duke@0 2243 emit_opcode(cbuf, 0x24);
duke@0 2244 emit_d32(cbuf, 0xffffff2b);
duke@0 2245
duke@0 2246 // popfq
duke@0 2247 emit_opcode(cbuf, 0x9D);
duke@0 2248
duke@0 2249 // nop (target for branch to avoid branch to branch)
duke@0 2250 emit_opcode(cbuf, 0x90);
duke@0 2251 %}
duke@0 2252
duke@0 2253 enc_class cmpfp3(rRegI dst)
duke@0 2254 %{
duke@0 2255 int dstenc = $dst$$reg;
duke@0 2256
duke@0 2257 // movl $dst, -1
duke@0 2258 if (dstenc >= 8) {
duke@0 2259 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2260 }
duke@0 2261 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
duke@0 2262 emit_d32(cbuf, -1);
duke@0 2263
duke@0 2264 // jp,s done
duke@0 2265 emit_opcode(cbuf, 0x7A);
duke@0 2266 emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A);
duke@0 2267
duke@0 2268 // jb,s done
duke@0 2269 emit_opcode(cbuf, 0x72);
duke@0 2270 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
duke@0 2271
duke@0 2272 // setne $dst
duke@0 2273 if (dstenc >= 4) {
duke@0 2274 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 2275 }
duke@0 2276 emit_opcode(cbuf, 0x0F);
duke@0 2277 emit_opcode(cbuf, 0x95);
duke@0 2278 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
duke@0 2279
duke@0 2280 // movzbl $dst, $dst
duke@0 2281 if (dstenc >= 4) {
duke@0 2282 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
duke@0 2283 }
duke@0 2284 emit_opcode(cbuf, 0x0F);
duke@0 2285 emit_opcode(cbuf, 0xB6);
duke@0 2286 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
duke@0 2287 %}
duke@0 2288
duke@0 2289 enc_class cdql_enc(no_rax_rdx_RegI div)
duke@0 2290 %{
duke@0 2291 // Full implementation of Java idiv and irem; checks for
duke@0 2292 // special case as described in JVM spec., p.243 & p.271.
duke@0 2293 //
duke@0 2294 // normal case special case
duke@0 2295 //
duke@0 2296 // input : rax: dividend min_int
duke@0 2297 // reg: divisor -1
duke@0 2298 //
duke@0 2299 // output: rax: quotient (= rax idiv reg) min_int
duke@0 2300 // rdx: remainder (= rax irem reg) 0
duke@0 2301 //
duke@0 2302 // Code sequence:
duke@0 2303 //
duke@0 2304 // 0: 3d 00 00 00 80 cmp $0x80000000,%eax
duke@0 2305 // 5: 75 07/08 jne e <normal>
duke@0 2306 // 7: 33 d2 xor %edx,%edx
duke@0 2307 // [div >= 8 -> offset + 1]
duke@0 2308 // [REX_B]
duke@0 2309 // 9: 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2310 // c: 74 03/04 je 11 <done>
duke@0 2311 // 000000000000000e <normal>:
duke@0 2312 // e: 99 cltd
duke@0 2313 // [div >= 8 -> offset + 1]
duke@0 2314 // [REX_B]
duke@0 2315 // f: f7 f9 idiv $div
duke@0 2316 // 0000000000000011 <done>:
duke@0 2317
duke@0 2318 // cmp $0x80000000,%eax
duke@0 2319 emit_opcode(cbuf, 0x3d);
duke@0 2320 emit_d8(cbuf, 0x00);
duke@0 2321 emit_d8(cbuf, 0x00);
duke@0 2322 emit_d8(cbuf, 0x00);
duke@0 2323 emit_d8(cbuf, 0x80);
duke@0 2324
duke@0 2325 // jne e <normal>
duke@0 2326 emit_opcode(cbuf, 0x75);
duke@0 2327 emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);
duke@0 2328
duke@0 2329 // xor %edx,%edx
duke@0 2330 emit_opcode(cbuf, 0x33);
duke@0 2331 emit_d8(cbuf, 0xD2);
duke@0 2332
duke@0 2333 // cmp $0xffffffffffffffff,$div
duke@0 2334 if ($div$$reg >= 8) {
duke@0 2335 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2336 }
duke@0 2337 emit_opcode(cbuf, 0x83);
duke@0 2338 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2339 emit_d8(cbuf, 0xFF);
duke@0 2340
duke@0 2341 // je 11 <done>
duke@0 2342 emit_opcode(cbuf, 0x74);
duke@0 2343 emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);
duke@0 2344
duke@0 2345 // <normal>
duke@0 2346 // cltd
duke@0 2347 emit_opcode(cbuf, 0x99);
duke@0 2348
duke@0 2349 // idivl (note: must be emitted by the user of this rule)
duke@0 2350 // <done>
duke@0 2351 %}
duke@0 2352
duke@0 2353 enc_class cdqq_enc(no_rax_rdx_RegL div)
duke@0 2354 %{
duke@0 2355 // Full implementation of Java ldiv and lrem; checks for
duke@0 2356 // special case as described in JVM spec., p.243 & p.271.
duke@0 2357 //
duke@0 2358 // normal case special case
duke@0 2359 //
duke@0 2360 // input : rax: dividend min_long
duke@0 2361 // reg: divisor -1
duke@0 2362 //
duke@0 2363 // output: rax: quotient (= rax idiv reg) min_long
duke@0 2364 // rdx: remainder (= rax irem reg) 0
duke@0 2365 //
duke@0 2366 // Code sequence:
duke@0 2367 //
duke@0 2368 // 0: 48 ba 00 00 00 00 00 mov $0x8000000000000000,%rdx
duke@0 2369 // 7: 00 00 80
duke@0 2370 // a: 48 39 d0 cmp %rdx,%rax
duke@0 2371 // d: 75 08 jne 17 <normal>
duke@0 2372 // f: 33 d2 xor %edx,%edx
duke@0 2373 // 11: 48 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2374 // 15: 74 05 je 1c <done>
duke@0 2375 // 0000000000000017 <normal>:
duke@0 2376 // 17: 48 99 cqto
duke@0 2377 // 19: 48 f7 f9 idiv $div
duke@0 2378 // 000000000000001c <done>:
duke@0 2379
duke@0 2380 // mov $0x8000000000000000,%rdx
duke@0 2381 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2382 emit_opcode(cbuf, 0xBA);
duke@0 2383 emit_d8(cbuf, 0x00);
duke@0 2384 emit_d8(cbuf, 0x00);
duke@0 2385 emit_d8(cbuf, 0x00);
duke@0 2386 emit_d8(cbuf, 0x00);
duke@0 2387 emit_d8(cbuf, 0x00);
duke@0 2388 emit_d8(cbuf, 0x00);
duke@0 2389 emit_d8(cbuf, 0x00);
duke@0 2390 emit_d8(cbuf, 0x80);
duke@0 2391
duke@0 2392 // cmp %rdx,%rax
duke@0 2393 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2394 emit_opcode(cbuf, 0x39);
duke@0 2395 emit_d8(cbuf, 0xD0);
duke@0 2396
duke@0 2397 // jne 17 <normal>
duke@0 2398 emit_opcode(cbuf, 0x75);
duke@0 2399 emit_d8(cbuf, 0x08);
duke@0 2400
duke@0 2401 // xor %edx,%edx
duke@0 2402 emit_opcode(cbuf, 0x33);
duke@0 2403 emit_d8(cbuf, 0xD2);
duke@0 2404
duke@0 2405 // cmp $0xffffffffffffffff,$div
duke@0 2406 emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
duke@0 2407 emit_opcode(cbuf, 0x83);
duke@0 2408 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2409 emit_d8(cbuf, 0xFF);
duke@0 2410
duke@0 2411 // je 1c <done>
duke@0 2412 emit_opcode(cbuf, 0x74);
duke@0 2413 emit_d8(cbuf, 0x05);
duke@0 2414
duke@0 2415 // <normal>
duke@0 2416 // cqto
duke@0 2417 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2418 emit_opcode(cbuf, 0x99);
duke@0 2419
duke@0 2420 // idivq (note: must be emitted by the user of this rule)
duke@0 2421 // <done>
duke@0 2422 %}
duke@0 2423
duke@0 2424 // Opcode enc_class for 8/32-bit immediate instructions with sign extension
duke@0 2425 enc_class OpcSE(immI imm)
duke@0 2426 %{
duke@0 2427 // Emit primary opcode and set sign-extend bit
duke@0 2428 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2429 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2430 emit_opcode(cbuf, $primary | 0x02);
duke@0 2431 } else {
duke@0 2432 // 32-bit immediate
duke@0 2433 emit_opcode(cbuf, $primary);
duke@0 2434 }
duke@0 2435 %}
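// Example (assuming the common group-1 ALU encodings): with $primary set to
// 0x81 (op r/m32, imm32), OR-ing in 0x02 yields 0x83 (op r/m32, sign-extended
// imm8); Con8or32 further below emits the immediate at the matching width, so
// the two enc_classes are meant to be paired.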
duke@0 2436
duke@0 2437 enc_class OpcSErm(rRegI dst, immI imm)
duke@0 2438 %{
duke@0 2439 // OpcSEr/m
duke@0 2440 int dstenc = $dst$$reg;
duke@0 2441 if (dstenc >= 8) {
duke@0 2442 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2443 dstenc -= 8;
duke@0 2444 }
duke@0 2445 // Emit primary opcode and set sign-extend bit
duke@0 2446 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2447 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2448 emit_opcode(cbuf, $primary | 0x02);
duke@0 2449 } else {
duke@0 2450 // 32-bit immediate
duke@0 2451 emit_opcode(cbuf, $primary);
duke@0 2452 }
duke@0 2453 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2454 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2455 %}
duke@0 2456
duke@0 2457 enc_class OpcSErm_wide(rRegL dst, immI imm)
duke@0 2458 %{
duke@0 2459 // OpcSEr/m
duke@0 2460 int dstenc = $dst$$reg;
duke@0 2461 if (dstenc < 8) {
duke@0 2462 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2463 } else {
duke@0 2464 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2465 dstenc -= 8;
duke@0 2466 }
duke@0 2467 // Emit primary opcode and set sign-extend bit
duke@0 2468 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2469 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2470 emit_opcode(cbuf, $primary | 0x02);
duke@0 2471 } else {
duke@0 2472 // 32-bit immediate
duke@0 2473 emit_opcode(cbuf, $primary);
duke@0 2474 }
duke@0 2475 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2476 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2477 %}
duke@0 2478
duke@0 2479 enc_class Con8or32(immI imm)
duke@0 2480 %{
duke@0 2481 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2482 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2483 $$$emit8$imm$$constant;
duke@0 2484 } else {
duke@0 2485 // 32-bit immediate
duke@0 2486 $$$emit32$imm$$constant;
duke@0 2487 }
duke@0 2488 %}
duke@0 2489
duke@0 2490 enc_class Lbl(label labl)
duke@0 2491 %{
duke@0 2492 // JMP, CALL
duke@0 2493 Label* l = $labl$$label;
duke@0 2494 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
duke@0 2495 %}
duke@0 2496
duke@0 2497 enc_class LblShort(label labl)
duke@0 2498 %{
duke@0 2499 // JMP, CALL
duke@0 2500 Label* l = $labl$$label;
duke@0 2501 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
duke@0 2502 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
duke@0 2503 emit_d8(cbuf, disp);
duke@0 2504 %}
duke@0 2505
duke@0 2506 enc_class opc2_reg(rRegI dst)
duke@0 2507 %{
duke@0 2508 // BSWAP
duke@0 2509 emit_cc(cbuf, $secondary, $dst$$reg);
duke@0 2510 %}
duke@0 2511
duke@0 2512 enc_class opc3_reg(rRegI dst)
duke@0 2513 %{
duke@0 2514 // BSWAP
duke@0 2515 emit_cc(cbuf, $tertiary, $dst$$reg);
duke@0 2516 %}
duke@0 2517
duke@0 2518 enc_class reg_opc(rRegI div)
duke@0 2519 %{
duke@0 2520 // INC, DEC, IDIV, IMOD, JMP indirect, ...
duke@0 2521 emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
duke@0 2522 %}
duke@0 2523
duke@0 2524 enc_class Jcc(cmpOp cop, label labl)
duke@0 2525 %{
duke@0 2526 // JCC
duke@0 2527 Label* l = $labl$$label;
duke@0 2528 $$$emit8$primary;
duke@0 2529 emit_cc(cbuf, $secondary, $cop$$cmpcode);
duke@0 2530 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
duke@0 2531 %}
duke@0 2532
duke@0 2533 enc_class JccShort (cmpOp cop, label labl)
duke@0 2534 %{
duke@0 2535 // JCC
duke@0 2536 Label *l = $labl$$label;
duke@0 2537 emit_cc(cbuf, $primary, $cop$$cmpcode);
duke@0 2538 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
duke@0 2539 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
duke@0 2540 emit_d8(cbuf, disp);
duke@0 2541 %}
duke@0 2542
duke@0 2543 enc_class enc_cmov(cmpOp cop)
duke@0 2544 %{
duke@0 2545 // CMOV
duke@0 2546 $$$emit8$primary;
duke@0 2547 emit_cc(cbuf, $secondary, $cop$$cmpcode);
duke@0 2548 %}
duke@0 2549
duke@0 2550 enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src)
duke@0 2551 %{
duke@0 2552 // Invert sense of branch from sense of cmov
duke@0 2553 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
duke@0 2554 emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8)
duke@0 2555 ? (UseXmmRegToRegMoveAll ? 3 : 4)
duke@0 2556 : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX
duke@0 2557 // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src)
duke@0 2558 if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3);
duke@0 2559 if ($dst$$reg < 8) {
duke@0 2560 if ($src$$reg >= 8) {
duke@0 2561 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2562 }
duke@0 2563 } else {
duke@0 2564 if ($src$$reg < 8) {
duke@0 2565 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2566 } else {
duke@0 2567 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2568 }
duke@0 2569 }
duke@0 2570 emit_opcode(cbuf, 0x0F);
duke@0 2571 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 2572 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2573 %}
duke@0 2574
duke@0 2575 enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src)
duke@0 2576 %{
duke@0 2577 // Invert sense of branch from sense of cmov
duke@0 2578 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
duke@0 2579 emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX
duke@0 2580
duke@0 2581 // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src)
duke@0 2582 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
duke@0 2583 if ($dst$$reg < 8) {
duke@0 2584 if ($src$$reg >= 8) {
duke@0 2585 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2586 }
duke@0 2587 } else {
duke@0 2588 if ($src$$reg < 8) {
duke@0 2589 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2590 } else {
duke@0 2591 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2592 }
duke@0 2593 }
duke@0 2594 emit_opcode(cbuf, 0x0F);
duke@0 2595 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 2596 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2597 %}
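// Both cmov encodings above synthesize a conditional move as "branch over a
// register-to-register move": the emit_d8 operand after the inverted Jcc is
// the byte length of the move that follows (3/4/5 bytes depending on REX and
// UseXmmRegToRegMoveAll for floats, 4/5 bytes for doubles), mirroring the
// xmm -> xmm spill sizes computed earlier in this file.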
duke@0 2598
duke@0 2599 enc_class enc_PartialSubtypeCheck()
duke@0 2600 %{
duke@0 2601 Register Rrdi = as_Register(RDI_enc); // result register
duke@0 2602 Register Rrax = as_Register(RAX_enc); // super class
duke@0 2603 Register Rrcx = as_Register(RCX_enc); // killed
duke@0 2604 Register Rrsi = as_Register(RSI_enc); // sub class
jrose@644 2605 Label miss;
jrose@644 2606 const bool set_cond_codes = true;
duke@0 2607
duke@0 2608 MacroAssembler _masm(&cbuf);
jrose@644 2609 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
jrose@644 2610 NULL, &miss,
jrose@644 2611 /*set_cond_codes:*/ true);
duke@0 2612 if ($primary) {
never@304 2613 __ xorptr(Rrdi, Rrdi);
duke@0 2614 }
duke@0 2615 __ bind(miss);
duke@0 2616 %}
duke@0 2617
duke@0 2618 enc_class Java_To_Interpreter(method meth)
duke@0 2619 %{
duke@0 2620 // CALL Java_To_Interpreter
duke@0 2621 // This is the instruction starting address for relocation info.
duke@0 2622 cbuf.set_inst_mark();
duke@0 2623 $$$emit8$primary;
duke@0 2624 // CALL directly to the runtime
duke@0 2625 emit_d32_reloc(cbuf,
duke@0 2626 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2627 runtime_call_Relocation::spec(),
duke@0 2628 RELOC_DISP32);
duke@0 2629 %}
duke@0 2630
twisti@1137 2631 enc_class preserve_SP %{
twisti@1137 2632 debug_only(int off0 = cbuf.code_size());
twisti@1137 2633 MacroAssembler _masm(&cbuf);
twisti@1137 2634 // RBP is preserved across all calls, even compiled calls.
twisti@1137 2635 // Use it to preserve RSP in places where the callee might change the SP.
twisti@1137 2636 __ movptr(rbp, rsp);
twisti@1137 2637 debug_only(int off1 = cbuf.code_size());
twisti@1137 2638 assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
twisti@1137 2639 %}
twisti@1137 2640
twisti@1137 2641 enc_class restore_SP %{
twisti@1137 2642 MacroAssembler _masm(&cbuf);
twisti@1137 2643 __ movptr(rsp, rbp);
twisti@1137 2644 %}
twisti@1137 2645
duke@0 2646 enc_class Java_Static_Call(method meth)
duke@0 2647 %{
duke@0 2648 // JAVA STATIC CALL
duke@0 2649 // CALL to fixup routine. Fixup routine uses ScopeDesc info to
duke@0 2650 // determine who we intended to call.
duke@0 2651 cbuf.set_inst_mark();
duke@0 2652 $$$emit8$primary;
duke@0 2653
duke@0 2654 if (!_method) {
duke@0 2655 emit_d32_reloc(cbuf,
duke@0 2656 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2657 runtime_call_Relocation::spec(),
duke@0 2658 RELOC_DISP32);
duke@0 2659 } else if (_optimized_virtual) {
duke@0 2660 emit_d32_reloc(cbuf,
duke@0 2661 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2662 opt_virtual_call_Relocation::spec(),
duke@0 2663 RELOC_DISP32);
duke@0 2664 } else {
duke@0 2665 emit_d32_reloc(cbuf,
duke@0 2666 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2667 static_call_Relocation::spec(),
duke@0 2668 RELOC_DISP32);
duke@0 2669 }
duke@0 2670 if (_method) {
duke@0 2671 // Emit stub for static call
duke@0 2672 emit_java_to_interp(cbuf);
duke@0 2673 }
duke@0 2674 %}
duke@0 2675
duke@0 2676 enc_class Java_Dynamic_Call(method meth)
duke@0 2677 %{
duke@0 2678 // JAVA DYNAMIC CALL
duke@0 2679 // !!!!!
duke@0 2680 // Generate "movq rax, -1", placeholder instruction to load oop-info
duke@0 2681 // emit_call_dynamic_prologue( cbuf );
duke@0 2682 cbuf.set_inst_mark();
duke@0 2683
duke@0 2684 // movq rax, -1
duke@0 2685 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2686 emit_opcode(cbuf, 0xB8 | RAX_enc);
duke@0 2687 emit_d64_reloc(cbuf,
duke@0 2688 (int64_t) Universe::non_oop_word(),
duke@0 2689 oop_Relocation::spec_for_immediate(), RELOC_IMM64);
duke@0 2690 address virtual_call_oop_addr = cbuf.inst_mark();
duke@0 2691 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
duke@0 2692 // who we intended to call.
duke@0 2693 cbuf.set_inst_mark();
duke@0 2694 $$$emit8$primary;
duke@0 2695 emit_d32_reloc(cbuf,
duke@0 2696 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2697 virtual_call_Relocation::spec(virtual_call_oop_addr),
duke@0 2698 RELOC_DISP32);
duke@0 2699 %}
duke@0 2700
duke@0 2701 enc_class Java_Compiled_Call(method meth)
duke@0 2702 %{
duke@0 2703 // JAVA COMPILED CALL
duke@0 2704 int disp = in_bytes(methodOopDesc:: from_compiled_offset());
duke@0 2705
duke@0 2706 // XXX XXX the offset is 128 in 1.5 NON-PRODUCT builds !!!
duke@0 2707 // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");
duke@0 2708
duke@0 2709 // callq *disp(%rax)
duke@0 2710 cbuf.set_inst_mark();
duke@0 2711 $$$emit8$primary;
duke@0 2712 if (disp < 0x80) {
duke@0 2713 emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
duke@0 2714 emit_d8(cbuf, disp); // Displacement
duke@0 2715 } else {
duke@0 2716 emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
duke@0 2717 emit_d32(cbuf, disp); // Displacement
duke@0 2718 }
duke@0 2719 %}
duke@0 2720
duke@0 2721 enc_class reg_opc_imm(rRegI dst, immI8 shift)
duke@0 2722 %{
duke@0 2723 // SAL, SAR, SHR
duke@0 2724 int dstenc = $dst$$reg;
duke@0 2725 if (dstenc >= 8) {
duke@0 2726 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2727 dstenc -= 8;
duke@0 2728 }
duke@0 2729 $$$emit8$primary;
duke@0 2730 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2731 $$$emit8$shift$$constant;
duke@0 2732 %}
duke@0 2733
duke@0 2734 enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
duke@0 2735 %{
duke@0 2736 // SAL, SAR, SHR
duke@0 2737 int dstenc = $dst$$reg;
duke@0 2738 if (dstenc < 8) {
duke@0 2739 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2740 } else {
duke@0 2741 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2742 dstenc -= 8;
duke@0 2743 }
duke@0 2744 $$$emit8$primary;
duke@0 2745 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2746 $$$emit8$shift$$constant;
duke@0 2747 %}
duke@0 2748
duke@0 2749 enc_class load_immI(rRegI dst, immI src)
duke@0 2750 %{
duke@0 2751 int dstenc = $dst$$reg;
duke@0 2752 if (dstenc >= 8) {
duke@0 2753 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2754 dstenc -= 8;
duke@0 2755 }
duke@0 2756 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2757 $$$emit32$src$$constant;
duke@0 2758 %}
duke@0 2759
duke@0 2760 enc_class load_immL(rRegL dst, immL src)
duke@0 2761 %{
duke@0 2762 int dstenc = $dst$$reg;
duke@0 2763 if (dstenc < 8) {
duke@0 2764 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2765 } else {
duke@0 2766 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2767 dstenc -= 8;
duke@0 2768 }
duke@0 2769 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2770 emit_d64(cbuf, $src$$constant);
duke@0 2771 %}
duke@0 2772
duke@0 2773 enc_class load_immUL32(rRegL dst, immUL32 src)
duke@0 2774 %{
duke@0 2775 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2776 int dstenc = $dst$$reg;
duke@0 2777 if (dstenc >= 8) {
duke@0 2778 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2779 dstenc -= 8;
duke@0 2780 }
duke@0 2781 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2782 $$$emit32$src$$constant;
duke@0 2783 %}
duke@0 2784
duke@0 2785 enc_class load_immL32(rRegL dst, immL32 src)
duke@0 2786 %{
duke@0 2787 int dstenc = $dst$$reg;
duke@0 2788 if (dstenc < 8) {
duke@0 2789 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2790 } else {
duke@0 2791 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2792 dstenc -= 8;
duke@0 2793 }
duke@0 2794 emit_opcode(cbuf, 0xC7);
duke@0 2795 emit_rm(cbuf, 0x03, 0x00, dstenc);
duke@0 2796 $$$emit32$src$$constant;
duke@0 2797 %}
duke@0 2798
duke@0 2799 enc_class load_immP31(rRegP dst, immP32 src)
duke@0 2800 %{
duke@0 2801 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2802 int dstenc = $dst$$reg;
duke@0 2803 if (dstenc >= 8) {
duke@0 2804 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2805 dstenc -= 8;
duke@0 2806 }
duke@0 2807 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2808 $$$emit32$src$$constant;
duke@0 2809 %}
duke@0 2810
duke@0 2811 enc_class load_immP(rRegP dst, immP src)
duke@0 2812 %{
duke@0 2813 int dstenc = $dst$$reg;
duke@0 2814 if (dstenc < 8) {
duke@0 2815 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2816 } else {
duke@0 2817 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2818 dstenc -= 8;
duke@0 2819 }
duke@0 2820 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2821 // This next line should be generated from ADLC
duke@0 2822 if ($src->constant_is_oop()) {
duke@0 2823 emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
duke@0 2824 } else {
duke@0 2825 emit_d64(cbuf, $src$$constant);
duke@0 2826 }
duke@0 2827 %}
duke@0 2828
duke@0 2829 enc_class load_immF(regF dst, immF con)
duke@0 2830 %{
duke@0 2831 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 2832 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2833 emit_float_constant(cbuf, $con$$constant);
duke@0 2834 %}
duke@0 2835
duke@0 2836 enc_class load_immD(regD dst, immD con)
duke@0 2837 %{
duke@0 2838 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 2839 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2840 emit_double_constant(cbuf, $con$$constant);
duke@0 2841 %}
duke@0 2842
duke@0 2843 enc_class load_conF (regF dst, immF con) %{ // Load float constant
duke@0 2844 emit_opcode(cbuf, 0xF3);
duke@0 2845 if ($dst$$reg >= 8) {
duke@0 2846 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2847 }
duke@0 2848 emit_opcode(cbuf, 0x0F);
duke@0 2849 emit_opcode(cbuf, 0x10);
duke@0 2850 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2851 emit_float_constant(cbuf, $con$$constant);
duke@0 2852 %}
duke@0 2853
duke@0 2854 enc_class load_conD (regD dst, immD con) %{ // Load double constant
duke@0 2855 // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
duke@0 2856 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 2857 if ($dst$$reg >= 8) {
duke@0 2858 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2859 }
duke@0 2860 emit_opcode(cbuf, 0x0F);
duke@0 2861 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
duke@0 2862 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2863 emit_double_constant(cbuf, $con$$constant);
duke@0 2864 %}
duke@0 2865
duke@0 2866 // Encode a reg-reg copy. If it is useless, then empty encoding.
duke@0 2867 enc_class enc_copy(rRegI dst, rRegI src)
duke@0 2868 %{
duke@0 2869 encode_copy(cbuf, $dst$$reg, $src$$reg);
duke@0 2870 %}
duke@0 2871
duke@0 2872 // Encode xmm reg-reg copy. If it is useless, then empty encoding.
duke@0 2873 enc_class enc_CopyXD( RegD dst, RegD src ) %{
duke@0 2874 encode_CopyXD( cbuf, $dst$$reg, $src$$reg );
duke@0 2875 %}
duke@0 2876
duke@0 2877 enc_class enc_copy_always(rRegI dst, rRegI src)
duke@0 2878 %{
duke@0 2879 int srcenc = $src$$reg;
duke@0 2880 int dstenc = $dst$$reg;
duke@0 2881
duke@0 2882 if (dstenc < 8) {
duke@0 2883 if (srcenc >= 8) {
duke@0 2884 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2885 srcenc -= 8;
duke@0 2886 }
duke@0 2887 } else {
duke@0 2888 if (srcenc < 8) {
duke@0 2889 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2890 } else {
duke@0 2891 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2892 srcenc -= 8;
duke@0 2893 }
duke@0 2894 dstenc -= 8;
duke@0 2895 }
duke@0 2896
duke@0 2897 emit_opcode(cbuf, 0x8B);
duke@0 2898 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 2899 %}
duke@0 2900
duke@0 2901 enc_class enc_copy_wide(rRegL dst, rRegL src)
duke@0 2902 %{
duke@0 2903 int srcenc = $src$$reg;
duke@0 2904 int dstenc = $dst$$reg;
duke@0 2905
duke@0 2906 if (dstenc != srcenc) {
duke@0 2907 if (dstenc < 8) {
duke@0 2908 if (srcenc < 8) {
duke@0 2909 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2910 } else {
duke@0 2911 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2912 srcenc -= 8;
duke@0 2913 }
duke@0 2914 } else {
duke@0 2915 if (srcenc < 8) {
duke@0 2916 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 2917 } else {
duke@0 2918 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 2919 srcenc -= 8;
duke@0 2920 }
duke@0 2921 dstenc -= 8;
duke@0 2922 }
duke@0 2923 emit_opcode(cbuf, 0x8B);
duke@0 2924 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 2925 }
duke@0 2926 %}
duke@0 2927
duke@0 2928 enc_class Con32(immI src)
duke@0 2929 %{
duke@0 2930 // Output immediate
duke@0 2931 $$$emit32$src$$constant;
duke@0 2932 %}
duke@0 2933
duke@0 2934 enc_class Con64(immL src)
duke@0 2935 %{
duke@0 2936 // Output immediate
duke@0 2937 emit_d64($src$$constant);
duke@0 2938 %}
duke@0 2939
duke@0 2940 enc_class Con32F_as_bits(immF src)
duke@0 2941 %{
duke@0 2942 // Output Float immediate bits
duke@0 2943 jfloat jf = $src$$constant;
duke@0 2944 jint jf_as_bits = jint_cast(jf);
duke@0 2945 emit_d32(cbuf, jf_as_bits);
duke@0 2946 %}
duke@0 2947
duke@0 2948 enc_class Con16(immI src)
duke@0 2949 %{
duke@0 2950 // Output immediate
duke@0 2951 $$$emit16$src$$constant;
duke@0 2952 %}
duke@0 2953
duke@0 2954 // How is this different from Con32??? XXX
duke@0 2955 enc_class Con_d32(immI src)
duke@0 2956 %{
duke@0 2957 emit_d32(cbuf,$src$$constant);
duke@0 2958 %}
duke@0 2959
duke@0 2960 enc_class conmemref (rRegP t1) %{ // Con32(storeImmI)
duke@0 2961 // Output immediate memory reference
duke@0 2962 emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
duke@0 2963 emit_d32(cbuf, 0x00);
duke@0 2964 %}
duke@0 2965
duke@0 2966 enc_class jump_enc(rRegL switch_val, rRegI dest) %{
duke@0 2967 MacroAssembler masm(&cbuf);
duke@0 2968
duke@0 2969 Register switch_reg = as_Register($switch_val$$reg);
duke@0 2970 Register dest_reg = as_Register($dest$$reg);
duke@0 2971 address table_base = masm.address_table_constant(_index2label);
duke@0 2972
duke@0 2973 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 2974 // to do that and the compiler is using that register as one it can allocate.
duke@0 2975 // So we build it all by hand.
duke@0 2976 // Address index(noreg, switch_reg, Address::times_1);
duke@0 2977 // ArrayAddress dispatch(table, index);
duke@0 2978
duke@0 2979 Address dispatch(dest_reg, switch_reg, Address::times_1);
duke@0 2980
duke@0 2981 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 2982 masm.jmp(dispatch);
duke@0 2983 %}
duke@0 2984
duke@0 2985 enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
duke@0 2986 MacroAssembler masm(&cbuf);
duke@0 2987
duke@0 2988 Register switch_reg = as_Register($switch_val$$reg);
duke@0 2989 Register dest_reg = as_Register($dest$$reg);
duke@0 2990 address table_base = masm.address_table_constant(_index2label);
duke@0 2991
duke@0 2992 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 2993 // to do that and the compiler is using that register as one it can allocate.
duke@0 2994 // So we build it all by hand.
duke@0 2995 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
duke@0 2996 // ArrayAddress dispatch(table, index);
duke@0 2997
duke@0 2998 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
duke@0 2999
duke@0 3000 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 3001 masm.jmp(dispatch);
duke@0 3002 %}
duke@0 3003
duke@0 3004 enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
duke@0 3005 MacroAssembler masm(&cbuf);
duke@0 3006
duke@0 3007 Register switch_reg = as_Register($switch_val$$reg);
duke@0 3008 Register dest_reg = as_Register($dest$$reg);
duke@0 3009 address table_base = masm.address_table_constant(_index2label);
duke@0 3010
duke@0 3011 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 3012 // to do that and the compiler is using that register as one it can allocate.
duke@0 3013 // So we build it all by hand.
duke@0 3014 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
duke@0 3015 // ArrayAddress dispatch(table, index);
duke@0 3016
duke@0 3017 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
duke@0 3018 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 3019 masm.jmp(dispatch);
duke@0 3020
duke@0 3021 %}
duke@0 3022
duke@0 3023 enc_class lock_prefix()
duke@0 3024 %{
duke@0 3025 if (os::is_MP()) {
duke@0 3026 emit_opcode(cbuf, 0xF0); // lock
duke@0 3027 }
duke@0 3028 %}
duke@0 3029
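  // The REX_* encoding classes below emit only the REX prefix required for
  // the given operand combination; the opcode and ModRM/SIB bytes come from
  // the instruction's other encodings.  For a memory operand, REX.B covers a
  // base register >= 8 and REX.X an index register >= 8; the _wide variants
  // additionally set REX.W for 64-bit operand size.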
duke@0 3030 enc_class REX_mem(memory mem)
duke@0 3031 %{
duke@0 3032 if ($mem$$base >= 8) {
duke@0 3033 if ($mem$$index < 8) {
duke@0 3034 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3035 } else {
duke@0 3036 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3037 }
duke@0 3038 } else {
duke@0 3039 if ($mem$$index >= 8) {
duke@0 3040 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3041 }
duke@0 3042 }
duke@0 3043 %}
duke@0 3044
duke@0 3045 enc_class REX_mem_wide(memory mem)
duke@0 3046 %{
duke@0 3047 if ($mem$$base >= 8) {
duke@0 3048 if ($mem$$index < 8) {
duke@0 3049 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3050 } else {
duke@0 3051 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 3052 }
duke@0 3053 } else {
duke@0 3054 if ($mem$$index < 8) {
duke@0 3055 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3056 } else {
duke@0 3057 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 3058 }
duke@0 3059 }
duke@0 3060 %}
duke@0 3061
duke@0 3062 // for byte regs
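  // (encodings 4-7 -- SPL, BPL, SIL, DIL -- are only addressable as byte
  // registers when some REX prefix is present, hence the check against 4)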
duke@0 3063 enc_class REX_breg(rRegI reg)
duke@0 3064 %{
duke@0 3065 if ($reg$$reg >= 4) {
duke@0 3066 emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3067 }
duke@0 3068 %}
duke@0 3069
duke@0 3070 // for byte regs
duke@0 3071 enc_class REX_reg_breg(rRegI dst, rRegI src)
duke@0 3072 %{
duke@0 3073 if ($dst$$reg < 8) {
duke@0 3074 if ($src$$reg >= 4) {
duke@0 3075 emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3076 }
duke@0 3077 } else {
duke@0 3078 if ($src$$reg < 8) {
duke@0 3079 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3080 } else {
duke@0 3081 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3082 }
duke@0 3083 }
duke@0 3084 %}
duke@0 3085
duke@0 3086 // for byte regs
duke@0 3087 enc_class REX_breg_mem(rRegI reg, memory mem)
duke@0 3088 %{
duke@0 3089 if ($reg$$reg < 8) {
duke@0 3090 if ($mem$$base < 8) {
duke@0 3091 if ($mem$$index >= 8) {
duke@0 3092 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3093 } else if ($reg$$reg >= 4) {
duke@0 3094 emit_opcode(cbuf, Assembler::REX);
duke@0 3095 }
duke@0 3096 } else {
duke@0 3097 if ($mem$$index < 8) {
duke@0 3098 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3099 } else {
duke@0 3100 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3101 }
duke@0 3102 }
duke@0 3103 } else {
duke@0 3104 if ($mem$$base < 8) {
duke@0 3105 if ($mem$$index < 8) {
duke@0 3106 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3107 } else {
duke@0 3108 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 3109 }
duke@0 3110 } else {
duke@0 3111 if ($mem$$index < 8) {
duke@0 3112 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3113 } else {
duke@0 3114 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 3115 }
duke@0 3116 }
duke@0 3117 }
duke@0 3118 %}
duke@0 3119
duke@0 3120 enc_class REX_reg(rRegI reg)
duke@0 3121 %{
duke@0 3122 if ($reg$$reg >= 8) {
duke@0 3123 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3124 }
duke@0 3125 %}
duke@0 3126
duke@0 3127 enc_class REX_reg_wide(rRegI reg)
duke@0 3128 %{
duke@0 3129 if ($reg$$reg < 8) {
duke@0 3130 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3131 } else {
duke@0 3132 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3133 }
duke@0 3134 %}
duke@0 3135
duke@0 3136 enc_class REX_reg_reg(rRegI dst, rRegI src)
duke@0 3137 %{
duke@0 3138 if ($dst$$reg < 8) {
duke@0 3139 if ($src$$reg >= 8) {
duke@0 3140 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3141 }
duke@0 3142 } else {
duke@0 3143 if ($src$$reg < 8) {
duke@0 3144 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3145 } else {
duke@0 3146 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3147 }
duke@0 3148 }
duke@0 3149 %}
duke@0 3150
duke@0 3151 enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
duke@0 3152 %{
duke@0 3153 if ($dst$$reg < 8) {
duke@0 3154 if ($src$$reg < 8) {
duke@0 3155 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3156 } else {
duke@0 3157 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3158 }
duke@0 3159 } else {
duke@0 3160 if ($src$$reg < 8) {
duke@0 3161 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3162 } else {
duke@0 3163 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3164 }
duke@0 3165 }
duke@0 3166 %}
duke@0 3167
duke@0 3168 enc_class REX_reg_mem(rRegI reg, memory mem)
duke@0 3169 %{
duke@0 3170 if ($reg$$reg < 8) {
duke@0 3171 if ($mem$$base < 8) {
duke@0 3172 if ($mem$$index >= 8) {
duke@0 3173 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3174 }
duke@0 3175 } else {
duke@0 3176 if ($mem$$index < 8) {
duke@0 3177 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3178 } else {
duke@0 3179 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3180 }
duke@0 3181 }
duke@0 3182 } else {
duke@0 3183 if ($mem$$base < 8) {
duke@0 3184 if ($mem$$index < 8) {
duke@0 3185 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3186 } else {
duke@0 3187 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 3188 }
duke@0 3189 } else {
duke@0 3190 if ($mem$$index < 8) {
duke@0 3191 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3192 } else {
duke@0 3193 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 3194 }
duke@0 3195 }
duke@0 3196 }
duke@0 3197 %}
duke@0 3198
duke@0 3199 enc_class REX_reg_mem_wide(rRegL reg, memory mem)
duke@0 3200 %{
duke@0 3201 if ($reg$$reg < 8) {
duke@0 3202 if ($mem$$base < 8) {
duke@0 3203 if ($mem$$index < 8) {
duke@0 3204 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3205 } else {
duke@0 3206 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 3207 }
duke@0 3208 } else {
duke@0 3209 if ($mem$$index < 8) {
duke@0 3210 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3211 } else {
duke@0 3212 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 3213 }
duke@0 3214 }
duke@0 3215 } else {
duke@0 3216 if ($mem$$base < 8) {
duke@0 3217 if ($mem$$index < 8) {
duke@0 3218 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3219 } else {
duke@0 3220 emit_opcode(cbuf, Assembler::REX_WRX);
duke@0 3221 }
duke@0 3222 } else {
duke@0 3223 if ($mem$$index < 8) {
duke@0 3224 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3225 } else {
duke@0 3226 emit_opcode(cbuf, Assembler::REX_WRXB);
duke@0 3227 }
duke@0 3228 }
duke@0 3229 }
duke@0 3230 %}
duke@0 3231
duke@0 3232 enc_class reg_mem(rRegI ereg, memory mem)
duke@0 3233 %{
duke@0 3234     // High registers are handled in encode_RegMem
duke@0 3235 int reg = $ereg$$reg;
duke@0 3236 int base = $mem$$base;
duke@0 3237 int index = $mem$$index;
duke@0 3238 int scale = $mem$$scale;
duke@0 3239 int disp = $mem$$disp;
duke@0 3240 bool disp_is_oop = $mem->disp_is_oop();
duke@0 3241
duke@0 3242 encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
duke@0 3243 %}
duke@0 3244
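  // Memory form where the ModRM reg field carries an opcode extension
  // (the /digit of the instruction) rather than a register.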
duke@0 3245 enc_class RM_opc_mem(immI rm_opcode, memory mem)
duke@0 3246 %{
duke@0 3247 int rm_byte_opcode = $rm_opcode$$constant;
duke@0 3248
duke@0 3249     // High registers are handled in encode_RegMem
duke@0 3250 int base = $mem$$base;
duke@0 3251 int index = $mem$$index;
duke@0 3252 int scale = $mem$$scale;
duke@0 3253 int displace = $mem$$disp;
duke@0 3254
duke@0 3255 bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
duke@0 3256 // working with static
duke@0 3257 // globals
duke@0 3258 encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
duke@0 3259 disp_is_oop);
duke@0 3260 %}
duke@0 3261
duke@0 3262 enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
duke@0 3263 %{
duke@0 3264 int reg_encoding = $dst$$reg;
duke@0 3265 int base = $src0$$reg; // 0xFFFFFFFF indicates no base
duke@0 3266 int index = 0x04; // 0x04 indicates no index
duke@0 3267 int scale = 0x00; // 0x00 indicates no scale
duke@0 3268 int displace = $src1$$constant; // 0x00 indicates no displacement
duke@0 3269 bool disp_is_oop = false;
duke@0 3270 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
duke@0 3271 disp_is_oop);
duke@0 3272 %}
duke@0 3273
duke@0 3274 enc_class neg_reg(rRegI dst)
duke@0 3275 %{
duke@0 3276 int dstenc = $dst$$reg;
duke@0 3277 if (dstenc >= 8) {
duke@0 3278 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3279 dstenc -= 8;
duke@0 3280 }
duke@0 3281 // NEG $dst
duke@0 3282 emit_opcode(cbuf, 0xF7);
duke@0 3283 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 3284 %}
duke@0 3285
duke@0 3286 enc_class neg_reg_wide(rRegI dst)
duke@0 3287 %{
duke@0 3288 int dstenc = $dst$$reg;
duke@0 3289 if (dstenc < 8) {
duke@0 3290 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3291 } else {
duke@0 3292 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3293 dstenc -= 8;
duke@0 3294 }
duke@0 3295 // NEG $dst
duke@0 3296 emit_opcode(cbuf, 0xF7);
duke@0 3297 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 3298 %}
duke@0 3299
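  // The setcc encodings below write a byte register, so encodings 4-7 need
  // an empty REX prefix and encodings >= 8 need REX.B (see REX_breg above).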
duke@0 3300 enc_class setLT_reg(rRegI dst)
duke@0 3301 %{
duke@0 3302 int dstenc = $dst$$reg;
duke@0 3303 if (dstenc >= 8) {
duke@0 3304 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3305 dstenc -= 8;
duke@0 3306 } else if (dstenc >= 4) {
duke@0 3307 emit_opcode(cbuf, Assembler::REX);
duke@0 3308 }
duke@0 3309 // SETLT $dst
duke@0 3310 emit_opcode(cbuf, 0x0F);
duke@0 3311 emit_opcode(cbuf, 0x9C);
duke@0 3312 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 3313 %}
duke@0 3314
duke@0 3315 enc_class setNZ_reg(rRegI dst)
duke@0 3316 %{
duke@0 3317 int dstenc = $dst$$reg;
duke@0 3318 if (dstenc >= 8) {
duke@0 3319 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3320 dstenc -= 8;
duke@0 3321 } else if (dstenc >= 4) {
duke@0 3322 emit_opcode(cbuf, Assembler::REX);
duke@0 3323 }
duke@0 3324 // SETNZ $dst
duke@0 3325 emit_opcode(cbuf, 0x0F);
duke@0 3326 emit_opcode(cbuf, 0x95);
duke@0 3327 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 3328 %}
duke@0 3329
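  // Branch-free conditional add (the cadd_cmpLT idiom): subtract $q from $p,
  // turn the resulting borrow into an all-ones/all-zeros mask in $tmp with
  // sbb, mask $y with it, and add the result back into $p.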
duke@0 3330 enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
duke@0 3331 rcx_RegI tmp)
duke@0 3332 %{
duke@0 3333 // cadd_cmpLT
duke@0 3334
duke@0 3335 int tmpReg = $tmp$$reg;
duke@0 3336
duke@0 3337 int penc = $p$$reg;
duke@0 3338 int qenc = $q$$reg;
duke@0 3339 int yenc = $y$$reg;
duke@0 3340
duke@0 3341 // subl $p,$q
duke@0 3342 if (penc < 8) {
duke@0 3343 if (qenc >= 8) {
duke@0 3344 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3345 }
duke@0 3346 } else {
duke@0 3347 if (qenc < 8) {
duke@0 3348 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3349 } else {
duke@0 3350 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3351 }
duke@0 3352 }
duke@0 3353 emit_opcode(cbuf, 0x2B);
duke@0 3354 emit_rm(cbuf, 0x3, penc & 7, qenc & 7);
duke@0 3355
duke@0 3356 // sbbl $tmp, $tmp
duke@0 3357 emit_opcode(cbuf, 0x1B);
duke@0 3358 emit_rm(cbuf, 0x3, tmpReg, tmpReg);
duke@0 3359
duke@0 3360 // andl $tmp, $y
duke@0 3361 if (yenc >= 8) {
duke@0 3362 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3363 }
duke@0 3364 emit_opcode(cbuf, 0x23);
duke@0 3365 emit_rm(cbuf, 0x3, tmpReg, yenc & 7);
duke@0 3366
duke@0 3367 // addl $p,$tmp
duke@0 3368 if (penc >= 8) {
duke@0 3369 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3370 }
duke@0 3371 emit_opcode(cbuf, 0x03);
duke@0 3372 emit_rm(cbuf, 0x3, penc & 7, tmpReg);
duke@0 3373 %}
duke@0 3374
duke@0 3375   // Compare the longs and set -1, 0, or 1 into dst
duke@0 3376 enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
duke@0 3377 %{
duke@0 3378 int src1enc = $src1$$reg;
duke@0 3379 int src2enc = $src2$$reg;
duke@0 3380 int dstenc = $dst$$reg;
duke@0 3381
duke@0 3382 // cmpq $src1, $src2
duke@0 3383 if (src1enc < 8) {
duke@0 3384 if (src2enc < 8) {
duke@0 3385 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3386 } else {
duke@0 3387 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3388 }
duke@0 3389 } else {
duke@0 3390 if (src2enc < 8) {
duke@0 3391 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3392 } else {
duke@0 3393 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3394 }
duke@0 3395 }
duke@0 3396 emit_opcode(cbuf, 0x3B);
duke@0 3397 emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);
duke@0 3398
duke@0 3399 // movl $dst, -1
duke@0 3400 if (dstenc >= 8) {
duke@0 3401 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3402 }
duke@0 3403 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
duke@0 3404 emit_d32(cbuf, -1);
duke@0 3405
duke@0 3406 // jl,s done
duke@0 3407 emit_opcode(cbuf, 0x7C);
duke@0 3408 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
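    // (the skip is over the setne and movzbl below: 3 bytes each without a
    // REX prefix, 4 bytes each when dstenc >= 4 forces one)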
duke@0 3409
duke@0 3410 // setne $dst
duke@0 3411 if (dstenc >= 4) {
duke@0 3412 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3413 }
duke@0 3414 emit_opcode(cbuf, 0x0F);
duke@0 3415 emit_opcode(cbuf, 0x95);
duke@0 3416 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
duke@0 3417
duke@0 3418 // movzbl $dst, $dst
duke@0 3419 if (dstenc >= 4) {
duke@0 3420 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
duke@0 3421 }
duke@0 3422 emit_opcode(cbuf, 0x0F);
duke@0 3423 emit_opcode(cbuf, 0xB6);
duke@0 3424 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
duke@0 3425 %}
duke@0 3426
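  // Move an x87 TOS result into an XMM register through memory: FSTP to
  // [rsp], reload with movsd (or movlpd when !UseXmmLoadAndClearUpper), then
  // release the 8-byte slot (reserved earlier, e.g. by Push_SrcXD).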
duke@0 3427 enc_class Push_ResultXD(regD dst) %{
duke@0 3428 int dstenc = $dst$$reg;
duke@0 3429
duke@0 3430 store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]
duke@0 3431
duke@0 3432 // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
duke@0 3433 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 3434 if (dstenc >= 8) {
duke@0 3435 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3436 }
duke@0 3437 emit_opcode (cbuf, 0x0F );
duke@0 3438 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12 );
duke@0 3439 encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);
duke@0 3440
duke@0 3441 // add rsp,8
duke@0 3442 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3443 emit_opcode(cbuf,0x83);
duke@0 3444 emit_rm(cbuf,0x3, 0x0, RSP_enc);
duke@0 3445 emit_d8(cbuf,0x08);
duke@0 3446 %}
duke@0 3447
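  // Push an XMM double onto the x87 stack through memory: reserve 8 bytes,
  // store the source with movsd, then load [rsp] onto the FPU stack.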
duke@0 3448 enc_class Push_SrcXD(regD src) %{
duke@0 3449 int srcenc = $src$$reg;
duke@0 3450
duke@0 3451 // subq rsp,#8
duke@0 3452 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3453 emit_opcode(cbuf, 0x83);
duke@0 3454 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
duke@0 3455 emit_d8(cbuf, 0x8);
duke@0 3456
duke@0 3457 // movsd [rsp],src
duke@0 3458 emit_opcode(cbuf, 0xF2);
duke@0 3459 if (srcenc >= 8) {
duke@0 3460 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3461 }
duke@0 3462 emit_opcode(cbuf, 0x0F);
duke@0 3463 emit_opcode(cbuf, 0x11);
duke@0 3464 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);
duke@0 3465
duke@0 3466 // fldd [rsp]
duke@0 3467 emit_opcode(cbuf, 0x66);
duke@0 3468 emit_opcode(cbuf, 0xDD);
duke@0 3469 encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
duke@0 3470 %}
duke@0 3471
duke@0 3472
duke@0 3473 enc_class movq_ld(regD dst, memory mem) %{
duke@0 3474 MacroAssembler _masm(&cbuf);
twisti@624 3475 __ movq($dst$$XMMRegister, $mem$$Address);
duke@0 3476 %}
duke@0 3477
duke@0 3478 enc_class movq_st(memory mem, regD src) %{
duke@0 3479 MacroAssembler _masm(&cbuf);
twisti@624 3480 __ movq($mem$$Address, $src$$XMMRegister);
duke@0 3481 %}
duke@0 3482
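  // Replicate the low byte of $src across the low eight bytes of $dst
  // (copy, punpcklbw with itself, then pshuflw with an all-zero selector);
  // used by the replicate-byte instructions.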
duke@0 3483 enc_class pshufd_8x8(regF dst, regF src) %{
duke@0 3484 MacroAssembler _masm(&cbuf);
duke@0 3485
duke@0 3486 encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
duke@0 3487 __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
duke@0 3488 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
duke@0 3489 %}
duke@0 3490
duke@0 3491 enc_class pshufd_4x16(regF dst, regF src) %{
duke@0 3492 MacroAssembler _masm(&cbuf);
duke@0 3493
duke@0 3494 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
duke@0 3495 %}
duke@0 3496
duke@0 3497 enc_class pshufd(regD dst, regD src, int mode) %{
duke@0 3498 MacroAssembler _masm(&cbuf);
duke@0 3499
duke@0 3500 __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
duke@0 3501 %}
duke@0 3502
duke@0 3503 enc_class pxor(regD dst, regD src) %{
duke@0 3504 MacroAssembler _masm(&cbuf);
duke@0 3505
duke@0 3506 __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
duke@0 3507 %}
duke@0 3508
duke@0 3509 enc_class mov_i2x(regD dst, rRegI src) %{
duke@0 3510 MacroAssembler _masm(&cbuf);
duke@0 3511
duke@0 3512 __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
duke@0 3513 %}
duke@0 3514
duke@0 3515 // obj: object to lock
duke@0 3516 // box: box address (header location) -- killed
duke@0 3517 // tmp: rax -- killed
duke@0 3518 // scr: rbx -- killed
duke@0 3519 //
duke@0 3520 // What follows is a direct transliteration of fast_lock() and fast_unlock()
duke@0 3521 // from i486.ad. See that file for comments.
duke@0 3522 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
duke@0 3523 // use the shorter encoding. (Movl clears the high-order 32-bits).
duke@0 3524
duke@0 3525
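  // Rough shape of the code emitted below when no EmitSync diagnostic bits
  // are set: load the mark word and branch to the inflated path if the
  // monitor bit (0x02) is set; otherwise store (mark | unlocked) into the
  // box and try to CAS the box address into the object header.  On CAS
  // failure, the old mark word is checked against RSP to recognize a
  // recursive stack-lock, and the result (zero if recursive) is stored back
  // into the box so that the flags tested by the enclosing instruct reflect
  // success or failure.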
duke@0 3526 enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
duke@0 3527 %{
duke@0 3528 Register objReg = as_Register((int)$obj$$reg);
duke@0 3529 Register boxReg = as_Register((int)$box$$reg);
duke@0 3530 Register tmpReg = as_Register($tmp$$reg);
duke@0 3531 Register scrReg = as_Register($scr$$reg);
duke@0 3532 MacroAssembler masm(&cbuf);
duke@0 3533
duke@0 3534 // Verify uniqueness of register assignments -- necessary but not sufficient
duke@0 3535 assert (objReg != boxReg && objReg != tmpReg &&
duke@0 3536 objReg != scrReg && tmpReg != scrReg, "invariant") ;
duke@0 3537
duke@0 3538 if (_counters != NULL) {
duke@0 3539 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
duke@0 3540 }
duke@0 3541 if (EmitSync & 1) {
never@304 3542 // Without cast to int32_t a movptr will destroy r10 which is typically obj
never@304 3543 masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
never@304 3544 masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
duke@0 3545 } else
duke@0 3546 if (EmitSync & 2) {
duke@0 3547 Label DONE_LABEL;
duke@0 3548 if (UseBiasedLocking) {
duke@0 3549 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
duke@0 3550 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
duke@0 3551 }
never@304 3552 // QQQ was movl...
never@304 3553 masm.movptr(tmpReg, 0x1);
never@304 3554 masm.orptr(tmpReg, Address(objReg, 0));
never@304 3555 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3556 if (os::is_MP()) {
duke@0 3557 masm.lock();
duke@0 3558 }
never@304 3559 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 3560 masm.jcc(Assembler::equal, DONE_LABEL);
duke@0 3561
duke@0 3562 // Recursive locking
never@304 3563 masm.subptr(tmpReg, rsp);
never@304 3564 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@304 3565 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3566
duke@0 3567 masm.bind(DONE_LABEL);
duke@0 3568 masm.nop(); // avoid branch to branch
duke@0 3569 } else {
duke@0 3570 Label DONE_LABEL, IsInflated, Egress;
duke@0 3571
never@304 3572 masm.movptr(tmpReg, Address(objReg, 0)) ;
never@304 3573 masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
never@304 3574 masm.jcc (Assembler::notZero, IsInflated) ;
never@304 3575
duke@0 3576 // it's stack-locked, biased or neutral
duke@0 3577 // TODO: optimize markword triage order to reduce the number of
duke@0 3578 // conditional branches in the most common cases.
duke@0 3579 // Beware -- there's a subtle invariant that fetch of the markword
duke@0 3580 // at [FETCH], below, will never observe a biased encoding (*101b).
duke@0 3581 // If this invariant is not held we'll suffer exclusion (safety) failure.
duke@0 3582
kvn@420 3583 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3584 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
never@304 3585 masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
duke@0 3586 }
duke@0 3587
never@304 3588     // was the q-form; will it destroy the high bits?
never@304 3589 masm.orl (tmpReg, 1) ;
never@304 3590 masm.movptr(Address(boxReg, 0), tmpReg) ;
never@304 3591 if (os::is_MP()) { masm.lock(); }
never@304 3592 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 3593 if (_counters != NULL) {
duke@0 3594 masm.cond_inc32(Assembler::equal,
duke@0 3595 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3596 }
duke@0 3597 masm.jcc (Assembler::equal, DONE_LABEL);
duke@0 3598
duke@0 3599 // Recursive locking
never@304 3600 masm.subptr(tmpReg, rsp);
never@304 3601 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@304 3602 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3603 if (_counters != NULL) {
duke@0 3604 masm.cond_inc32(Assembler::equal,
duke@0 3605 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3606 }
duke@0 3607 masm.jmp (DONE_LABEL) ;
duke@0 3608
duke@0 3609 masm.bind (IsInflated) ;
duke@0 3610 // It's inflated
duke@0 3611
duke@0 3612 // TODO: someday avoid the ST-before-CAS penalty by
duke@0 3613 // relocating (deferring) the following ST.
duke@0 3614 // We should also think about trying a CAS without having
duke@0 3615 // fetched _owner. If the CAS is successful we may
duke@0 3616 // avoid an RTO->RTS upgrade on the $line.
never@304 3617 // Without cast to int32_t a movptr will destroy r10 which is typically obj
never@304 3618 masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
never@304 3619
never@304 3620 masm.mov (boxReg, tmpReg) ;
never@304 3621 masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
never@304 3622 masm.testptr(tmpReg, tmpReg) ;
never@304 3623 masm.jcc (Assembler::notZero, DONE_LABEL) ;
duke@0 3624
duke@0 3625 // It's inflated and appears unlocked
never@304 3626 if (os::is_MP()) { masm.lock(); }
never@304 3627 masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@0 3628 // Intentional fall-through into DONE_LABEL ...
duke@0 3629
duke@0 3630 masm.bind (DONE_LABEL) ;
duke@0 3631 masm.nop () ; // avoid jmp to jmp
duke@0 3632 }
duke@0 3633 %}
duke@0 3634
duke@0 3635 // obj: object to unlock
duke@0 3636 // box: box address (displaced header location), killed
duke@0 3637 // RBX: killed tmp; cannot be obj nor box
duke@0 3638 enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
duke@0 3639 %{
duke@0 3640
duke@0 3641 Register objReg = as_Register($obj$$reg);
duke@0 3642 Register boxReg = as_Register($box$$reg);
duke@0 3643 Register tmpReg = as_Register($tmp$$reg);
duke@0 3644 MacroAssembler masm(&cbuf);
duke@0 3645
never@304 3646 if (EmitSync & 4) {
never@304 3647 masm.cmpptr(rsp, 0) ;
duke@0 3648 } else
duke@0 3649 if (EmitSync & 8) {
duke@0 3650 Label DONE_LABEL;
duke@0 3651 if (UseBiasedLocking) {
duke@0 3652 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3653 }
duke@0 3654
duke@0 3655 // Check whether the displaced header is 0
duke@0 3656     // (=> recursive unlock)
never@304 3657 masm.movptr(tmpReg, Address(boxReg, 0));
never@304 3658 masm.testptr(tmpReg, tmpReg);
duke@0 3659 masm.jcc(Assembler::zero, DONE_LABEL);
duke@0 3660
duke@0 3661 // If not recursive lock, reset the header to displaced header
duke@0 3662 if (os::is_MP()) {
duke@0 3663 masm.lock();
duke@0 3664 }
never@304 3665 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3666 masm.bind(DONE_LABEL);
duke@0 3667 masm.nop(); // avoid branch to branch
duke@0 3668 } else {
duke@0 3669 Label DONE_LABEL, Stacked, CheckSucc ;
duke@0 3670
kvn@420 3671 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3672 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3673 }
never@304 3674
never@304 3675 masm.movptr(tmpReg, Address(objReg, 0)) ;
never@304 3676 masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
never@304 3677 masm.jcc (Assembler::zero, DONE_LABEL) ;
never@304 3678 masm.testl (tmpReg, 0x02) ;
never@304 3679 masm.jcc (Assembler::zero, Stacked) ;
never@304 3680
duke@0 3681 // It's inflated
never@304 3682 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
never@304 3683 masm.xorptr(boxReg, r15_thread) ;
never@304 3684 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
never@304 3685 masm.jcc (Assembler::notZero, DONE_LABEL) ;
never@304 3686 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
never@304 3687 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
never@304 3688 masm.jcc (Assembler::notZero, CheckSucc) ;
never@304 3689 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
never@304 3690 masm.jmp (DONE_LABEL) ;
never@304 3691
never@304 3692 if ((EmitSync & 65536) == 0) {
duke@0 3693 Label LSuccess, LGoSlowPath ;
duke@0 3694 masm.bind (CheckSucc) ;
never@304 3695 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3696 masm.jcc (Assembler::zero, LGoSlowPath) ;
duke@0 3697
duke@0 3698 // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
duke@0 3699       // explicit ST;MEMBAR combination, but masm doesn't currently support
duke@0 3700 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
duke@0 3701 // are all faster when the write buffer is populated.
never@304 3702 masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3703 if (os::is_MP()) {
never@304 3704 masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
duke@0 3705 }
never@304 3706 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3707 masm.jcc (Assembler::notZero, LSuccess) ;
duke@0 3708
never@304 3709 masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX
duke@0 3710 if (os::is_MP()) { masm.lock(); }
never@304 3711 masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
duke@0 3712 masm.jcc (Assembler::notEqual, LSuccess) ;
duke@0 3713 // Intentional fall-through into slow-path
duke@0 3714
duke@0 3715 masm.bind (LGoSlowPath) ;
duke@0 3716 masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
duke@0 3717 masm.jmp (DONE_LABEL) ;
duke@0 3718
duke@0 3719 masm.bind (LSuccess) ;
duke@0 3720 masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success
duke@0 3721 masm.jmp (DONE_LABEL) ;
duke@0 3722 }
duke@0 3723
never@304 3724 masm.bind (Stacked) ;
never@304 3725 masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
never@304 3726 if (os::is_MP()) { masm.lock(); }
never@304 3727 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3728
duke@0 3729 if (EmitSync & 65536) {
duke@0 3730 masm.bind (CheckSucc) ;
duke@0 3731 }
duke@0 3732 masm.bind(DONE_LABEL);
duke@0 3733 if (EmitSync & 32768) {
duke@0 3734 masm.nop(); // avoid branch to branch
duke@0 3735 }
duke@0 3736 }
duke@0 3737 %}
duke@0 3738
rasbold@169 3739
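  // Tail-jump to OptoRuntime::rethrow_stub(); the 32-bit displacement is
  // emitted with a runtime-call relocation.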
duke@0 3740 enc_class enc_rethrow()
duke@0 3741 %{
duke@0 3742 cbuf.set_inst_mark();
duke@0 3743 emit_opcode(cbuf, 0xE9); // jmp entry
duke@0 3744 emit_d32_reloc(cbuf,
duke@0 3745 (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4),
duke@0 3746 runtime_call_Relocation::spec(),
duke@0 3747 RELOC_DISP32);
duke@0 3748 %}
duke@0 3749
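  // The absF/absD/negF/negD encodings below AND (0x0F 0x54, andps/andpd) or
  // XOR (0x0F 0x57, xorps/xorpd) the register with a sign-mask/sign-flip
  // constant from StubRoutines::x86, addressed RIP-relative (mod=00, rm=101).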
duke@0 3750 enc_class absF_encoding(regF dst)
duke@0 3751 %{
duke@0 3752 int dstenc = $dst$$reg;
never@304 3753 address signmask_address = (address) StubRoutines::x86::float_sign_mask();
duke@0 3754
duke@0 3755 cbuf.set_inst_mark();
duke@0 3756 if (dstenc >= 8) {
duke@0 3757 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3758 dstenc -= 8;
duke@0 3759 }
duke@0 3760 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3761 emit_opcode(cbuf, 0x0F);
duke@0 3762 emit_opcode(cbuf, 0x54);
duke@0 3763 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3764 emit_d32_reloc(cbuf, signmask_address);
duke@0 3765 %}
duke@0 3766
duke@0 3767 enc_class absD_encoding(regD dst)
duke@0 3768 %{
duke@0 3769 int dstenc = $dst$$reg;
never@304 3770 address signmask_address = (address) StubRoutines::x86::double_sign_mask();
duke@0 3771
duke@0 3772 cbuf.set_inst_mark();
duke@0 3773 emit_opcode(cbuf, 0x66);
duke@0 3774 if (dstenc >= 8) {
duke@0 3775 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3776 dstenc -= 8;
duke@0 3777 }
duke@0 3778 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3779 emit_opcode(cbuf, 0x0F);
duke@0 3780 emit_opcode(cbuf, 0x54);
duke@0 3781 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3782 emit_d32_reloc(cbuf, signmask_address);
duke@0 3783 %}
duke@0 3784
duke@0 3785 enc_class negF_encoding(regF dst)
duke@0 3786 %{
duke@0 3787 int dstenc = $dst$$reg;
never@304 3788 address signflip_address = (address) StubRoutines::x86::float_sign_flip();
duke@0 3789
duke@0 3790 cbuf.set_inst_mark();
duke@0 3791 if (dstenc >= 8) {
duke@0 3792 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3793 dstenc -= 8;
duke@0 3794 }
duke@0 3795 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3796 emit_opcode(cbuf, 0x0F);
duke@0 3797 emit_opcode(cbuf, 0x57);
duke@0 3798 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3799 emit_d32_reloc(cbuf, signflip_address);
duke@0 3800 %}
duke@0 3801
duke@0 3802 enc_class negD_encoding(regD dst)
duke@0 3803 %{
duke@0 3804 int dstenc = $dst$$reg;
never@304 3805 address signflip_address = (address) StubRoutines::x86::double_sign_flip();
duke@0 3806
duke@0 3807 cbuf.set_inst_mark();
duke@0 3808 emit_opcode(cbuf, 0x66);
duke@0 3809 if (dstenc >= 8) {
duke@0 3810 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3811 dstenc -= 8;
duke@0 3812 }
duke@0 3813 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3814 emit_opcode(cbuf, 0x0F);
duke@0 3815 emit_opcode(cbuf, 0x57);
duke@0 3816 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3817 emit_d32_reloc(cbuf, signflip_address);
duke@0 3818 %}
duke@0 3819
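  // Slow-path fixup for float-to-int conversion: the cvttss2si-style
  // conversion produces 0x80000000 on NaN or overflow, so when $dst holds
  // that sentinel the source is spilled to the stack and
  // StubRoutines::x86::f2i_fixup() is called; its result is popped into $dst.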
duke@0 3820 enc_class f2i_fixup(rRegI dst, regF src)
duke@0 3821 %{
duke@0 3822 int dstenc = $dst$$reg;
duke@0 3823 int srcenc = $src$$reg;
duke@0 3824
duke@0 3825 // cmpl $dst, #0x80000000
duke@0 3826 if (dstenc >= 8) {
duke@0 3827 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3828 }
duke@0 3829 emit_opcode(cbuf, 0x81);
duke@0 3830 emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
duke@0 3831 emit_d32(cbuf, 0x80000000);
duke@0 3832
duke@0 3833 // jne,s done
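    // (skip distance covers the subq/movss/call/popq below: 15 bytes, plus
    // one byte for each REX prefix forced by srcenc >= 8 or dstenc >= 8)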
duke@0 3834 emit_opcode(cbuf, 0x75);
duke@0 3835 if (srcenc < 8 && dstenc < 8) {
duke@0 3836 emit_d8(cbuf, 0xF);
duke@0 3837 } else if (srcenc >= 8 && dstenc >= 8) {
duke@0 3838 emit_d8(cbuf, 0x11);
duke@0 3839 } else {
duke@0 3840 emit_d8(cbuf, 0x10);
duke@0 3841 }
duke@0 3842
duke@0 3843 // subq rsp, #8
duke@0 3844 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3845 emit_opcode(cbuf, 0x83);
duke@0 3846 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
duke@0 3847 emit_d8(cbuf, 8);
duke@0 3848
duke@0 3849 // movss [rsp], $src
duke@0 3850 emit_opcode(cbuf, 0xF3);
duke@0 3851 if (srcenc >= 8) {
duke@0 3852 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3853 }
duke@0 3854 emit_opcode(cbuf, 0x0F);
duke@0 3855 emit_opcode(cbuf, 0x11);
duke@0 3856 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
duke@0 3857
duke@0 3858 // call f2i_fixup
duke@0 3859 cbuf.set_inst_mark();
duke@0 3860 emit_opcode(cbuf, 0xE8);
duke@0 3861 emit_d32_reloc(cbuf,
duke@0 3862 (int)
never@304 3863 (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
duke@0 3864 runtime_call_Relocation::spec(),
duke@0 3865 RELOC_DISP32);
duke@0 3866
duke@0 3867 // popq $dst
duke@0 3868 if (dstenc >= 8) {
duke@0 3869 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3870 }
duke@0 3871 emit_opcode(cbuf, 0x58 | (dstenc & 7));
duke@0 3872
duke@0 3873 // done:
duke@0 3874 %}
duke@0 3875
duke@0 3876 enc_class f2l_fixup(rRegL dst, regF src)
duke@0 3877 %{
duke@0 3878 int dstenc = $dst$$reg;
duke@0 3879 int srcenc = $src$$reg;
never@304 3880 address const_address = (address) StubRoutines::x86::double_sign_flip();
duke@0 3881
duke@0 3882 // cmpq $dst, [0x8000000000000000]
duke@0 3883 cbuf.set_inst_mark();
duke@0 3884 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 3885 emit_opcode(cbuf, 0x39);
duke@0 3886 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3887 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
duke@0 3888 emit_d32_reloc(cbuf, const_address);
duke@0 3889
duke@0 3890
duke@0 3891 // jne,s done
duke@0 3892 emit_opcode(cbuf, 0x75);
duke@0 3893 if (srcenc < 8 && dstenc < 8) {
duke@0 3894 emit_d8(cbuf, 0xF);
duke@0 3895 } else if (srcenc >= 8 && dstenc >= 8) {
duke@0 3896 emit_d8(cbuf, 0x11);
duke@0 3897 } else {
duke@0 3898 emit_d8(cbuf, 0x10);
duke@0 3899 }
duke@0 3900
duke@0 3901 // subq rsp, #8
duke@0 3902 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3903 emit_opcode(cbuf, 0x83);
duke@0 3904 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
duke@0 3905 emit_d8(cbuf, 8);
duke@0 3906
duke@0 3907 // movss [rsp], $src
duke@0 3908 emit_opcode(cbuf, 0xF3);
duke@0 3909 if (srcenc >= 8) {
duke@0 3910 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3911 }
duke@0 3912 emit_opcode(cbuf, 0x0F);