annotate src/cpu/x86/vm/x86_64.ad @ 1274:2883969d09e7

6910664: C2: java/util/Arrays/Sorting.java fails with DeoptimizeALot flag
Summary: Matcher::float_in_double should be true only when FPU is used for floats.
Reviewed-by: never, twisti
author kvn
date Fri, 19 Feb 2010 10:04:16 -0800
parents 97125851f396
children d7f654633cfe
rev   line source
duke@0 1 //
twisti@624 2 // Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 //
duke@0 5 // This code is free software; you can redistribute it and/or modify it
duke@0 6 // under the terms of the GNU General Public License version 2 only, as
duke@0 7 // published by the Free Software Foundation.
duke@0 8 //
duke@0 9 // This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 // version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 // accompanied this code).
duke@0 14 //
duke@0 15 // You should have received a copy of the GNU General Public License version
duke@0 16 // 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 //
duke@0 19 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 // CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 // have any questions.
duke@0 22 //
duke@0 23 //
duke@0 24
duke@0 25 // AMD64 Architecture Description File
duke@0 26
duke@0 27 //----------REGISTER DEFINITION BLOCK------------------------------------------
duke@0 28 // This information is used by the matcher and the register allocator to
duke@0 29 // describe individual registers and classes of registers within the target
duke@0 30 // architecture.
duke@0 31
duke@0 32 register %{
duke@0 33 //----------Architecture Description Register Definitions----------------------
duke@0 34 // General Registers
duke@0 35 // "reg_def" name ( register save type, C convention save type,
duke@0 36 // ideal register type, encoding );
duke@0 37 // Register Save Types:
duke@0 38 //
duke@0 39 // NS = No-Save: The register allocator assumes that these registers
duke@0 40 // can be used without saving upon entry to the method, &
duke@0 41 // that they do not need to be saved at call sites.
duke@0 42 //
duke@0 43 // SOC = Save-On-Call: The register allocator assumes that these registers
duke@0 44 // can be used without saving upon entry to the method,
duke@0 45 // but that they must be saved at call sites.
duke@0 46 //
duke@0 47 // SOE = Save-On-Entry: The register allocator assumes that these registers
duke@0 48 // must be saved before using them upon entry to the
duke@0 49 // method, but they do not need to be saved at call
duke@0 50 // sites.
duke@0 51 //
duke@0 52 // AS = Always-Save: The register allocator assumes that these registers
duke@0 53 // must be saved before using them upon entry to the
duke@0 54 // method, & that they must be saved at call sites.
duke@0 55 //
duke@0 56 // Ideal Register Type is used to determine how to save & restore a
duke@0 57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
duke@0 58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
duke@0 59 //
duke@0 60 // The encoding number is the actual bit-pattern placed into the opcodes.
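
// Illustrative annotation (not part of the original file): reading the first
// definition below, "reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());",
// against the field list above: the first SOC is the save type seen by compiled
// Java code, the second SOC is the save type under the native C calling
// convention, Op_RegI selects LoadI/StoreI for spills, 0 is the hardware
// encoding placed into the opcode bits, and rax->as_VMReg() is the concrete
// register handle.  RBX below shows a split convention: SOC for Java code but
// SOE under the C ABI.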
duke@0 61
duke@0 62 // General Registers
duke@0 63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
duke@0 64 // used as byte registers)
duke@0 65
duke@0 66 // RBX, RSI, and RDI were previously set as save-on-entry for Java code,
duke@0 67 // but SOE was turned off in Java code due to frequent use of uncommon traps.
duke@0 68 // Now that the allocator is better, turn RSI and RDI back on as SOE registers.
duke@0 69
duke@0 70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
duke@0 71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
duke@0 72
duke@0 73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
duke@0 74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
duke@0 75
duke@0 76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
duke@0 77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
duke@0 78
duke@0 79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
duke@0 80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
duke@0 81
duke@0 82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
duke@0 83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
duke@0 84
duke@0 85 // now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code
duke@0 86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
duke@0 87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
duke@0 88
duke@0 89 #ifdef _WIN64
duke@0 90
duke@0 91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
duke@0 92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 93
duke@0 94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
duke@0 95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 96
duke@0 97 #else
duke@0 98
duke@0 99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
duke@0 100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 101
duke@0 102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
duke@0 103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 104
duke@0 105 #endif
duke@0 106
duke@0 107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
duke@0 108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
duke@0 109
duke@0 110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
duke@0 111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
duke@0 112
duke@0 113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
duke@0 114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
duke@0 115
duke@0 116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
duke@0 117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
duke@0 118
duke@0 119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
duke@0 120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
duke@0 121
duke@0 122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
duke@0 123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
duke@0 124
duke@0 125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
duke@0 126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
duke@0 127
duke@0 128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
duke@0 129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
duke@0 130
duke@0 131
duke@0 132 // Floating Point Registers
duke@0 133
duke@0 134 // XMM registers. 128-bit registers of 4 words each, labeled a-d.
duke@0 135 // Word a in each register holds a Float, words ab hold a Double. We
duke@0 136 // currently do not use the SIMD capabilities, so words cd are
duke@0 137 // unused at the moment.
duke@0 138 // XMM8-XMM15 must be encoded with REX.
duke@0 139 // Linux ABI: No register preserved across function calls
duke@0 140 // XMM0-XMM7 might hold parameters
duke@0 141 // Windows ABI: XMM6-XMM15 preserved across function calls
duke@0 142 // XMM0-XMM3 might hold parameters
duke@0 143
duke@0 144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
duke@0 145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
duke@0 146
duke@0 147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
duke@0 148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
duke@0 149
duke@0 150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
duke@0 151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
duke@0 152
duke@0 153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
duke@0 154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
duke@0 155
duke@0 156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
duke@0 157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
duke@0 158
duke@0 159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
duke@0 160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
duke@0 161
duke@0 162 #ifdef _WIN64
duke@0 163
duke@0 164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
duke@0 165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 166
duke@0 167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
duke@0 168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 169
duke@0 170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
duke@0 171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 172
duke@0 173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
duke@0 174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 175
duke@0 176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
duke@0 177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 178
duke@0 179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
duke@0 180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 181
duke@0 182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
duke@0 183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 184
duke@0 185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
duke@0 186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 187
duke@0 188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
duke@0 189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 190
duke@0 191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
duke@0 192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 193
duke@0 194 #else
duke@0 195
duke@0 196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
duke@0 197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 198
duke@0 199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
duke@0 200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 201
duke@0 202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
duke@0 203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 204
duke@0 205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
duke@0 206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 207
duke@0 208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
duke@0 209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 210
duke@0 211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
duke@0 212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 213
duke@0 214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
duke@0 215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 216
duke@0 217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
duke@0 218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 219
duke@0 220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
duke@0 221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 222
duke@0 223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
duke@0 224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 225
duke@0 226 #endif // _WIN64
duke@0 227
duke@0 228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
duke@0 229
duke@0 230 // Specify priority of register selection within phases of register
duke@0 231 // allocation. Highest priority is first. A useful heuristic is to
duke@0 232 // give registers a low priority when they are required by machine
duke@0 233 // instructions, like EAX and EDX on I486, and choose no-save registers
duke@0 234 // before save-on-call, & save-on-call before save-on-entry. Registers
duke@0 235 // which participate in fixed calling sequences should come last.
duke@0 236 // Registers which are used as pairs must fall on an even boundary.
duke@0 237
duke@0 238 alloc_class chunk0(R10, R10_H,
duke@0 239 R11, R11_H,
duke@0 240 R8, R8_H,
duke@0 241 R9, R9_H,
duke@0 242 R12, R12_H,
duke@0 243 RCX, RCX_H,
duke@0 244 RBX, RBX_H,
duke@0 245 RDI, RDI_H,
duke@0 246 RDX, RDX_H,
duke@0 247 RSI, RSI_H,
duke@0 248 RAX, RAX_H,
duke@0 249 RBP, RBP_H,
duke@0 250 R13, R13_H,
duke@0 251 R14, R14_H,
duke@0 252 R15, R15_H,
duke@0 253 RSP, RSP_H);
duke@0 254
duke@0 255 // XXX probably use 8-15 first on Linux
duke@0 256 alloc_class chunk1(XMM0, XMM0_H,
duke@0 257 XMM1, XMM1_H,
duke@0 258 XMM2, XMM2_H,
duke@0 259 XMM3, XMM3_H,
duke@0 260 XMM4, XMM4_H,
duke@0 261 XMM5, XMM5_H,
duke@0 262 XMM6, XMM6_H,
duke@0 263 XMM7, XMM7_H,
duke@0 264 XMM8, XMM8_H,
duke@0 265 XMM9, XMM9_H,
duke@0 266 XMM10, XMM10_H,
duke@0 267 XMM11, XMM11_H,
duke@0 268 XMM12, XMM12_H,
duke@0 269 XMM13, XMM13_H,
duke@0 270 XMM14, XMM14_H,
duke@0 271 XMM15, XMM15_H);
duke@0 272
duke@0 273 alloc_class chunk2(RFLAGS);
duke@0 274
duke@0 275
duke@0 276 //----------Architecture Description Register Classes--------------------------
duke@0 277 // Several register classes are automatically defined based upon information in
duke@0 278 // this architecture description.
duke@0 279 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
duke@0 280 // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
duke@0 281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
duke@0 282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
duke@0 283 //
duke@0 284
duke@0 285 // Class for all pointer registers (including RSP)
duke@0 286 reg_class any_reg(RAX, RAX_H,
duke@0 287 RDX, RDX_H,
duke@0 288 RBP, RBP_H,
duke@0 289 RDI, RDI_H,
duke@0 290 RSI, RSI_H,
duke@0 291 RCX, RCX_H,
duke@0 292 RBX, RBX_H,
duke@0 293 RSP, RSP_H,
duke@0 294 R8, R8_H,
duke@0 295 R9, R9_H,
duke@0 296 R10, R10_H,
duke@0 297 R11, R11_H,
duke@0 298 R12, R12_H,
duke@0 299 R13, R13_H,
duke@0 300 R14, R14_H,
duke@0 301 R15, R15_H);
duke@0 302
duke@0 303 // Class for all pointer registers except RSP
duke@0 304 reg_class ptr_reg(RAX, RAX_H,
duke@0 305 RDX, RDX_H,
duke@0 306 RBP, RBP_H,
duke@0 307 RDI, RDI_H,
duke@0 308 RSI, RSI_H,
duke@0 309 RCX, RCX_H,
duke@0 310 RBX, RBX_H,
duke@0 311 R8, R8_H,
duke@0 312 R9, R9_H,
duke@0 313 R10, R10_H,
duke@0 314 R11, R11_H,
duke@0 315 R13, R13_H,
duke@0 316 R14, R14_H);
duke@0 317
duke@0 318 // Class for all pointer registers except RAX and RSP
duke@0 319 reg_class ptr_no_rax_reg(RDX, RDX_H,
duke@0 320 RBP, RBP_H,
duke@0 321 RDI, RDI_H,
duke@0 322 RSI, RSI_H,
duke@0 323 RCX, RCX_H,
duke@0 324 RBX, RBX_H,
duke@0 325 R8, R8_H,
duke@0 326 R9, R9_H,
duke@0 327 R10, R10_H,
duke@0 328 R11, R11_H,
duke@0 329 R13, R13_H,
duke@0 330 R14, R14_H);
duke@0 331
duke@0 332 reg_class ptr_no_rbp_reg(RDX, RDX_H,
duke@0 333 RAX, RAX_H,
duke@0 334 RDI, RDI_H,
duke@0 335 RSI, RSI_H,
duke@0 336 RCX, RCX_H,
duke@0 337 RBX, RBX_H,
duke@0 338 R8, R8_H,
duke@0 339 R9, R9_H,
duke@0 340 R10, R10_H,
duke@0 341 R11, R11_H,
duke@0 342 R13, R13_H,
duke@0 343 R14, R14_H);
duke@0 344
duke@0 345 // Class for all pointer registers except RAX, RBX and RSP
duke@0 346 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
duke@0 347 RBP, RBP_H,
duke@0 348 RDI, RDI_H,
duke@0 349 RSI, RSI_H,
duke@0 350 RCX, RCX_H,
duke@0 351 R8, R8_H,
duke@0 352 R9, R9_H,
duke@0 353 R10, R10_H,
duke@0 354 R11, R11_H,
duke@0 355 R13, R13_H,
duke@0 356 R14, R14_H);
duke@0 357
duke@0 358 // Singleton class for RAX pointer register
duke@0 359 reg_class ptr_rax_reg(RAX, RAX_H);
duke@0 360
duke@0 361 // Singleton class for RBX pointer register
duke@0 362 reg_class ptr_rbx_reg(RBX, RBX_H);
duke@0 363
duke@0 364 // Singleton class for RSI pointer register
duke@0 365 reg_class ptr_rsi_reg(RSI, RSI_H);
duke@0 366
duke@0 367 // Singleton class for RDI pointer register
duke@0 368 reg_class ptr_rdi_reg(RDI, RDI_H);
duke@0 369
duke@0 370 // Singleton class for RBP pointer register
duke@0 371 reg_class ptr_rbp_reg(RBP, RBP_H);
duke@0 372
duke@0 373 // Singleton class for stack pointer
duke@0 374 reg_class ptr_rsp_reg(RSP, RSP_H);
duke@0 375
duke@0 376 // Singleton class for TLS pointer
duke@0 377 reg_class ptr_r15_reg(R15, R15_H);
duke@0 378
duke@0 379 // Class for all long registers (except RSP)
duke@0 380 reg_class long_reg(RAX, RAX_H,
duke@0 381 RDX, RDX_H,
duke@0 382 RBP, RBP_H,
duke@0 383 RDI, RDI_H,
duke@0 384 RSI, RSI_H,
duke@0 385 RCX, RCX_H,
duke@0 386 RBX, RBX_H,
duke@0 387 R8, R8_H,
duke@0 388 R9, R9_H,
duke@0 389 R10, R10_H,
duke@0 390 R11, R11_H,
duke@0 391 R13, R13_H,
duke@0 392 R14, R14_H);
duke@0 393
duke@0 394 // Class for all long registers except RAX, RDX (and RSP)
duke@0 395 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
duke@0 396 RDI, RDI_H,
duke@0 397 RSI, RSI_H,
duke@0 398 RCX, RCX_H,
duke@0 399 RBX, RBX_H,
duke@0 400 R8, R8_H,
duke@0 401 R9, R9_H,
duke@0 402 R10, R10_H,
duke@0 403 R11, R11_H,
duke@0 404 R13, R13_H,
duke@0 405 R14, R14_H);
duke@0 406
duke@0 407 // Class for all long registers except RCX (and RSP)
duke@0 408 reg_class long_no_rcx_reg(RBP, RBP_H,
duke@0 409 RDI, RDI_H,
duke@0 410 RSI, RSI_H,
duke@0 411 RAX, RAX_H,
duke@0 412 RDX, RDX_H,
duke@0 413 RBX, RBX_H,
duke@0 414 R8, R8_H,
duke@0 415 R9, R9_H,
duke@0 416 R10, R10_H,
duke@0 417 R11, R11_H,
duke@0 418 R13, R13_H,
duke@0 419 R14, R14_H);
duke@0 420
duke@0 421 // Class for all long registers except RAX (and RSP)
duke@0 422 reg_class long_no_rax_reg(RBP, RBP_H,
duke@0 423 RDX, RDX_H,
duke@0 424 RDI, RDI_H,
duke@0 425 RSI, RSI_H,
duke@0 426 RCX, RCX_H,
duke@0 427 RBX, RBX_H,
duke@0 428 R8, R8_H,
duke@0 429 R9, R9_H,
duke@0 430 R10, R10_H,
duke@0 431 R11, R11_H,
duke@0 432 R13, R13_H,
duke@0 433 R14, R14_H);
duke@0 434
duke@0 435 // Singleton class for RAX long register
duke@0 436 reg_class long_rax_reg(RAX, RAX_H);
duke@0 437
duke@0 438 // Singleton class for RCX long register
duke@0 439 reg_class long_rcx_reg(RCX, RCX_H);
duke@0 440
duke@0 441 // Singleton class for RDX long register
duke@0 442 reg_class long_rdx_reg(RDX, RDX_H);
duke@0 443
duke@0 444 // Class for all int registers (except RSP)
duke@0 445 reg_class int_reg(RAX,
duke@0 446 RDX,
duke@0 447 RBP,
duke@0 448 RDI,
duke@0 449 RSI,
duke@0 450 RCX,
duke@0 451 RBX,
duke@0 452 R8,
duke@0 453 R9,
duke@0 454 R10,
duke@0 455 R11,
duke@0 456 R13,
duke@0 457 R14);
duke@0 458
duke@0 459 // Class for all int registers except RCX (and RSP)
duke@0 460 reg_class int_no_rcx_reg(RAX,
duke@0 461 RDX,
duke@0 462 RBP,
duke@0 463 RDI,
duke@0 464 RSI,
duke@0 465 RBX,
duke@0 466 R8,
duke@0 467 R9,
duke@0 468 R10,
duke@0 469 R11,
duke@0 470 R13,
duke@0 471 R14);
duke@0 472
duke@0 473 // Class for all int registers except RAX, RDX (and RSP)
duke@0 474 reg_class int_no_rax_rdx_reg(RBP,
never@304 475 RDI,
duke@0 476 RSI,
duke@0 477 RCX,
duke@0 478 RBX,
duke@0 479 R8,
duke@0 480 R9,
duke@0 481 R10,
duke@0 482 R11,
duke@0 483 R13,
duke@0 484 R14);
duke@0 485
duke@0 486 // Singleton class for RAX int register
duke@0 487 reg_class int_rax_reg(RAX);
duke@0 488
duke@0 489 // Singleton class for RBX int register
duke@0 490 reg_class int_rbx_reg(RBX);
duke@0 491
duke@0 492 // Singleton class for RCX int register
duke@0 493 reg_class int_rcx_reg(RCX);
duke@0 494
duke@0 495 // Singleton class for RDX int register
duke@0 496 reg_class int_rdx_reg(RDX);
duke@0 497
duke@0 498 // Singleton class for RDI int register
duke@0 499 reg_class int_rdi_reg(RDI);
duke@0 500
duke@0 501 // Singleton class for instruction pointer
duke@0 502 // reg_class ip_reg(RIP);
duke@0 503
duke@0 504 // Singleton class for condition codes
duke@0 505 reg_class int_flags(RFLAGS);
duke@0 506
duke@0 507 // Class for all float registers
duke@0 508 reg_class float_reg(XMM0,
duke@0 509 XMM1,
duke@0 510 XMM2,
duke@0 511 XMM3,
duke@0 512 XMM4,
duke@0 513 XMM5,
duke@0 514 XMM6,
duke@0 515 XMM7,
duke@0 516 XMM8,
duke@0 517 XMM9,
duke@0 518 XMM10,
duke@0 519 XMM11,
duke@0 520 XMM12,
duke@0 521 XMM13,
duke@0 522 XMM14,
duke@0 523 XMM15);
duke@0 524
duke@0 525 // Class for all double registers
duke@0 526 reg_class double_reg(XMM0, XMM0_H,
duke@0 527 XMM1, XMM1_H,
duke@0 528 XMM2, XMM2_H,
duke@0 529 XMM3, XMM3_H,
duke@0 530 XMM4, XMM4_H,
duke@0 531 XMM5, XMM5_H,
duke@0 532 XMM6, XMM6_H,
duke@0 533 XMM7, XMM7_H,
duke@0 534 XMM8, XMM8_H,
duke@0 535 XMM9, XMM9_H,
duke@0 536 XMM10, XMM10_H,
duke@0 537 XMM11, XMM11_H,
duke@0 538 XMM12, XMM12_H,
duke@0 539 XMM13, XMM13_H,
duke@0 540 XMM14, XMM14_H,
duke@0 541 XMM15, XMM15_H);
duke@0 542 %}
duke@0 543
duke@0 544
duke@0 545 //----------SOURCE BLOCK-------------------------------------------------------
duke@0 546 // This is a block of C++ code which provides values, functions, and
duke@0 547 // definitions necessary in the rest of the architecture description
duke@0 548 source %{
never@304 549 #define RELOC_IMM64 Assembler::imm_operand
duke@0 550 #define RELOC_DISP32 Assembler::disp32_operand
duke@0 551
duke@0 552 #define __ _masm.
duke@0 553
twisti@1137 554 static int preserve_SP_size() {
twisti@1137 555 return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
twisti@1137 556 }
twisti@1137 557
duke@0 558 // !!!!! Special hack to get all types of calls to specify the byte offset
duke@0 559 // from the start of the call to the point where the return address
duke@0 560 // will point.
duke@0 561 int MachCallStaticJavaNode::ret_addr_offset()
duke@0 562 {
twisti@1137 563 int offset = 5; // 5 bytes from start of call to where return address points
twisti@1137 564 if (_method_handle_invoke)
twisti@1137 565 offset += preserve_SP_size();
twisti@1137 566 return offset;
duke@0 567 }
duke@0 568
duke@0 569 int MachCallDynamicJavaNode::ret_addr_offset()
duke@0 570 {
duke@0 571 return 15; // 15 bytes from start of call to where return address points
duke@0 572 }
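
// Note (illustrative, inferred from this file's own padding logic): the 15 bytes
// are consistent with the dynamic-call sequence being a 10-byte movq of the
// inline-cache constant followed by a 5-byte call; CallDynamicJavaDirectNode::
// compute_padding() below likewise skips 11 bytes (movq plus the call opcode
// byte) before the 4-byte call displacement.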
duke@0 573
duke@0 574 // In os_cpu .ad file
duke@0 575 // int MachCallRuntimeNode::ret_addr_offset()
duke@0 576
duke@0 577 // Indicate if the safepoint node needs the polling page as an input.
duke@0 578 // Since amd64 does not have absolute addressing but RIP-relative
duke@0 579 // addressing and the polling page is within 2G, it doesn't.
duke@0 580 bool SafePointNode::needs_polling_address_input()
duke@0 581 {
duke@0 582 return false;
duke@0 583 }
duke@0 584
duke@0 585 //
duke@0 586 // Compute padding required for nodes which need alignment
duke@0 587 //
duke@0 588
duke@0 589 // The address of the call instruction needs to be 4-byte aligned to
duke@0 590 // ensure that it does not span a cache line so that it can be patched.
duke@0 591 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
duke@0 592 {
duke@0 593 current_offset += 1; // skip call opcode byte
duke@0 594 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 595 }
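
// Worked example (illustrative): what actually gets aligned is the byte after
// the call opcode, i.e. the start of the 4-byte displacement that gets patched.
// If the node would start at code offset 6, the displacement would start at 7,
// so with an alignment_required() of 4 this returns round_to(7, 4) - 7 = 1 byte
// of padding; at offset 7 the displacement already lands on a 4-byte boundary
// and no padding is emitted.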
duke@0 596
duke@0 597 // The address of the call instruction needs to be 4-byte aligned to
duke@0 598 // ensure that it does not span a cache line so that it can be patched.
twisti@1137 599 int CallStaticJavaHandleNode::compute_padding(int current_offset) const
twisti@1137 600 {
twisti@1137 601 current_offset += preserve_SP_size(); // skip mov rbp, rsp
twisti@1137 602 current_offset += 1; // skip call opcode byte
twisti@1137 603 return round_to(current_offset, alignment_required()) - current_offset;
twisti@1137 604 }
twisti@1137 605
twisti@1137 606 // The address of the call instruction needs to be 4-byte aligned to
twisti@1137 607 // ensure that it does not span a cache line so that it can be patched.
duke@0 608 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
duke@0 609 {
duke@0 610 current_offset += 11; // skip movq instruction + call opcode byte
duke@0 611 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 612 }
duke@0 613
duke@0 614 #ifndef PRODUCT
duke@0 615 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 616 {
duke@0 617 st->print("INT3");
duke@0 618 }
duke@0 619 #endif
duke@0 620
duke@0 621 // EMIT_RM()
duke@0 622 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
duke@0 623 {
duke@0 624 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
duke@0 625 *(cbuf.code_end()) = c;
duke@0 626 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 627 }
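
// Illustrative example (not in the original source): emit_rm(cbuf, 0x3, 0, 3)
// packs mod=11b, reg=000b, rm=011b into the single ModRM byte
// (0x3 << 6) | (0 << 3) | 3 == 0xC3; preceded by opcode 0x8B this encodes
// "movl eax, ebx", the same pattern encode_copy() below uses for reg-reg moves.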
duke@0 628
duke@0 629 // EMIT_CC()
duke@0 630 void emit_cc(CodeBuffer &cbuf, int f1, int f2)
duke@0 631 {
duke@0 632 unsigned char c = (unsigned char) (f1 | f2);
duke@0 633 *(cbuf.code_end()) = c;
duke@0 634 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 635 }
duke@0 636
duke@0 637 // EMIT_OPCODE()
duke@0 638 void emit_opcode(CodeBuffer &cbuf, int code)
duke@0 639 {
duke@0 640 *(cbuf.code_end()) = (unsigned char) code;
duke@0 641 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 642 }
duke@0 643
duke@0 644 // EMIT_OPCODE() w/ relocation information
duke@0 645 void emit_opcode(CodeBuffer &cbuf,
duke@0 646 int code, relocInfo::relocType reloc, int offset, int format)
duke@0 647 {
duke@0 648 cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
duke@0 649 emit_opcode(cbuf, code);
duke@0 650 }
duke@0 651
duke@0 652 // EMIT_D8()
duke@0 653 void emit_d8(CodeBuffer &cbuf, int d8)
duke@0 654 {
duke@0 655 *(cbuf.code_end()) = (unsigned char) d8;
duke@0 656 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 657 }
duke@0 658
duke@0 659 // EMIT_D16()
duke@0 660 void emit_d16(CodeBuffer &cbuf, int d16)
duke@0 661 {
duke@0 662 *((short *)(cbuf.code_end())) = d16;
duke@0 663 cbuf.set_code_end(cbuf.code_end() + 2);
duke@0 664 }
duke@0 665
duke@0 666 // EMIT_D32()
duke@0 667 void emit_d32(CodeBuffer &cbuf, int d32)
duke@0 668 {
duke@0 669 *((int *)(cbuf.code_end())) = d32;
duke@0 670 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 671 }
duke@0 672
duke@0 673 // EMIT_D64()
duke@0 674 void emit_d64(CodeBuffer &cbuf, int64_t d64)
duke@0 675 {
duke@0 676 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 677 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 678 }
duke@0 679
duke@0 680 // emit 32 bit value and construct relocation entry from relocInfo::relocType
duke@0 681 void emit_d32_reloc(CodeBuffer& cbuf,
duke@0 682 int d32,
duke@0 683 relocInfo::relocType reloc,
duke@0 684 int format)
duke@0 685 {
duke@0 686 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
duke@0 687 cbuf.relocate(cbuf.inst_mark(), reloc, format);
duke@0 688
duke@0 689 *((int*) (cbuf.code_end())) = d32;
duke@0 690 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 691 }
duke@0 692
duke@0 693 // emit 32 bit value and construct relocation entry from RelocationHolder
duke@0 694 void emit_d32_reloc(CodeBuffer& cbuf,
duke@0 695 int d32,
duke@0 696 RelocationHolder const& rspec,
duke@0 697 int format)
duke@0 698 {
duke@0 699 #ifdef ASSERT
duke@0 700 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 701 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
jrose@989 702 assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
duke@0 703 }
duke@0 704 #endif
duke@0 705 cbuf.relocate(cbuf.inst_mark(), rspec, format);
duke@0 706
duke@0 707 *((int* )(cbuf.code_end())) = d32;
duke@0 708 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 709 }
duke@0 710
duke@0 711 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
duke@0 712 address next_ip = cbuf.code_end() + 4;
duke@0 713 emit_d32_reloc(cbuf, (int) (addr - next_ip),
duke@0 714 external_word_Relocation::spec(addr),
duke@0 715 RELOC_DISP32);
duke@0 716 }
duke@0 717
duke@0 718
duke@0 719 // emit 64 bit value and construct relocation entry from relocInfo::relocType
duke@0 720 void emit_d64_reloc(CodeBuffer& cbuf,
duke@0 721 int64_t d64,
duke@0 722 relocInfo::relocType reloc,
duke@0 723 int format)
duke@0 724 {
duke@0 725 cbuf.relocate(cbuf.inst_mark(), reloc, format);
duke@0 726
duke@0 727 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 728 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 729 }
duke@0 730
duke@0 731 // emit 64 bit value and construct relocation entry from RelocationHolder
duke@0 732 void emit_d64_reloc(CodeBuffer& cbuf,
duke@0 733 int64_t d64,
duke@0 734 RelocationHolder const& rspec,
duke@0 735 int format)
duke@0 736 {
duke@0 737 #ifdef ASSERT
duke@0 738 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 739 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
jrose@989 740 assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
jrose@989 741 "cannot embed scavengable oops in code");
duke@0 742 }
duke@0 743 #endif
duke@0 744 cbuf.relocate(cbuf.inst_mark(), rspec, format);
duke@0 745
duke@0 746 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 747 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 748 }
duke@0 749
duke@0 750 // Access stack slot for load or store
duke@0 751 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
duke@0 752 {
duke@0 753 emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src])
duke@0 754 if (-0x80 <= disp && disp < 0x80) {
duke@0 755 emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
duke@0 756 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 757 emit_d8(cbuf, disp); // Displacement // R/M byte
duke@0 758 } else {
duke@0 759 emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
duke@0 760 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 761 emit_d32(cbuf, disp); // Displacement // R/M byte
duke@0 762 }
duke@0 763 }
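
// Illustrative example: store_to_stackslot(cbuf, 0x8F, 0x00, 0x18) emits
//   8F 44 24 18            popq [rsp + 0x18]
// i.e. opcode 8F /0, ModRM 0x44 (mod=01, rm=100 selects a SIB byte), SIB 0x24
// (base=rsp, no index) and an 8-bit displacement; displacements outside
// [-128, 128) take the 32-bit form instead.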
duke@0 764
duke@0 765 // rRegI ereg, memory mem) %{ // emit_reg_mem
duke@0 766 void encode_RegMem(CodeBuffer &cbuf,
duke@0 767 int reg,
duke@0 768 int base, int index, int scale, int disp, bool disp_is_oop)
duke@0 769 {
duke@0 770 assert(!disp_is_oop, "cannot have disp");
duke@0 771 int regenc = reg & 7;
duke@0 772 int baseenc = base & 7;
duke@0 773 int indexenc = index & 7;
duke@0 774
duke@0 775 // There is no index & no scale, use form without SIB byte
duke@0 776 if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
duke@0 777 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 778 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 779 emit_rm(cbuf, 0x0, regenc, baseenc); // *
duke@0 780 } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 781 // If 8-bit displacement, mode 0x1
duke@0 782 emit_rm(cbuf, 0x1, regenc, baseenc); // *
duke@0 783 emit_d8(cbuf, disp);
duke@0 784 } else {
duke@0 785 // If 32-bit displacement
duke@0 786 if (base == -1) { // Special flag for absolute address
duke@0 787 emit_rm(cbuf, 0x0, regenc, 0x5); // *
duke@0 788 if (disp_is_oop) {
duke@0 789 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 790 } else {
duke@0 791 emit_d32(cbuf, disp);
duke@0 792 }
duke@0 793 } else {
duke@0 794 // Normal base + offset
duke@0 795 emit_rm(cbuf, 0x2, regenc, baseenc); // *
duke@0 796 if (disp_is_oop) {
duke@0 797 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 798 } else {
duke@0 799 emit_d32(cbuf, disp);
duke@0 800 }
duke@0 801 }
duke@0 802 }
duke@0 803 } else {
duke@0 804 // Else, encode with the SIB byte
duke@0 805 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 806 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 807 // If no displacement
duke@0 808 emit_rm(cbuf, 0x0, regenc, 0x4); // *
duke@0 809 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 810 } else {
duke@0 811 if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 812 // If 8-bit displacement, mode 0x1
duke@0 813 emit_rm(cbuf, 0x1, regenc, 0x4); // *
duke@0 814 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 815 emit_d8(cbuf, disp);
duke@0 816 } else {
duke@0 817 // If 32-bit displacement
duke@0 818 if (base == 0x04 ) {
duke@0 819 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 820 emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
duke@0 821 } else {
duke@0 822 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 823 emit_rm(cbuf, scale, indexenc, baseenc); // *
duke@0 824 }
duke@0 825 if (disp_is_oop) {
duke@0 826 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 827 } else {
duke@0 828 emit_d32(cbuf, disp);
duke@0 829 }
duke@0 830 }
duke@0 831 }
duke@0 832 }
duke@0 833 }
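
// Worked example (illustrative): encode_RegMem(cbuf, RAX_enc, RSP_enc, 0x4, 0, 8, false)
// takes the SIB path because the base is RSP and emits
//   44 24 08               ModRM (mod=01, reg=rax, rm=100), SIB (no index, base=rsp), disp8
// so together with a preceding 0x8B opcode the result is "movl eax, [rsp + 8]",
// exactly the form the spill-copy code below relies on.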
duke@0 834
duke@0 835 void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
duke@0 836 {
duke@0 837 if (dstenc != srcenc) {
duke@0 838 if (dstenc < 8) {
duke@0 839 if (srcenc >= 8) {
duke@0 840 emit_opcode(cbuf, Assembler::REX_B);
duke@0 841 srcenc -= 8;
duke@0 842 }
duke@0 843 } else {
duke@0 844 if (srcenc < 8) {
duke@0 845 emit_opcode(cbuf, Assembler::REX_R);
duke@0 846 } else {
duke@0 847 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 848 srcenc -= 8;
duke@0 849 }
duke@0 850 dstenc -= 8;
duke@0 851 }
duke@0 852
duke@0 853 emit_opcode(cbuf, 0x8B);
duke@0 854 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 855 }
duke@0 856 }
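
// Illustrative examples: encode_copy(cbuf, 0, 3) emits 8B C3, i.e. "movl eax, ebx",
// while encode_copy(cbuf, 8, 1) first emits Assembler::REX_R to extend the
// destination encoding and then 8B C1, i.e. "movl r8d, ecx".  A self-copy
// (dstenc == srcenc) emits nothing at all.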
duke@0 857
duke@0 858 void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
duke@0 859 if( dst_encoding == src_encoding ) {
duke@0 860 // reg-reg copy, use an empty encoding
duke@0 861 } else {
duke@0 862 MacroAssembler _masm(&cbuf);
duke@0 863
duke@0 864 __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
duke@0 865 }
duke@0 866 }
duke@0 867
duke@0 868
duke@0 869 //=============================================================================
duke@0 870 #ifndef PRODUCT
duke@0 871 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 872 {
duke@0 873 Compile* C = ra_->C;
duke@0 874
duke@0 875 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 876 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 877 // Remove wordSize for return adr already pushed
duke@0 878 // and another for the RBP we are going to save
duke@0 879 framesize -= 2*wordSize;
duke@0 880 bool need_nop = true;
duke@0 881
duke@0 882 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 883 // We require that their callers must bang for them. But be
duke@0 884 // careful, because some VM calls (such as call site linkage) can
duke@0 885 // use several kilobytes of stack. But the stack safety zone should
duke@0 886 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 887 if (C->need_stack_bang(framesize)) {
duke@0 888 st->print_cr("# stack bang"); st->print("\t");
duke@0 889 need_nop = false;
duke@0 890 }
duke@0 891 st->print_cr("pushq rbp"); st->print("\t");
duke@0 892
duke@0 893 if (VerifyStackAtCalls) {
duke@0 894 // Majik cookie to verify stack depth
duke@0 895 st->print_cr("pushq 0xffffffffbadb100d"
duke@0 896 "\t# Majik cookie for stack depth check");
duke@0 897 st->print("\t");
duke@0 898 framesize -= wordSize; // Remove 2 for cookie
duke@0 899 need_nop = false;
duke@0 900 }
duke@0 901
duke@0 902 if (framesize) {
duke@0 903 st->print("subq rsp, #%d\t# Create frame", framesize);
duke@0 904 if (framesize < 0x80 && need_nop) {
duke@0 905 st->print("\n\tnop\t# nop for patch_verified_entry");
duke@0 906 }
duke@0 907 }
duke@0 908 }
duke@0 909 #endif
duke@0 910
duke@0 911 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 912 {
duke@0 913 Compile* C = ra_->C;
duke@0 914
duke@0 915 // WARNING: Initial instruction MUST be 5 bytes or longer so that
duke@0 916 // NativeJump::patch_verified_entry will be able to patch out the entry
duke@0 917 // code safely. The fldcw is ok at 6 bytes, the push to verify stack
duke@0 918 // depth is ok at 5 bytes, the frame allocation can be either 3 or
duke@0 919 // 6 bytes. So if we don't do the fldcw or the push then we must
duke@0 920 // use the 6 byte frame allocation even if we have no frame. :-(
duke@0 921 // If method sets FPU control word do it now
duke@0 922
duke@0 923 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 924 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 925 // Remove wordSize for return adr already pushed
duke@0 926 // and another for the RBP we are going to save
duke@0 927 framesize -= 2*wordSize;
duke@0 928 bool need_nop = true;
duke@0 929
duke@0 930 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 931 // We require that their callers must bang for them. But be
duke@0 932 // careful, because some VM calls (such as call site linkage) can
duke@0 933 // use several kilobytes of stack. But the stack safety zone should
duke@0 934 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 935 if (C->need_stack_bang(framesize)) {
duke@0 936 MacroAssembler masm(&cbuf);
duke@0 937 masm.generate_stack_overflow_check(framesize);
duke@0 938 need_nop = false;
duke@0 939 }
duke@0 940
duke@0 941 // We always push rbp so that on return to interpreter rbp will be
duke@0 942 // restored correctly and we can correct the stack.
duke@0 943 emit_opcode(cbuf, 0x50 | RBP_enc);
duke@0 944
duke@0 945 if (VerifyStackAtCalls) {
duke@0 946 // Majik cookie to verify stack depth
duke@0 947 emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
duke@0 948 emit_d32(cbuf, 0xbadb100d);
duke@0 949 framesize -= wordSize; // Remove 2 for cookie
duke@0 950 need_nop = false;
duke@0 951 }
duke@0 952
duke@0 953 if (framesize) {
duke@0 954 emit_opcode(cbuf, Assembler::REX_W);
duke@0 955 if (framesize < 0x80) {
duke@0 956 emit_opcode(cbuf, 0x83); // sub SP,#framesize
duke@0 957 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 958 emit_d8(cbuf, framesize);
duke@0 959 if (need_nop) {
duke@0 960 emit_opcode(cbuf, 0x90); // nop
duke@0 961 }
duke@0 962 } else {
duke@0 963 emit_opcode(cbuf, 0x81); // sub SP,#framesize
duke@0 964 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 965 emit_d32(cbuf, framesize);
duke@0 966 }
duke@0 967 }
duke@0 968
duke@0 969 C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
duke@0 970
duke@0 971 #ifdef ASSERT
duke@0 972 if (VerifyStackAtCalls) {
duke@0 973 Label L;
duke@0 974 MacroAssembler masm(&cbuf);
never@304 975 masm.push(rax);
never@304 976 masm.mov(rax, rsp);
never@304 977 masm.andptr(rax, StackAlignmentInBytes-1);
never@304 978 masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
never@304 979 masm.pop(rax);
duke@0 980 masm.jcc(Assembler::equal, L);
duke@0 981 masm.stop("Stack is not properly aligned!");
duke@0 982 masm.bind(L);
duke@0 983 }
duke@0 984 #endif
duke@0 985 }
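
// Illustrative example (assuming no stack bang is needed, VerifyStackAtCalls is
// off, and framesize works out to 16 after the adjustments above): the prolog
// emitted by this method is
//   55                     pushq rbp
//   48 83 EC 10            subq  rsp, 0x10
//   90                     nop         // nop for patch_verified_entry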
duke@0 986
duke@0 987 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
duke@0 988 {
duke@0 989 return MachNode::size(ra_); // too many variables; just compute it
duke@0 990 // the hard way
duke@0 991 }
duke@0 992
duke@0 993 int MachPrologNode::reloc() const
duke@0 994 {
duke@0 995 return 0; // a large enough number
duke@0 996 }
duke@0 997
duke@0 998 //=============================================================================
duke@0 999 #ifndef PRODUCT
duke@0 1000 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1001 {
duke@0 1002 Compile* C = ra_->C;
duke@0 1003 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1004 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1005 // Remove word for return adr already pushed
duke@0 1006 // and RBP
duke@0 1007 framesize -= 2*wordSize;
duke@0 1008
duke@0 1009 if (framesize) {
duke@0 1010 st->print_cr("addq\trsp, %d\t# Destroy frame", framesize);
duke@0 1011 st->print("\t");
duke@0 1012 }
duke@0 1013
duke@0 1014 st->print_cr("popq\trbp");
duke@0 1015 if (do_polling() && C->is_method_compilation()) {
duke@0 1016 st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t"
duke@0 1017 "# Safepoint: poll for GC");
duke@0 1018 st->print("\t");
duke@0 1019 }
duke@0 1020 }
duke@0 1021 #endif
duke@0 1022
duke@0 1023 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1024 {
duke@0 1025 Compile* C = ra_->C;
duke@0 1026 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1027 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1028 // Remove word for return adr already pushed
duke@0 1029 // and RBP
duke@0 1030 framesize -= 2*wordSize;
duke@0 1031
duke@0 1032 // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
duke@0 1033
duke@0 1034 if (framesize) {
duke@0 1035 emit_opcode(cbuf, Assembler::REX_W);
duke@0 1036 if (framesize < 0x80) {
duke@0 1037 emit_opcode(cbuf, 0x83); // addq rsp, #framesize
duke@0 1038 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1039 emit_d8(cbuf, framesize);
duke@0 1040 } else {
duke@0 1041 emit_opcode(cbuf, 0x81); // addq rsp, #framesize
duke@0 1042 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1043 emit_d32(cbuf, framesize);
duke@0 1044 }
duke@0 1045 }
duke@0 1046
duke@0 1047 // popq rbp
duke@0 1048 emit_opcode(cbuf, 0x58 | RBP_enc);
duke@0 1049
duke@0 1050 if (do_polling() && C->is_method_compilation()) {
duke@0 1051 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
duke@0 1052 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 1053 cbuf.set_inst_mark();
duke@0 1054 cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
duke@0 1055 emit_opcode(cbuf, 0x85); // testl
duke@0 1056 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
duke@0 1057 // cbuf.inst_mark() is beginning of instruction
duke@0 1058 emit_d32_reloc(cbuf, os::get_polling_page());
duke@0 1059 // relocInfo::poll_return_type,
duke@0 1060 }
duke@0 1061 }
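
// Illustrative example (framesize 16, do_polling() and is_method_compilation()
// both true): the epilog emitted by this method is
//   48 83 C4 10            addq  rsp, 0x10
//   5D                     popq  rbp
//   85 05 xx xx xx xx      testl rax, [rip + #offset_to_poll_page]  // safepoint poll
// where the 32-bit displacement is relocated against os::get_polling_page().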
duke@0 1062
duke@0 1063 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
duke@0 1064 {
duke@0 1065 Compile* C = ra_->C;
duke@0 1066 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1067 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1068 // Remove word for return adr already pushed
duke@0 1069 // and RBP
duke@0 1070 framesize -= 2*wordSize;
duke@0 1071
duke@0 1072 uint size = 0;
duke@0 1073
duke@0 1074 if (do_polling() && C->is_method_compilation()) {
duke@0 1075 size += 6;
duke@0 1076 }
duke@0 1077
duke@0 1078 // count popq rbp
duke@0 1079 size++;
duke@0 1080
duke@0 1081 if (framesize) {
duke@0 1082 if (framesize < 0x80) {
duke@0 1083 size += 4;
duke@0 1084 } else if (framesize) {
duke@0 1085 size += 7;
duke@0 1086 }
duke@0 1087 }
duke@0 1088
duke@0 1089 return size;
duke@0 1090 }
duke@0 1091
duke@0 1092 int MachEpilogNode::reloc() const
duke@0 1093 {
duke@0 1094 return 2; // a large enough number
duke@0 1095 }
duke@0 1096
duke@0 1097 const Pipeline* MachEpilogNode::pipeline() const
duke@0 1098 {
duke@0 1099 return MachNode::pipeline_class();
duke@0 1100 }
duke@0 1101
duke@0 1102 int MachEpilogNode::safepoint_offset() const
duke@0 1103 {
duke@0 1104 return 0;
duke@0 1105 }
duke@0 1106
duke@0 1107 //=============================================================================
duke@0 1108
duke@0 1109 enum RC {
duke@0 1110 rc_bad,
duke@0 1111 rc_int,
duke@0 1112 rc_float,
duke@0 1113 rc_stack
duke@0 1114 };
duke@0 1115
duke@0 1116 static enum RC rc_class(OptoReg::Name reg)
duke@0 1117 {
duke@0 1118 if( !OptoReg::is_valid(reg) ) return rc_bad;
duke@0 1119
duke@0 1120 if (OptoReg::is_stack(reg)) return rc_stack;
duke@0 1121
duke@0 1122 VMReg r = OptoReg::as_VMReg(reg);
duke@0 1123
duke@0 1124 if (r->is_Register()) return rc_int;
duke@0 1125
duke@0 1126 assert(r->is_XMMRegister(), "must be");
duke@0 1127 return rc_float;
duke@0 1128 }
duke@0 1129
duke@0 1130 uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
duke@0 1131 PhaseRegAlloc* ra_,
duke@0 1132 bool do_size,
duke@0 1133 outputStream* st) const
duke@0 1134 {
duke@0 1135
duke@0 1136 // Get registers to move
duke@0 1137 OptoReg::Name src_second = ra_->get_reg_second(in(1));
duke@0 1138 OptoReg::Name src_first = ra_->get_reg_first(in(1));
duke@0 1139 OptoReg::Name dst_second = ra_->get_reg_second(this);
duke@0 1140 OptoReg::Name dst_first = ra_->get_reg_first(this);
duke@0 1141
duke@0 1142 enum RC src_second_rc = rc_class(src_second);
duke@0 1143 enum RC src_first_rc = rc_class(src_first);
duke@0 1144 enum RC dst_second_rc = rc_class(dst_second);
duke@0 1145 enum RC dst_first_rc = rc_class(dst_first);
duke@0 1146
duke@0 1147 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
duke@0 1148 "must move at least 1 register" );
duke@0 1149
duke@0 1150 if (src_first == dst_first && src_second == dst_second) {
duke@0 1151 // Self copy, no move
duke@0 1152 return 0;
duke@0 1153 } else if (src_first_rc == rc_stack) {
duke@0 1154 // mem ->
duke@0 1155 if (dst_first_rc == rc_stack) {
duke@0 1156 // mem -> mem
duke@0 1157 assert(src_second != dst_first, "overlap");
duke@0 1158 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1159 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1160 // 64-bit
duke@0 1161 int src_offset = ra_->reg2offset(src_first);
duke@0 1162 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1163 if (cbuf) {
duke@0 1164 emit_opcode(*cbuf, 0xFF);
duke@0 1165 encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);
duke@0 1166
duke@0 1167 emit_opcode(*cbuf, 0x8F);
duke@0 1168 encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);
duke@0 1169
duke@0 1170 #ifndef PRODUCT
duke@0 1171 } else if (!do_size) {
duke@0 1172 st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
duke@0 1173 "popq [rsp + #%d]",
duke@0 1174 src_offset,
duke@0 1175 dst_offset);
duke@0 1176 #endif
duke@0 1177 }
duke@0 1178 return
duke@0 1179 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
duke@0 1180 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
duke@0 1181 } else {
duke@0 1182 // 32-bit
duke@0 1183 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1184 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1185 // No pushl/popl, so:
duke@0 1186 int src_offset = ra_->reg2offset(src_first);
duke@0 1187 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1188 if (cbuf) {
duke@0 1189 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1190 emit_opcode(*cbuf, 0x89);
duke@0 1191 emit_opcode(*cbuf, 0x44);
duke@0 1192 emit_opcode(*cbuf, 0x24);
duke@0 1193 emit_opcode(*cbuf, 0xF8);
duke@0 1194
duke@0 1195 emit_opcode(*cbuf, 0x8B);
duke@0 1196 encode_RegMem(*cbuf,
duke@0 1197 RAX_enc,
duke@0 1198 RSP_enc, 0x4, 0, src_offset,
duke@0 1199 false);
duke@0 1200
duke@0 1201 emit_opcode(*cbuf, 0x89);
duke@0 1202 encode_RegMem(*cbuf,
duke@0 1203 RAX_enc,
duke@0 1204 RSP_enc, 0x4, 0, dst_offset,
duke@0 1205 false);
duke@0 1206
duke@0 1207 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1208 emit_opcode(*cbuf, 0x8B);
duke@0 1209 emit_opcode(*cbuf, 0x44);
duke@0 1210 emit_opcode(*cbuf, 0x24);
duke@0 1211 emit_opcode(*cbuf, 0xF8);
duke@0 1212
duke@0 1213 #ifndef PRODUCT
duke@0 1214 } else if (!do_size) {
duke@0 1215 st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
duke@0 1216 "movl rax, [rsp + #%d]\n\t"
duke@0 1217 "movl [rsp + #%d], rax\n\t"
duke@0 1218 "movq rax, [rsp - #8]",
duke@0 1219 src_offset,
duke@0 1220 dst_offset);
duke@0 1221 #endif
duke@0 1222 }
duke@0 1223 return
duke@0 1224 5 + // movq
duke@0 1225 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1226 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1227 5; // movq
duke@0 1228 }
duke@0 1229 } else if (dst_first_rc == rc_int) {
duke@0 1230 // mem -> gpr
duke@0 1231 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1232 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1233 // 64-bit
duke@0 1234 int offset = ra_->reg2offset(src_first);
duke@0 1235 if (cbuf) {
duke@0 1236 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1237 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1238 } else {
duke@0 1239 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1240 }
duke@0 1241 emit_opcode(*cbuf, 0x8B);
duke@0 1242 encode_RegMem(*cbuf,
duke@0 1243 Matcher::_regEncode[dst_first],
duke@0 1244 RSP_enc, 0x4, 0, offset,
duke@0 1245 false);
duke@0 1246 #ifndef PRODUCT
duke@0 1247 } else if (!do_size) {
duke@0 1248 st->print("movq %s, [rsp + #%d]\t# spill",
duke@0 1249 Matcher::regName[dst_first],
duke@0 1250 offset);
duke@0 1251 #endif
duke@0 1252 }
duke@0 1253 return
duke@0 1254 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1255 } else {
duke@0 1256 // 32-bit
duke@0 1257 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1258 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1259 int offset = ra_->reg2offset(src_first);
duke@0 1260 if (cbuf) {
duke@0 1261 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1262 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1263 }
duke@0 1264 emit_opcode(*cbuf, 0x8B);
duke@0 1265 encode_RegMem(*cbuf,
duke@0 1266 Matcher::_regEncode[dst_first],
duke@0 1267 RSP_enc, 0x4, 0, offset,
duke@0 1268 false);
duke@0 1269 #ifndef PRODUCT
duke@0 1270 } else if (!do_size) {
duke@0 1271 st->print("movl %s, [rsp + #%d]\t# spill",
duke@0 1272 Matcher::regName[dst_first],
duke@0 1273 offset);
duke@0 1274 #endif
duke@0 1275 }
duke@0 1276 return
duke@0 1277 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1278 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1279 ? 3
duke@0 1280 : 4); // REX
duke@0 1281 }
duke@0 1282 } else if (dst_first_rc == rc_float) {
duke@0 1283 // mem-> xmm
duke@0 1284 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1285 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1286 // 64-bit
duke@0 1287 int offset = ra_->reg2offset(src_first);
duke@0 1288 if (cbuf) {
duke@0 1289 emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 1290 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1291 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1292 }
duke@0 1293 emit_opcode(*cbuf, 0x0F);
duke@0 1294 emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
duke@0 1295 encode_RegMem(*cbuf,
duke@0 1296 Matcher::_regEncode[dst_first],
duke@0 1297 RSP_enc, 0x4, 0, offset,
duke@0 1298 false);
duke@0 1299 #ifndef PRODUCT
duke@0 1300 } else if (!do_size) {
duke@0 1301 st->print("%s %s, [rsp + #%d]\t# spill",
duke@0 1302 UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
duke@0 1303 Matcher::regName[dst_first],
duke@0 1304 offset);
duke@0 1305 #endif
duke@0 1306 }
duke@0 1307 return
duke@0 1308 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1309 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1310 ? 5
duke@0 1311 : 6); // REX
duke@0 1312 } else {
duke@0 1313 // 32-bit
duke@0 1314 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1315 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1316 int offset = ra_->reg2offset(src_first);
duke@0 1317 if (cbuf) {
duke@0 1318 emit_opcode(*cbuf, 0xF3);
duke@0 1319 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1320 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1321 }
duke@0 1322 emit_opcode(*cbuf, 0x0F);
duke@0 1323 emit_opcode(*cbuf, 0x10);
duke@0 1324 encode_RegMem(*cbuf,
duke@0 1325 Matcher::_regEncode[dst_first],
duke@0 1326 RSP_enc, 0x4, 0, offset,
duke@0 1327 false);
duke@0 1328 #ifndef PRODUCT
duke@0 1329 } else if (!do_size) {
duke@0 1330 st->print("movss %s, [rsp + #%d]\t# spill",
duke@0 1331 Matcher::regName[dst_first],
duke@0 1332 offset);
duke@0 1333 #endif
duke@0 1334 }
duke@0 1335 return
duke@0 1336 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1337 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1338 ? 5
duke@0 1339 : 6); // REX
duke@0 1340 }
duke@0 1341 }
duke@0 1342 } else if (src_first_rc == rc_int) {
duke@0 1343 // gpr ->
duke@0 1344 if (dst_first_rc == rc_stack) {
duke@0 1345 // gpr -> mem
duke@0 1346 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1347 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1348 // 64-bit
duke@0 1349 int offset = ra_->reg2offset(dst_first);
duke@0 1350 if (cbuf) {
duke@0 1351 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1352 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1353 } else {
duke@0 1354 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1355 }
duke@0 1356 emit_opcode(*cbuf, 0x89);
duke@0 1357 encode_RegMem(*cbuf,
duke@0 1358 Matcher::_regEncode[src_first],
duke@0 1359 RSP_enc, 0x4, 0, offset,
duke@0 1360 false);
duke@0 1361 #ifndef PRODUCT
duke@0 1362 } else if (!do_size) {
duke@0 1363 st->print("movq [rsp + #%d], %s\t# spill",
duke@0 1364 offset,
duke@0 1365 Matcher::regName[src_first]);
duke@0 1366 #endif
duke@0 1367 }
duke@0 1368 return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1369 } else {
duke@0 1370 // 32-bit
duke@0 1371 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1372 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1373 int offset = ra_->reg2offset(dst_first);
duke@0 1374 if (cbuf) {
duke@0 1375 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1376 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1377 }
duke@0 1378 emit_opcode(*cbuf, 0x89);
duke@0 1379 encode_RegMem(*cbuf,
duke@0 1380 Matcher::_regEncode[src_first],
duke@0 1381 RSP_enc, 0x4, 0, offset,
duke@0 1382 false);
duke@0 1383 #ifndef PRODUCT
duke@0 1384 } else if (!do_size) {
duke@0 1385 st->print("movl [rsp + #%d], %s\t# spill",
duke@0 1386 offset,
duke@0 1387 Matcher::regName[src_first]);
duke@0 1388 #endif
duke@0 1389 }
duke@0 1390 return
duke@0 1391 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1392 ((Matcher::_regEncode[src_first] < 8)
duke@0 1393 ? 3
duke@0 1394 : 4); // REX
duke@0 1395 }
duke@0 1396 } else if (dst_first_rc == rc_int) {
duke@0 1397 // gpr -> gpr
duke@0 1398 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1399 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1400 // 64-bit
duke@0 1401 if (cbuf) {
duke@0 1402 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1403 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1404 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1405 } else {
duke@0 1406 emit_opcode(*cbuf, Assembler::REX_WB);
duke@0 1407 }
duke@0 1408 } else {
duke@0 1409 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1410 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1411 } else {
duke@0 1412 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1413 }
duke@0 1414 }
duke@0 1415 emit_opcode(*cbuf, 0x8B);
duke@0 1416 emit_rm(*cbuf, 0x3,
duke@0 1417 Matcher::_regEncode[dst_first] & 7,
duke@0 1418 Matcher::_regEncode[src_first] & 7);
duke@0 1419 #ifndef PRODUCT
duke@0 1420 } else if (!do_size) {
duke@0 1421 st->print("movq %s, %s\t# spill",
duke@0 1422 Matcher::regName[dst_first],
duke@0 1423 Matcher::regName[src_first]);
duke@0 1424 #endif
duke@0 1425 }
duke@0 1426 return 3; // REX
duke@0 1427 } else {
duke@0 1428 // 32-bit
duke@0 1429 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1430 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1431 if (cbuf) {
duke@0 1432 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1433 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1434 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1435 }
duke@0 1436 } else {
duke@0 1437 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1438 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1439 } else {
duke@0 1440 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1441 }
duke@0 1442 }
duke@0 1443 emit_opcode(*cbuf, 0x8B);
duke@0 1444 emit_rm(*cbuf, 0x3,
duke@0 1445 Matcher::_regEncode[dst_first] & 7,
duke@0 1446 Matcher::_regEncode[src_first] & 7);
duke@0 1447 #ifndef PRODUCT
duke@0 1448 } else if (!do_size) {
duke@0 1449 st->print("movl %s, %s\t# spill",
duke@0 1450 Matcher::regName[dst_first],
duke@0 1451 Matcher::regName[src_first]);
duke@0 1452 #endif
duke@0 1453 }
duke@0 1454 return
duke@0 1455 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1456 ? 2
duke@0 1457 : 3; // REX
duke@0 1458 }
duke@0 1459 } else if (dst_first_rc == rc_float) {
duke@0 1460 // gpr -> xmm
duke@0 1461 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1462 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1463 // 64-bit
duke@0 1464 if (cbuf) {
duke@0 1465 emit_opcode(*cbuf, 0x66);
duke@0 1466 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1467 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1468 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1469 } else {
duke@0 1470 emit_opcode(*cbuf, Assembler::REX_WB);
duke@0 1471 }
duke@0 1472 } else {
duke@0 1473 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1474 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1475 } else {
duke@0 1476 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1477 }
duke@0 1478 }
duke@0 1479 emit_opcode(*cbuf, 0x0F);
duke@0 1480 emit_opcode(*cbuf, 0x6E);
duke@0 1481 emit_rm(*cbuf, 0x3,
duke@0 1482 Matcher::_regEncode[dst_first] & 7,
duke@0 1483 Matcher::_regEncode[src_first] & 7);
duke@0 1484 #ifndef PRODUCT
duke@0 1485 } else if (!do_size) {
duke@0 1486 st->print("movdq %s, %s\t# spill",
duke@0 1487 Matcher::regName[dst_first],
duke@0 1488 Matcher::regName[src_first]);
duke@0 1489 #endif
duke@0 1490 }
duke@0 1491 return 5; // REX
duke@0 1492 } else {
duke@0 1493 // 32-bit
duke@0 1494 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1495 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1496 if (cbuf) {
duke@0 1497 emit_opcode(*cbuf, 0x66);
duke@0 1498 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1499 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1500 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1501 }
duke@0 1502 } else {
duke@0 1503 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1504 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1505 } else {
duke@0 1506 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1507 }
duke@0 1508 }
duke@0 1509 emit_opcode(*cbuf, 0x0F);
duke@0 1510 emit_opcode(*cbuf, 0x6E);
duke@0 1511 emit_rm(*cbuf, 0x3,
duke@0 1512 Matcher::_regEncode[dst_first] & 7,
duke@0 1513 Matcher::_regEncode[src_first] & 7);
duke@0 1514 #ifndef PRODUCT
duke@0 1515 } else if (!do_size) {
duke@0 1516 st->print("movdl %s, %s\t# spill",
duke@0 1517 Matcher::regName[dst_first],
duke@0 1518 Matcher::regName[src_first]);
duke@0 1519 #endif
duke@0 1520 }
duke@0 1521 return
duke@0 1522 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1523 ? 4
duke@0 1524 : 5; // REX
duke@0 1525 }
duke@0 1526 }
duke@0 1527 } else if (src_first_rc == rc_float) {
duke@0 1528 // xmm ->
duke@0 1529 if (dst_first_rc == rc_stack) {
duke@0 1530 // xmm -> mem
duke@0 1531 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1532 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1533 // 64-bit
duke@0 1534 int offset = ra_->reg2offset(dst_first);
duke@0 1535 if (cbuf) {
duke@0 1536 emit_opcode(*cbuf, 0xF2);
duke@0 1537 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1538 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1539 }
duke@0 1540 emit_opcode(*cbuf, 0x0F);
duke@0 1541 emit_opcode(*cbuf, 0x11);
duke@0 1542 encode_RegMem(*cbuf,
duke@0 1543 Matcher::_regEncode[src_first],
duke@0 1544 RSP_enc, 0x4, 0, offset,
duke@0 1545 false);
duke@0 1546 #ifndef PRODUCT
duke@0 1547 } else if (!do_size) {
duke@0 1548 st->print("movsd [rsp + #%d], %s\t# spill",
duke@0 1549 offset,
duke@0 1550 Matcher::regName[src_first]);
duke@0 1551 #endif
duke@0 1552 }
duke@0 1553 return
duke@0 1554 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1555 ((Matcher::_regEncode[src_first] < 8)
duke@0 1556 ? 5
duke@0 1557 : 6); // REX
duke@0 1558 } else {
duke@0 1559 // 32-bit
duke@0 1560 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1561 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1562 int offset = ra_->reg2offset(dst_first);
duke@0 1563 if (cbuf) {
duke@0 1564 emit_opcode(*cbuf, 0xF3);
duke@0 1565 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1566 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1567 }
duke@0 1568 emit_opcode(*cbuf, 0x0F);
duke@0 1569 emit_opcode(*cbuf, 0x11);
duke@0 1570 encode_RegMem(*cbuf,
duke@0 1571 Matcher::_regEncode[src_first],
duke@0 1572 RSP_enc, 0x4, 0, offset,
duke@0 1573 false);
duke@0 1574 #ifndef PRODUCT
duke@0 1575 } else if (!do_size) {
duke@0 1576 st->print("movss [rsp + #%d], %s\t# spill",
duke@0 1577 offset,
duke@0 1578 Matcher::regName[src_first]);
duke@0 1579 #endif
duke@0 1580 }
duke@0 1581 return
duke@0 1582 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1583 ((Matcher::_regEncode[src_first] < 8)
duke@0 1584 ? 5
duke@0 1585 : 6); // REX
duke@0 1586 }
duke@0 1587 } else if (dst_first_rc == rc_int) {
duke@0 1588 // xmm -> gpr
duke@0 1589 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1590 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1591 // 64-bit
duke@0 1592 if (cbuf) {
duke@0 1593 emit_opcode(*cbuf, 0x66);
duke@0 1594 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1595 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1596 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1597 } else {
duke@0 1598 emit_opcode(*cbuf, Assembler::REX_WR); // attention!
duke@0 1599 }
duke@0 1600 } else {
duke@0 1601 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1602 emit_opcode(*cbuf, Assembler::REX_WB); // attention!
duke@0 1603 } else {
duke@0 1604 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1605 }
duke@0 1606 }
duke@0 1607 emit_opcode(*cbuf, 0x0F);
duke@0 1608 emit_opcode(*cbuf, 0x7E);
duke@0 1609 emit_rm(*cbuf, 0x3,
duke@0 1610 Matcher::_regEncode[dst_first] & 7,
duke@0 1611 Matcher::_regEncode[src_first] & 7);
duke@0 1612 #ifndef PRODUCT
duke@0 1613 } else if (!do_size) {
duke@0 1614 st->print("movdq %s, %s\t# spill",
duke@0 1615 Matcher::regName[dst_first],
duke@0 1616 Matcher::regName[src_first]);
duke@0 1617 #endif
duke@0 1618 }
duke@0 1619 return 5; // REX
duke@0 1620 } else {
duke@0 1621 // 32-bit
duke@0 1622 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1623 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1624 if (cbuf) {
duke@0 1625 emit_opcode(*cbuf, 0x66);
duke@0 1626 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1627 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1628 emit_opcode(*cbuf, Assembler::REX_R); // attention!
duke@0 1629 }
duke@0 1630 } else {
duke@0 1631 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1632 emit_opcode(*cbuf, Assembler::REX_B); // attention!
duke@0 1633 } else {
duke@0 1634 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1635 }
duke@0 1636 }
duke@0 1637 emit_opcode(*cbuf, 0x0F);
duke@0 1638 emit_opcode(*cbuf, 0x7E);
duke@0 1639 emit_rm(*cbuf, 0x3,
duke@0 1640 Matcher::_regEncode[dst_first] & 7,
duke@0 1641 Matcher::_regEncode[src_first] & 7);
duke@0 1642 #ifndef PRODUCT
duke@0 1643 } else if (!do_size) {
duke@0 1644 st->print("movdl %s, %s\t# spill",
duke@0 1645 Matcher::regName[dst_first],
duke@0 1646 Matcher::regName[src_first]);
duke@0 1647 #endif
duke@0 1648 }
duke@0 1649 return
duke@0 1650 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1651 ? 4
duke@0 1652 : 5; // REX
duke@0 1653 }
duke@0 1654 } else if (dst_first_rc == rc_float) {
duke@0 1655 // xmm -> xmm
duke@0 1656 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1657 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1658 // 64-bit
duke@0 1659 if (cbuf) {
duke@0 1660 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
duke@0 1661 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1662 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1663 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1664 }
duke@0 1665 } else {
duke@0 1666 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1667 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1668 } else {
duke@0 1669 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1670 }
duke@0 1671 }
duke@0 1672 emit_opcode(*cbuf, 0x0F);
duke@0 1673 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 1674 emit_rm(*cbuf, 0x3,
duke@0 1675 Matcher::_regEncode[dst_first] & 7,
duke@0 1676 Matcher::_regEncode[src_first] & 7);
duke@0 1677 #ifndef PRODUCT
duke@0 1678 } else if (!do_size) {
duke@0 1679 st->print("%s %s, %s\t# spill",
duke@0 1680 UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
duke@0 1681 Matcher::regName[dst_first],
duke@0 1682 Matcher::regName[src_first]);
duke@0 1683 #endif
duke@0 1684 }
duke@0 1685 return
duke@0 1686 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1687 ? 4
duke@0 1688 : 5; // REX
duke@0 1689 } else {
duke@0 1690 // 32-bit
duke@0 1691 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1692 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1693 if (cbuf) {
duke@0 1694 if (!UseXmmRegToRegMoveAll)
duke@0 1695 emit_opcode(*cbuf, 0xF3);
duke@0 1696 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1697 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1698 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1699 }
duke@0 1700 } else {
duke@0 1701 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1702 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1703 } else {
duke@0 1704 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1705 }
duke@0 1706 }
duke@0 1707 emit_opcode(*cbuf, 0x0F);
duke@0 1708 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 1709 emit_rm(*cbuf, 0x3,
duke@0 1710 Matcher::_regEncode[dst_first] & 7,
duke@0 1711 Matcher::_regEncode[src_first] & 7);
duke@0 1712 #ifndef PRODUCT
duke@0 1713 } else if (!do_size) {
duke@0 1714 st->print("%s %s, %s\t# spill",
duke@0 1715 UseXmmRegToRegMoveAll ? "movaps" : "movss ",
duke@0 1716 Matcher::regName[dst_first],
duke@0 1717 Matcher::regName[src_first]);
duke@0 1718 #endif
duke@0 1719 }
duke@0 1720 return
duke@0 1721 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1722 ? (UseXmmRegToRegMoveAll ? 3 : 4)
duke@0 1723 : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
duke@0 1724 }
duke@0 1725 }
duke@0 1726 }
duke@0 1727
duke@0 1728 assert(0," foo ");
duke@0 1729 Unimplemented();
duke@0 1730
duke@0 1731 return 0;
duke@0 1732 }
duke@0 1733
duke@0 1734 #ifndef PRODUCT
duke@0 1735 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
duke@0 1736 {
duke@0 1737 implementation(NULL, ra_, false, st);
duke@0 1738 }
duke@0 1739 #endif
duke@0 1740
duke@0 1741 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 1742 {
duke@0 1743 implementation(&cbuf, ra_, false, NULL);
duke@0 1744 }
duke@0 1745
duke@0 1746 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
duke@0 1747 {
duke@0 1748 return implementation(NULL, ra_, true, NULL);
duke@0 1749 }
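// Added note: the single implementation() routine above serves all three
// entry points -- format() for pretty-printing, emit() for code generation,
// and size() for pure size computation -- so the byte counts it returns must
// stay in sync with the bytes it actually emits when a CodeBuffer is given.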
duke@0 1750
duke@0 1751 //=============================================================================
duke@0 1752 #ifndef PRODUCT
duke@0 1753 void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 1754 {
duke@0 1755 st->print("nop \t# %d bytes pad for loops and calls", _count);
duke@0 1756 }
duke@0 1757 #endif
duke@0 1758
duke@0 1759 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
duke@0 1760 {
duke@0 1761 MacroAssembler _masm(&cbuf);
duke@0 1762 __ nop(_count);
duke@0 1763 }
duke@0 1764
duke@0 1765 uint MachNopNode::size(PhaseRegAlloc*) const
duke@0 1766 {
duke@0 1767 return _count;
duke@0 1768 }
duke@0 1769
duke@0 1770
duke@0 1771 //=============================================================================
duke@0 1772 #ifndef PRODUCT
duke@0 1773 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1774 {
duke@0 1775 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1776 int reg = ra_->get_reg_first(this);
duke@0 1777 st->print("leaq %s, [rsp + #%d]\t# box lock",
duke@0 1778 Matcher::regName[reg], offset);
duke@0 1779 }
duke@0 1780 #endif
duke@0 1781
duke@0 1782 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1783 {
duke@0 1784 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1785 int reg = ra_->get_encode(this);
duke@0 1786 if (offset >= 0x80) {
duke@0 1787 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1788 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1789 emit_rm(cbuf, 0x2, reg & 7, 0x04);
duke@0 1790 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1791 emit_d32(cbuf, offset);
duke@0 1792 } else {
duke@0 1793 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1794 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1795 emit_rm(cbuf, 0x1, reg & 7, 0x04);
duke@0 1796 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1797 emit_d8(cbuf, offset);
duke@0 1798 }
duke@0 1799 }
duke@0 1800
duke@0 1801 uint BoxLockNode::size(PhaseRegAlloc *ra_) const
duke@0 1802 {
duke@0 1803 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1804 return (offset < 0x80) ? 5 : 8; // REX
duke@0 1805 }
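// Added size accounting (assuming the standard LEA encoding emitted above):
// the short form is REX.W + 0x8D + ModRM + SIB + disp8 = 5 bytes, and the
// long form replaces the 1-byte displacement with a 4-byte one, giving 8
// bytes -- the two values returned by size().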
duke@0 1806
duke@0 1807 //=============================================================================
duke@0 1808
duke@0 1809 // emit call stub, compiled java to interpreter
duke@0 1810 void emit_java_to_interp(CodeBuffer& cbuf)
duke@0 1811 {
duke@0 1812 // Stub is fixed up when the corresponding call is converted from
duke@0 1813 // calling compiled code to calling interpreted code.
duke@0 1814 // movq rbx, 0
duke@0 1815 // jmp -5 # to self
duke@0 1816
duke@0 1817 address mark = cbuf.inst_mark(); // get mark within main instrs section
duke@0 1818
duke@0 1819 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1820 // That's why we must use the macroassembler to generate a stub.
duke@0 1821 MacroAssembler _masm(&cbuf);
duke@0 1822
duke@0 1823 address base =
duke@0 1824 __ start_a_stub(Compile::MAX_stubs_size);
duke@0 1825 if (base == NULL) return; // CodeBuffer::expand failed
duke@0 1826 // static stub relocation stores the instruction address of the call
duke@0 1827 __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
duke@0 1828 // static stub relocation also tags the methodOop in the code-stream.
duke@0 1829 __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
never@304 1830 // This is recognized as unresolved by relocs/nativeinst/ic code
duke@0 1831 __ jump(RuntimeAddress(__ pc()));
duke@0 1832
duke@0 1833 // Update current stubs pointer and restore code_end.
duke@0 1834 __ end_a_stub();
duke@0 1835 }
duke@0 1836
duke@0 1837 // size of call stub, compiled java to interpreter
duke@0 1838 uint size_java_to_interp()
duke@0 1839 {
duke@0 1840 return 15; // movq (1+1+8); jmp (1+4)
duke@0 1841 }
duke@0 1842
duke@0 1843 // relocation entries for call stub, compiled java to interpreter
duke@0 1844 uint reloc_java_to_interp()
duke@0 1845 {
duke@0 1846 return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
duke@0 1847 }
duke@0 1848
duke@0 1849 //=============================================================================
duke@0 1850 #ifndef PRODUCT
duke@0 1851 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1852 {
coleenp@113 1853 if (UseCompressedOops) {
coleenp@113 1854 st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
kvn@642 1855 if (Universe::narrow_oop_shift() != 0) {
kvn@642 1856 st->print_cr("leaq rscratch1, [r12_heapbase, rscratch1, Address::times_8, 0]");
kvn@642 1857 }
coleenp@113 1858 st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
coleenp@113 1859 } else {
coleenp@113 1860 st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
coleenp@113 1861 "# Inline cache check", oopDesc::klass_offset_in_bytes());
coleenp@113 1862 }
duke@0 1863 st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
duke@0 1864 st->print_cr("\tnop");
duke@0 1865 if (!OptoBreakpoint) {
duke@0 1866 st->print_cr("\tnop");
duke@0 1867 }
duke@0 1868 }
duke@0 1869 #endif
duke@0 1870
duke@0 1871 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1872 {
duke@0 1873 MacroAssembler masm(&cbuf);
duke@0 1874 #ifdef ASSERT
duke@0 1875 uint code_size = cbuf.code_size();
duke@0 1876 #endif
coleenp@113 1877 if (UseCompressedOops) {
coleenp@113 1878 masm.load_klass(rscratch1, j_rarg0);
never@304 1879 masm.cmpptr(rax, rscratch1);
coleenp@113 1880 } else {
never@304 1881 masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
coleenp@113 1882 }
duke@0 1883
duke@0 1884 masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
duke@0 1885
duke@0 1886 /* WARNING these NOPs are critical so that verified entry point is properly
duke@0 1887 aligned for patching by NativeJump::patch_verified_entry() */
duke@0 1888 int nops_cnt = 1;
duke@0 1889 if (!OptoBreakpoint) {
duke@0 1890 // Leave space for int3
duke@0 1891 nops_cnt += 1;
duke@0 1892 }
coleenp@113 1893 if (UseCompressedOops) {
coleenp@113 1894 // ??? divisible by 4 is aligned?
coleenp@113 1895 nops_cnt += 1;
coleenp@113 1896 }
duke@0 1897 masm.nop(nops_cnt);
duke@0 1898
duke@0 1899 assert(cbuf.code_size() - code_size == size(ra_),
duke@0 1900 "checking code size of inline cache node");
duke@0 1901 }
duke@0 1902
duke@0 1903 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
duke@0 1904 {
coleenp@113 1905 if (UseCompressedOops) {
kvn@642 1906 if (Universe::narrow_oop_shift() == 0) {
kvn@642 1907 return OptoBreakpoint ? 15 : 16;
kvn@642 1908 } else {
kvn@642 1909 return OptoBreakpoint ? 19 : 20;
kvn@642 1910 }
coleenp@113 1911 } else {
coleenp@113 1912 return OptoBreakpoint ? 11 : 12;
coleenp@113 1913 }
duke@0 1914 }
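// Added rough size accounting for the cases above (assuming typical
// REX-prefixed encodings): load_klass into rscratch1 is ~4 bytes, cmpq
// rax, rscratch1 is 3, and the jne rel32 to the ic_miss stub is 6, i.e.
// 13 bytes plus 2 or 3 nops (15/16); a non-zero narrow_oop_shift adds a
// ~4-byte decode leaq (19/20); without compressed oops the cmpq against
// memory is ~4 bytes, giving 10 bytes plus 1 or 2 nops (11/12).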
duke@0 1915
duke@0 1916
duke@0 1917 //=============================================================================
duke@0 1918 uint size_exception_handler()
duke@0 1919 {
duke@0 1920 // NativeCall instruction size is the same as NativeJump.
duke@0 1921 // Note that this value is also credited (in output.cpp) to
duke@0 1922 // the size of the code section.
duke@0 1923 return NativeJump::instruction_size;
duke@0 1924 }
duke@0 1925
duke@0 1926 // Emit exception handler code.
duke@0 1927 int emit_exception_handler(CodeBuffer& cbuf)
duke@0 1928 {
duke@0 1929
duke@0 1930 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1931 // That's why we must use the macroassembler to generate a handler.
duke@0 1932 MacroAssembler _masm(&cbuf);
duke@0 1933 address base =
duke@0 1934 __ start_a_stub(size_exception_handler());
duke@0 1935 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1936 int offset = __ offset();
duke@0 1937 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
duke@0 1938 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
duke@0 1939 __ end_a_stub();
duke@0 1940 return offset;
duke@0 1941 }
duke@0 1942
duke@0 1943 uint size_deopt_handler()
duke@0 1944 {
duke@0 1945 // three 5 byte instructions
duke@0 1946 return 15;
duke@0 1947 }
duke@0 1948
duke@0 1949 // Emit deopt handler code.
duke@0 1950 int emit_deopt_handler(CodeBuffer& cbuf)
duke@0 1951 {
duke@0 1952
duke@0 1953 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1954 // That's why we must use the macroassembler to generate a handler.
duke@0 1955 MacroAssembler _masm(&cbuf);
duke@0 1956 address base =
duke@0 1957 __ start_a_stub(size_deopt_handler());
duke@0 1958 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1959 int offset = __ offset();
duke@0 1960 address the_pc = (address) __ pc();
duke@0 1961 Label next;
duke@0 1962 // push a "the_pc" on the stack without destroying any registers
duke@0 1963 // as they all may be live.
duke@0 1964
duke@0 1965 // push address of "next"
duke@0 1966 __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
duke@0 1967 __ bind(next);
duke@0 1968 // adjust it so it matches "the_pc"
never@304 1969 __ subptr(Address(rsp, 0), __ offset() - offset);
duke@0 1970 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
duke@0 1971 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
duke@0 1972 __ end_a_stub();
duke@0 1973 return offset;
duke@0 1974 }
duke@0 1975
duke@0 1976 static void emit_double_constant(CodeBuffer& cbuf, double x) {
duke@0 1977 int mark = cbuf.insts()->mark_off();
duke@0 1978 MacroAssembler _masm(&cbuf);
duke@0 1979 address double_address = __ double_constant(x);
duke@0 1980 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
duke@0 1981 emit_d32_reloc(cbuf,
duke@0 1982 (int) (double_address - cbuf.code_end() - 4),
duke@0 1983 internal_word_Relocation::spec(double_address),
duke@0 1984 RELOC_DISP32);
duke@0 1985 }
duke@0 1986
duke@0 1987 static void emit_float_constant(CodeBuffer& cbuf, float x) {
duke@0 1988 int mark = cbuf.insts()->mark_off();
duke@0 1989 MacroAssembler _masm(&cbuf);
duke@0 1990 address float_address = __ float_constant(x);
duke@0 1991 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
duke@0 1992 emit_d32_reloc(cbuf,
duke@0 1993 (int) (float_address - cbuf.code_end() - 4),
duke@0 1994 internal_word_Relocation::spec(float_address),
duke@0 1995 RELOC_DISP32);
duke@0 1996 }
duke@0 1997
duke@0 1998
twisti@775 1999 const bool Matcher::match_rule_supported(int opcode) {
twisti@775 2000 if (!has_match_rule(opcode))
twisti@775 2001 return false;
twisti@775 2002
twisti@775 2003 return true; // Per default match rules are supported.
twisti@775 2004 }
twisti@775 2005
duke@0 2006 int Matcher::regnum_to_fpu_offset(int regnum)
duke@0 2007 {
duke@0 2008 return regnum - 32; // The FP registers are in the second chunk
duke@0 2009 }
duke@0 2010
duke@0 2011 // This is UltraSparc specific; true just means we have fast l2f conversion
duke@0 2012 const bool Matcher::convL2FSupported(void) {
duke@0 2013 return true;
duke@0 2014 }
duke@0 2015
duke@0 2016 // Vector width in bytes
duke@0 2017 const uint Matcher::vector_width_in_bytes(void) {
duke@0 2018 return 8;
duke@0 2019 }
duke@0 2020
duke@0 2021 // Vector ideal reg
duke@0 2022 const uint Matcher::vector_ideal_reg(void) {
duke@0 2023 return Op_RegD;
duke@0 2024 }
duke@0 2025
duke@0 2026 // Is this branch offset short enough that a short branch can be used?
duke@0 2027 //
duke@0 2028 // NOTE: If the platform does not provide any short branch variants, then
duke@0 2029 // this method should return false for offset 0.
never@415 2030 bool Matcher::is_short_branch_offset(int rule, int offset) {
never@415 2031 // the short version of jmpConUCF2 contains multiple branches,
never@415 2032 // making the reach slightly less
never@415 2033 if (rule == jmpConUCF2_rule)
never@415 2034 return (-126 <= offset && offset <= 125);
never@415 2035 return (-128 <= offset && offset <= 127);
duke@0 2036 }
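// Added note: a short Jcc carries a signed 8-bit displacement, hence the
// usual [-128, 127] reach. The short jmpConUCF2 expansion emits an extra
// 2-byte branch ahead of the final one, so the last displacement must still
// fit in 8 bits after those 2 bytes are consumed, shrinking the safe range
// to [-126, 125].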
duke@0 2037
duke@0 2038 const bool Matcher::isSimpleConstant64(jlong value) {
duke@0 2039 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
duke@0 2040 //return value == (int) value; // Cf. storeImmL and immL32.
duke@0 2041
duke@0 2042 // Probably always true, even if a temp register is required.
duke@0 2043 return true;
duke@0 2044 }
duke@0 2045
duke@0 2046 // The ecx parameter to rep stosq for the ClearArray node is in words.
duke@0 2047 const bool Matcher::init_array_count_is_in_bytes = false;
duke@0 2048
duke@0 2049 // Threshold size for cleararray.
duke@0 2050 const int Matcher::init_array_short_size = 8 * BytesPerLong;
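// For reference, 8 * BytesPerLong works out to a 64-byte threshold here.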
duke@0 2051
duke@0 2052 // Should the Matcher clone shifts on addressing modes, expecting them
duke@0 2053 // to be subsumed into complex addressing expressions or compute them
duke@0 2054 // into registers? True for Intel but false for most RISCs
duke@0 2055 const bool Matcher::clone_shift_expressions = true;
duke@0 2056
duke@0 2057 // Is it better to copy float constants, or load them directly from
duke@0 2058 // memory? Intel can load a float constant from a direct address,
duke@0 2059 // requiring no extra registers. Most RISCs will have to materialize
duke@0 2060 // an address into a register first, so they would do better to copy
duke@0 2061 // the constant from stack.
duke@0 2062 const bool Matcher::rematerialize_float_constants = true; // XXX
duke@0 2063
duke@0 2064 // If CPU can load and store mis-aligned doubles directly then no
duke@0 2065 // fixup is needed. Else we split the double into 2 integer pieces
duke@0 2066 // and move it piece-by-piece. Only happens when passing doubles into
duke@0 2067 // C code as the Java calling convention forces doubles to be aligned.
duke@0 2068 const bool Matcher::misaligned_doubles_ok = true;
duke@0 2069
duke@0 2070 // No-op on amd64
duke@0 2071 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
duke@0 2072
duke@0 2073 // Advertise here if the CPU requires explicit rounding operations to
duke@0 2074 // implement the UseStrictFP mode.
duke@0 2075 const bool Matcher::strict_fp_requires_explicit_rounding = true;
duke@0 2076
kvn@1274 2077 // Are floats converted to doubles when stored to the stack during deoptimization?
kvn@1274 2078 // On x64 they are stored without conversion, so we can use normal access.
kvn@1274 2079 bool Matcher::float_in_double() { return false; }
kvn@1274 2080
duke@0 2081 // Do ints take an entire long register or just half?
duke@0 2082 const bool Matcher::int_in_long = true;
duke@0 2083
duke@0 2084 // Return whether or not this register is ever used as an argument.
duke@0 2085 // This function is used on startup to build the trampoline stubs in
duke@0 2086 // generateOptoStub. Registers not mentioned will be killed by the VM
duke@0 2087 // call in the trampoline, and arguments in those registers will not be
duke@0 2088 // available to the callee.
duke@0 2089 bool Matcher::can_be_java_arg(int reg)
duke@0 2090 {
duke@0 2091 return
duke@0 2092 reg == RDI_num || reg == RDI_H_num ||
duke@0 2093 reg == RSI_num || reg == RSI_H_num ||
duke@0 2094 reg == RDX_num || reg == RDX_H_num ||
duke@0 2095 reg == RCX_num || reg == RCX_H_num ||
duke@0 2096 reg == R8_num || reg == R8_H_num ||
duke@0 2097 reg == R9_num || reg == R9_H_num ||
coleenp@113 2098 reg == R12_num || reg == R12_H_num ||
duke@0 2099 reg == XMM0_num || reg == XMM0_H_num ||
duke@0 2100 reg == XMM1_num || reg == XMM1_H_num ||
duke@0 2101 reg == XMM2_num || reg == XMM2_H_num ||
duke@0 2102 reg == XMM3_num || reg == XMM3_H_num ||
duke@0 2103 reg == XMM4_num || reg == XMM4_H_num ||
duke@0 2104 reg == XMM5_num || reg == XMM5_H_num ||
duke@0 2105 reg == XMM6_num || reg == XMM6_H_num ||
duke@0 2106 reg == XMM7_num || reg == XMM7_H_num;
duke@0 2107 }
duke@0 2108
duke@0 2109 bool Matcher::is_spillable_arg(int reg)
duke@0 2110 {
duke@0 2111 return can_be_java_arg(reg);
duke@0 2112 }
duke@0 2113
duke@0 2114 // Register for DIVI projection of divmodI
duke@0 2115 RegMask Matcher::divI_proj_mask() {
duke@0 2116 return INT_RAX_REG_mask;
duke@0 2117 }
duke@0 2118
duke@0 2119 // Register for MODI projection of divmodI
duke@0 2120 RegMask Matcher::modI_proj_mask() {
duke@0 2121 return INT_RDX_REG_mask;
duke@0 2122 }
duke@0 2123
duke@0 2124 // Register for DIVL projection of divmodL
duke@0 2125 RegMask Matcher::divL_proj_mask() {
duke@0 2126 return LONG_RAX_REG_mask;
duke@0 2127 }
duke@0 2128
duke@0 2129 // Register for MODL projection of divmodL
duke@0 2130 RegMask Matcher::modL_proj_mask() {
duke@0 2131 return LONG_RDX_REG_mask;
duke@0 2132 }
duke@0 2133
twisti@1137 2134 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
twisti@1137 2135 return PTR_RBP_REG_mask;
twisti@1137 2136 }
twisti@1137 2137
coleenp@113 2138 static Address build_address(int b, int i, int s, int d) {
coleenp@113 2139 Register index = as_Register(i);
coleenp@113 2140 Address::ScaleFactor scale = (Address::ScaleFactor)s;
coleenp@113 2141 if (index == rsp) {
coleenp@113 2142 index = noreg;
coleenp@113 2143 scale = Address::no_scale;
coleenp@113 2144 }
coleenp@113 2145 Address addr(as_Register(b), index, scale, d);
coleenp@113 2146 return addr;
coleenp@113 2147 }
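// Added note: the rsp check above exists because the SIB index encoding
// 0b100 means "no index" on x86, so rsp can never act as an index register
// and is mapped to noreg with no scaling instead.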
coleenp@113 2148
duke@0 2149 %}
duke@0 2150
duke@0 2151 //----------ENCODING BLOCK-----------------------------------------------------
duke@0 2152 // This block specifies the encoding classes used by the compiler to
duke@0 2153 // output byte streams. Encoding classes are parameterized macros
duke@0 2154 // used by Machine Instruction Nodes in order to generate the bit
duke@0 2155 // encoding of the instruction. Operands specify their base encoding
duke@0 2156 // interface with the interface keyword. Four interfaces are currently
duke@0 2157 // supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
duke@0 2158 // COND_INTER. REG_INTER causes an operand to generate a function
duke@0 2159 // which returns its register number when queried. CONST_INTER causes
duke@0 2160 // an operand to generate a function which returns the value of the
duke@0 2161 // constant when queried. MEMORY_INTER causes an operand to generate
duke@0 2162 // four functions which return the Base Register, the Index Register,
duke@0 2163 // the Scale Value, and the Offset Value of the operand when queried.
duke@0 2164 // COND_INTER causes an operand to generate six functions which return
duke@0 2165 // the encoding code (ie - encoding bits for the instruction)
duke@0 2166 // associated with each basic boolean condition for a conditional
duke@0 2167 // instruction.
duke@0 2168 //
duke@0 2169 // Instructions specify two basic values for encoding. Again, a
duke@0 2170 // function is available to check if the constant displacement is an
duke@0 2171 // oop. They use the ins_encode keyword to specify their encoding
duke@0 2172 // classes (which must be a sequence of enc_class names, and their
duke@0 2173 // parameters, specified in the encoding block), and they use the
duke@0 2174 // opcode keyword to specify, in order, their primary, secondary, and
duke@0 2175 // tertiary opcode. Only the opcode sections which a particular
duke@0 2176 // instruction needs for encoding need to be specified.
duke@0 2177 encode %{
duke@0 2178 // Build emit functions for each basic byte or larger field in the
duke@0 2179 // intel encoding scheme (opcode, rm, sib, immediate), and call them
duke@0 2180 // from C++ code in the enc_class source block. Emit functions will
duke@0 2181 // live in the main source block for now. In future, we can
duke@0 2182 // generalize this by adding a syntax that specifies the sizes of
duke@0 2183 // fields in an order, so that the adlc can build the emit functions
duke@0 2184 // automagically
duke@0 2185
duke@0 2186 // Emit primary opcode
duke@0 2187 enc_class OpcP
duke@0 2188 %{
duke@0 2189 emit_opcode(cbuf, $primary);
duke@0 2190 %}
duke@0 2191
duke@0 2192 // Emit secondary opcode
duke@0 2193 enc_class OpcS
duke@0 2194 %{
duke@0 2195 emit_opcode(cbuf, $secondary);
duke@0 2196 %}
duke@0 2197
duke@0 2198 // Emit tertiary opcode
duke@0 2199 enc_class OpcT
duke@0 2200 %{
duke@0 2201 emit_opcode(cbuf, $tertiary);
duke@0 2202 %}
duke@0 2203
duke@0 2204 // Emit opcode directly
duke@0 2205 enc_class Opcode(immI d8)
duke@0 2206 %{
duke@0 2207 emit_opcode(cbuf, $d8$$constant);
duke@0 2208 %}
duke@0 2209
duke@0 2210 // Emit size prefix
duke@0 2211 enc_class SizePrefix
duke@0 2212 %{
duke@0 2213 emit_opcode(cbuf, 0x66);
duke@0 2214 %}
duke@0 2215
duke@0 2216 enc_class reg(rRegI reg)
duke@0 2217 %{
duke@0 2218 emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
duke@0 2219 %}
duke@0 2220
duke@0 2221 enc_class reg_reg(rRegI dst, rRegI src)
duke@0 2222 %{
duke@0 2223 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2224 %}
duke@0 2225
duke@0 2226 enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
duke@0 2227 %{
duke@0 2228 emit_opcode(cbuf, $opcode$$constant);
duke@0 2229 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2230 %}
duke@0 2231
duke@0 2232 enc_class cmpfp_fixup()
duke@0 2233 %{
duke@0 2234 // jnp,s exit
duke@0 2235 emit_opcode(cbuf, 0x7B);
duke@0 2236 emit_d8(cbuf, 0x0A);
duke@0 2237
duke@0 2238 // pushfq
duke@0 2239 emit_opcode(cbuf, 0x9C);
duke@0 2240
duke@0 2241 // andq $0xffffff2b, (%rsp)
duke@0 2242 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2243 emit_opcode(cbuf, 0x81);
duke@0 2244 emit_opcode(cbuf, 0x24);
duke@0 2245 emit_opcode(cbuf, 0x24);
duke@0 2246 emit_d32(cbuf, 0xffffff2b);
duke@0 2247
duke@0 2248 // popfq
duke@0 2249 emit_opcode(cbuf, 0x9D);
duke@0 2250
duke@0 2251 // nop (target for branch to avoid branch to branch)
duke@0 2252 emit_opcode(cbuf, 0x90);
duke@0 2253 %}
duke@0 2254
duke@0 2255 enc_class cmpfp3(rRegI dst)
duke@0 2256 %{
duke@0 2257 int dstenc = $dst$$reg;
duke@0 2258
duke@0 2259 // movl $dst, -1
duke@0 2260 if (dstenc >= 8) {
duke@0 2261 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2262 }
duke@0 2263 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
duke@0 2264 emit_d32(cbuf, -1);
duke@0 2265
duke@0 2266 // jp,s done
duke@0 2267 emit_opcode(cbuf, 0x7A);
duke@0 2268 emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A);
duke@0 2269
duke@0 2270 // jb,s done
duke@0 2271 emit_opcode(cbuf, 0x72);
duke@0 2272 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
duke@0 2273
duke@0 2274 // setne $dst
duke@0 2275 if (dstenc >= 4) {
duke@0 2276 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 2277 }
duke@0 2278 emit_opcode(cbuf, 0x0F);
duke@0 2279 emit_opcode(cbuf, 0x95);
duke@0 2280 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
duke@0 2281
duke@0 2282 // movzbl $dst, $dst
duke@0 2283 if (dstenc >= 4) {
duke@0 2284 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
duke@0 2285 }
duke@0 2286 emit_opcode(cbuf, 0x0F);
duke@0 2287 emit_opcode(cbuf, 0xB6);
duke@0 2288 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
duke@0 2289 %}
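// Added note: the sequence above materializes a three-way compare result in
// $dst from the FP condition flags: $dst is preloaded with -1 and kept there
// when the compare was unordered (jp) or below (jb); otherwise setne/movzbl
// leave 0 for equal and 1 for above.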
duke@0 2290
duke@0 2291 enc_class cdql_enc(no_rax_rdx_RegI div)
duke@0 2292 %{
duke@0 2293 // Full implementation of Java idiv and irem; checks for
duke@0 2294 // special case as described in JVM spec., p.243 & p.271.
duke@0 2295 //
duke@0 2296 // normal case special case
duke@0 2297 //
duke@0 2298 // input : rax: dividend min_int
duke@0 2299 // reg: divisor -1
duke@0 2300 //
duke@0 2301 // output: rax: quotient (= rax idiv reg) min_int
duke@0 2302 // rdx: remainder (= rax irem reg) 0
duke@0 2303 //
duke@0 2304 // Code sequence:
duke@0 2305 //
duke@0 2306 // 0: 3d 00 00 00 80 cmp $0x80000000,%eax
duke@0 2307 // 5: 75 07/08 jne e <normal>
duke@0 2308 // 7: 33 d2 xor %edx,%edx
duke@0 2309 // [div >= 8 -> offset + 1]
duke@0 2310 // [REX_B]
duke@0 2311 // 9: 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2312 // c: 74 03/04 je 11 <done>
duke@0 2313 // 000000000000000e <normal>:
duke@0 2314 // e: 99 cltd
duke@0 2315 // [div >= 8 -> offset + 1]
duke@0 2316 // [REX_B]
duke@0 2317 // f: f7 f9 idiv $div
duke@0 2318 // 0000000000000011 <done>:
duke@0 2319
duke@0 2320 // cmp $0x80000000,%eax
duke@0 2321 emit_opcode(cbuf, 0x3d);
duke@0 2322 emit_d8(cbuf, 0x00);
duke@0 2323 emit_d8(cbuf, 0x00);
duke@0 2324 emit_d8(cbuf, 0x00);
duke@0 2325 emit_d8(cbuf, 0x80);
duke@0 2326
duke@0 2327 // jne e <normal>
duke@0 2328 emit_opcode(cbuf, 0x75);
duke@0 2329 emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);
duke@0 2330
duke@0 2331 // xor %edx,%edx
duke@0 2332 emit_opcode(cbuf, 0x33);
duke@0 2333 emit_d8(cbuf, 0xD2);
duke@0 2334
duke@0 2335 // cmp $0xffffffffffffffff,%ecx
duke@0 2336 if ($div$$reg >= 8) {
duke@0 2337 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2338 }
duke@0 2339 emit_opcode(cbuf, 0x83);
duke@0 2340 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2341 emit_d8(cbuf, 0xFF);
duke@0 2342
duke@0 2343 // je 11 <done>
duke@0 2344 emit_opcode(cbuf, 0x74);
duke@0 2345 emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);
duke@0 2346
duke@0 2347 // <normal>
duke@0 2348 // cltd
duke@0 2349 emit_opcode(cbuf, 0x99);
duke@0 2350
duke@0 2351 // idivl (note: must be emitted by the user of this rule)
duke@0 2352 // <done>
duke@0 2353 %}
duke@0 2354
duke@0 2355 enc_class cdqq_enc(no_rax_rdx_RegL div)
duke@0 2356 %{
duke@0 2357 // Full implementation of Java ldiv and lrem; checks for
duke@0 2358 // special case as described in JVM spec., p.243 & p.271.
duke@0 2359 //
duke@0 2360 // normal case special case
duke@0 2361 //
duke@0 2362 // input : rax: dividend min_long
duke@0 2363 // reg: divisor -1
duke@0 2364 //
duke@0 2365 // output: rax: quotient (= rax idiv reg) min_long
duke@0 2366 // rdx: remainder (= rax irem reg) 0
duke@0 2367 //
duke@0 2368 // Code sequence:
duke@0 2369 //
duke@0 2370 // 0: 48 ba 00 00 00 00 00 mov $0x8000000000000000,%rdx
duke@0 2371 // 7: 00 00 80
duke@0 2372 // a: 48 39 d0 cmp %rdx,%rax
duke@0 2373 // d: 75 08 jne 17 <normal>
duke@0 2374 // f: 33 d2 xor %edx,%edx
duke@0 2375 // 11: 48 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2376 // 15: 74 05 je 1c <done>
duke@0 2377 // 0000000000000017 <normal>:
duke@0 2378 // 17: 48 99 cqto
duke@0 2379 // 19: 48 f7 f9 idiv $div
duke@0 2380 // 000000000000001c <done>:
duke@0 2381
duke@0 2382 // mov $0x8000000000000000,%rdx
duke@0 2383 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2384 emit_opcode(cbuf, 0xBA);
duke@0 2385 emit_d8(cbuf, 0x00);
duke@0 2386 emit_d8(cbuf, 0x00);
duke@0 2387 emit_d8(cbuf, 0x00);
duke@0 2388 emit_d8(cbuf, 0x00);
duke@0 2389 emit_d8(cbuf, 0x00);
duke@0 2390 emit_d8(cbuf, 0x00);
duke@0 2391 emit_d8(cbuf, 0x00);
duke@0 2392 emit_d8(cbuf, 0x80);
duke@0 2393
duke@0 2394 // cmp %rdx,%rax
duke@0 2395 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2396 emit_opcode(cbuf, 0x39);
duke@0 2397 emit_d8(cbuf, 0xD0);
duke@0 2398
duke@0 2399 // jne 17 <normal>
duke@0 2400 emit_opcode(cbuf, 0x75);
duke@0 2401 emit_d8(cbuf, 0x08);
duke@0 2402
duke@0 2403 // xor %edx,%edx
duke@0 2404 emit_opcode(cbuf, 0x33);
duke@0 2405 emit_d8(cbuf, 0xD2);
duke@0 2406
duke@0 2407 // cmp $0xffffffffffffffff,$div
duke@0 2408 emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
duke@0 2409 emit_opcode(cbuf, 0x83);
duke@0 2410 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2411 emit_d8(cbuf, 0xFF);
duke@0 2412
duke@0 2413 // je 1c <done>
duke@0 2414 emit_opcode(cbuf, 0x74);
duke@0 2415 emit_d8(cbuf, 0x05);
duke@0 2416
duke@0 2417 // <normal>
duke@0 2418 // cqto
duke@0 2419 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2420 emit_opcode(cbuf, 0x99);
duke@0 2421
duke@0 2422 // idivq (note: must be emitted by the user of this rule)
duke@0 2423 // <done>
duke@0 2424 %}
duke@0 2425
duke@0 2426 // Opcode enc_class for 8/32-bit immediate instructions with sign-extension
duke@0 2427 enc_class OpcSE(immI imm)
duke@0 2428 %{
duke@0 2429 // Emit primary opcode and set sign-extend bit
duke@0 2430 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2431 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2432 emit_opcode(cbuf, $primary | 0x02);
duke@0 2433 } else {
duke@0 2434 // 32-bit immediate
duke@0 2435 emit_opcode(cbuf, $primary);
duke@0 2436 }
duke@0 2437 %}
duke@0 2438
duke@0 2439 enc_class OpcSErm(rRegI dst, immI imm)
duke@0 2440 %{
duke@0 2441 // OpcSEr/m
duke@0 2442 int dstenc = $dst$$reg;
duke@0 2443 if (dstenc >= 8) {
duke@0 2444 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2445 dstenc -= 8;
duke@0 2446 }
duke@0 2447 // Emit primary opcode and set sign-extend bit
duke@0 2448 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2449 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2450 emit_opcode(cbuf, $primary | 0x02);
duke@0 2451 } else {
duke@0 2452 // 32-bit immediate
duke@0 2453 emit_opcode(cbuf, $primary);
duke@0 2454 }
duke@0 2455 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2456 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2457 %}
duke@0 2458
duke@0 2459 enc_class OpcSErm_wide(rRegL dst, immI imm)
duke@0 2460 %{
duke@0 2461 // OpcSEr/m
duke@0 2462 int dstenc = $dst$$reg;
duke@0 2463 if (dstenc < 8) {
duke@0 2464 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2465 } else {
duke@0 2466 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2467 dstenc -= 8;
duke@0 2468 }
duke@0 2469 // Emit primary opcode and set sign-extend bit
duke@0 2470 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2471 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2472 emit_opcode(cbuf, $primary | 0x02);
duke@0 2473 } else {
duke@0 2474 // 32-bit immediate
duke@0 2475 emit_opcode(cbuf, $primary);
duke@0 2476 }
duke@0 2477 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2478 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2479 %}
duke@0 2480
duke@0 2481 enc_class Con8or32(immI imm)
duke@0 2482 %{
duke@0 2483 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2484 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2485 $$$emit8$imm$$constant;
duke@0 2486 } else {
duke@0 2487 // 32-bit immediate
duke@0 2488 $$$emit32$imm$$constant;
duke@0 2489 }
duke@0 2490 %}
duke@0 2491
duke@0 2492 enc_class Lbl(label labl)
duke@0 2493 %{
duke@0 2494 // JMP, CALL
duke@0 2495 Label* l = $labl$$label;
duke@0 2496 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
duke@0 2497 %}
duke@0 2498
duke@0 2499 enc_class LblShort(label labl)
duke@0 2500 %{
duke@0 2501 // JMP, CALL
duke@0 2502 Label* l = $labl$$label;
duke@0 2503 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
duke@0 2504 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
duke@0 2505 emit_d8(cbuf, disp);
duke@0 2506 %}
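// Added worked example for the label encodings above: the displacement is
// relative to the end of the displacement field (the next instruction), so
// with a 1-byte field, if code_size() is 90 when it is emitted and the label
// is bound at position 100, the stored value is 100 - (90 + 1) = 9; the
// 4-byte forms use code_size() + 4 in the same way.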
duke@0 2507
duke@0 2508 enc_class opc2_reg(rRegI dst)
duke@0 2509 %{
duke@0 2510 // BSWAP
duke@0 2511 emit_cc(cbuf, $secondary, $dst$$reg);
duke@0 2512 %}
duke@0 2513
duke@0 2514 enc_class opc3_reg(rRegI dst)
duke@0 2515 %{
duke@0 2516 // BSWAP
duke@0 2517 emit_cc(cbuf, $tertiary, $dst$$reg);
duke@0 2518 %}
duke@0 2519
duke@0 2520 enc_class reg_opc(rRegI div)
duke@0 2521 %{
duke@0 2522 // INC, DEC, IDIV, IMOD, JMP indirect, ...
duke@0 2523 emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
duke@0 2524 %}
duke@0 2525
duke@0 2526 enc_class Jcc(cmpOp cop, label labl)
duke@0 2527 %{
duke@0 2528 // JCC
duke@0 2529 Label* l = $labl$$label;
duke@0 2530 $$$emit8$primary;
duke@0 2531 emit_cc(cbuf, $secondary, $cop$$cmpcode);
duke@0 2532 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
duke@0 2533 %}
duke@0 2534
duke@0 2535 enc_class JccShort (cmpOp cop, label labl)
duke@0 2536 %{
duke@0 2537 // JCC
duke@0 2538 Label *l = $labl$$label;
duke@0 2539 emit_cc(cbuf, $primary, $cop$$cmpcode);
duke@0 2540 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
duke@0 2541 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
duke@0 2542 emit_d8(cbuf, disp);
duke@0 2543 %}
duke@0 2544
duke@0 2545 enc_class enc_cmov(cmpOp cop)
duke@0 2546 %{
duke@0 2547 // CMOV
duke@0 2548 $$$emit8$primary;
duke@0 2549 emit_cc(cbuf, $secondary, $cop$$cmpcode);
duke@0 2550 %}
duke@0 2551
duke@0 2552 enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src)
duke@0 2553 %{
duke@0 2554 // Invert sense of branch from sense of cmov
duke@0 2555 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
duke@0 2556 emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8)
duke@0 2557 ? (UseXmmRegToRegMoveAll ? 3 : 4)
duke@0 2558 : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX
duke@0 2559 // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src)
duke@0 2560 if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3);
duke@0 2561 if ($dst$$reg < 8) {
duke@0 2562 if ($src$$reg >= 8) {
duke@0 2563 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2564 }
duke@0 2565 } else {
duke@0 2566 if ($src$$reg < 8) {
duke@0 2567 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2568 } else {
duke@0 2569 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2570 }
duke@0 2571 }
duke@0 2572 emit_opcode(cbuf, 0x0F);
duke@0 2573 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 2574 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2575 %}
duke@0 2576
duke@0 2577 enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src)
duke@0 2578 %{
duke@0 2579 // Invert sense of branch from sense of cmov
duke@0 2580 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
duke@0 2581 emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX
duke@0 2582
duke@0 2583 // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src)
duke@0 2584 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
duke@0 2585 if ($dst$$reg < 8) {
duke@0 2586 if ($src$$reg >= 8) {
duke@0 2587 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2588 }
duke@0 2589 } else {
duke@0 2590 if ($src$$reg < 8) {
duke@0 2591 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2592 } else {
duke@0 2593 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2594 }
duke@0 2595 }
duke@0 2596 emit_opcode(cbuf, 0x0F);
duke@0 2597 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 2598 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2599 %}
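// Added note: x86 has no conditional move for XMM registers, so the two
// encodings above branch around an unconditional XMM-to-XMM move instead;
// the emit_d8 skip distances must match the byte length of the move that
// follows, including any REX prefix.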
duke@0 2600
duke@0 2601 enc_class enc_PartialSubtypeCheck()
duke@0 2602 %{
duke@0 2603 Register Rrdi = as_Register(RDI_enc); // result register
duke@0 2604 Register Rrax = as_Register(RAX_enc); // super class
duke@0 2605 Register Rrcx = as_Register(RCX_enc); // killed
duke@0 2606 Register Rrsi = as_Register(RSI_enc); // sub class
jrose@644 2607 Label miss;
jrose@644 2608 const bool set_cond_codes = true;
duke@0 2609
duke@0 2610 MacroAssembler _masm(&cbuf);
jrose@644 2611 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
jrose@644 2612 NULL, &miss,
jrose@644 2613 /*set_cond_codes:*/ true);
duke@0 2614 if ($primary) {
never@304 2615 __ xorptr(Rrdi, Rrdi);
duke@0 2616 }
duke@0 2617 __ bind(miss);
duke@0 2618 %}
duke@0 2619
duke@0 2620 enc_class Java_To_Interpreter(method meth)
duke@0 2621 %{
duke@0 2622 // CALL Java_To_Interpreter
duke@0 2623 // This is the instruction starting address for relocation info.
duke@0 2624 cbuf.set_inst_mark();
duke@0 2625 $$$emit8$primary;
duke@0 2626 // CALL directly to the runtime
duke@0 2627 emit_d32_reloc(cbuf,
duke@0 2628 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2629 runtime_call_Relocation::spec(),
duke@0 2630 RELOC_DISP32);
duke@0 2631 %}
duke@0 2632
twisti@1137 2633 enc_class preserve_SP %{
twisti@1137 2634 debug_only(int off0 = cbuf.code_size());
twisti@1137 2635 MacroAssembler _masm(&cbuf);
twisti@1137 2636 // RBP is preserved across all calls, even compiled calls.
twisti@1137 2637 // Use it to preserve RSP in places where the callee might change the SP.
twisti@1137 2638 __ movptr(rbp, rsp);
twisti@1137 2639 debug_only(int off1 = cbuf.code_size());
twisti@1137 2640 assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
twisti@1137 2641 %}
twisti@1137 2642
twisti@1137 2643 enc_class restore_SP %{
twisti@1137 2644 MacroAssembler _masm(&cbuf);
twisti@1137 2645 __ movptr(rsp, rbp);
twisti@1137 2646 %}
twisti@1137 2647
duke@0 2648 enc_class Java_Static_Call(method meth)
duke@0 2649 %{
duke@0 2650 // JAVA STATIC CALL
duke@0 2651 // CALL to fixup routine. Fixup routine uses ScopeDesc info to
duke@0 2652 // determine who we intended to call.
duke@0 2653 cbuf.set_inst_mark();
duke@0 2654 $$$emit8$primary;
duke@0 2655
duke@0 2656 if (!_method) {
duke@0 2657 emit_d32_reloc(cbuf,
duke@0 2658 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2659 runtime_call_Relocation::spec(),
duke@0 2660 RELOC_DISP32);
duke@0 2661 } else if (_optimized_virtual) {
duke@0 2662 emit_d32_reloc(cbuf,
duke@0 2663 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2664 opt_virtual_call_Relocation::spec(),
duke@0 2665 RELOC_DISP32);
duke@0 2666 } else {
duke@0 2667 emit_d32_reloc(cbuf,
duke@0 2668 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2669 static_call_Relocation::spec(),
duke@0 2670 RELOC_DISP32);
duke@0 2671 }
duke@0 2672 if (_method) {
duke@0 2673 // Emit stub for static call
duke@0 2674 emit_java_to_interp(cbuf);
duke@0 2675 }
duke@0 2676 %}
duke@0 2677
duke@0 2678 enc_class Java_Dynamic_Call(method meth)
duke@0 2679 %{
duke@0 2680 // JAVA DYNAMIC CALL
duke@0 2681 // !!!!!
duke@0 2682 // Generate "movq rax, -1", placeholder instruction to load oop-info
duke@0 2683 // emit_call_dynamic_prologue( cbuf );
duke@0 2684 cbuf.set_inst_mark();
duke@0 2685
duke@0 2686 // movq rax, -1
duke@0 2687 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2688 emit_opcode(cbuf, 0xB8 | RAX_enc);
duke@0 2689 emit_d64_reloc(cbuf,
duke@0 2690 (int64_t) Universe::non_oop_word(),
duke@0 2691 oop_Relocation::spec_for_immediate(), RELOC_IMM64);
duke@0 2692 address virtual_call_oop_addr = cbuf.inst_mark();
duke@0 2693 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
duke@0 2694 // who we intended to call.
duke@0 2695 cbuf.set_inst_mark();
duke@0 2696 $$$emit8$primary;
duke@0 2697 emit_d32_reloc(cbuf,
duke@0 2698 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2699 virtual_call_Relocation::spec(virtual_call_oop_addr),
duke@0 2700 RELOC_DISP32);
duke@0 2701 %}
duke@0 2702
duke@0 2703 enc_class Java_Compiled_Call(method meth)
duke@0 2704 %{
duke@0 2705 // JAVA COMPILED CALL
duke@0 2706 int disp = in_bytes(methodOopDesc:: from_compiled_offset());
duke@0 2707
duke@0 2708 // XXX XXX offset is 128 in 1.5 NON-PRODUCT !!!
duke@0 2709 // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");
duke@0 2710
duke@0 2711 // callq *disp(%rax)
duke@0 2712 cbuf.set_inst_mark();
duke@0 2713 $$$emit8$primary;
duke@0 2714 if (disp < 0x80) {
duke@0 2715 emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
duke@0 2716 emit_d8(cbuf, disp); // Displacement
duke@0 2717 } else {
duke@0 2718 emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
duke@0 2719 emit_d32(cbuf, disp); // Displacement
duke@0 2720 }
duke@0 2721 %}
duke@0 2722
duke@0 2723 enc_class reg_opc_imm(rRegI dst, immI8 shift)
duke@0 2724 %{
duke@0 2725 // SAL, SAR, SHR
duke@0 2726 int dstenc = $dst$$reg;
duke@0 2727 if (dstenc >= 8) {
duke@0 2728 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2729 dstenc -= 8;
duke@0 2730 }
duke@0 2731 $$$emit8$primary;
duke@0 2732 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2733 $$$emit8$shift$$constant;
duke@0 2734 %}
duke@0 2735
duke@0 2736 enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
duke@0 2737 %{
duke@0 2738 // SAL, SAR, SHR
duke@0 2739 int dstenc = $dst$$reg;
duke@0 2740 if (dstenc < 8) {
duke@0 2741 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2742 } else {
duke@0 2743 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2744 dstenc -= 8;
duke@0 2745 }
duke@0 2746 $$$emit8$primary;
duke@0 2747 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2748 $$$emit8$shift$$constant;
duke@0 2749 %}
duke@0 2750
duke@0 2751 enc_class load_immI(rRegI dst, immI src)
duke@0 2752 %{
duke@0 2753 int dstenc = $dst$$reg;
duke@0 2754 if (dstenc >= 8) {
duke@0 2755 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2756 dstenc -= 8;
duke@0 2757 }
duke@0 2758 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2759 $$$emit32$src$$constant;
duke@0 2760 %}
duke@0 2761
duke@0 2762 enc_class load_immL(rRegL dst, immL src)
duke@0 2763 %{
duke@0 2764 int dstenc = $dst$$reg;
duke@0 2765 if (dstenc < 8) {
duke@0 2766 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2767 } else {
duke@0 2768 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2769 dstenc -= 8;
duke@0 2770 }
duke@0 2771 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2772 emit_d64(cbuf, $src$$constant);
duke@0 2773 %}
duke@0 2774
duke@0 2775 enc_class load_immUL32(rRegL dst, immUL32 src)
duke@0 2776 %{
duke@0 2777 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2778 int dstenc = $dst$$reg;
duke@0 2779 if (dstenc >= 8) {
duke@0 2780 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2781 dstenc -= 8;
duke@0 2782 }
duke@0 2783 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2784 $$$emit32$src$$constant;
duke@0 2785 %}
duke@0 2786
duke@0 2787 enc_class load_immL32(rRegL dst, immL32 src)
duke@0 2788 %{
duke@0 2789 int dstenc = $dst$$reg;
duke@0 2790 if (dstenc < 8) {
duke@0 2791 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2792 } else {
duke@0 2793 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2794 dstenc -= 8;
duke@0 2795 }
duke@0 2796 emit_opcode(cbuf, 0xC7);
duke@0 2797 emit_rm(cbuf, 0x03, 0x00, dstenc);
duke@0 2798 $$$emit32$src$$constant;
duke@0 2799 %}
duke@0 2800
duke@0 2801 enc_class load_immP31(rRegP dst, immP32 src)
duke@0 2802 %{
duke@0 2803 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2804 int dstenc = $dst$$reg;
duke@0 2805 if (dstenc >= 8) {
duke@0 2806 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2807 dstenc -= 8;
duke@0 2808 }
duke@0 2809 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2810 $$$emit32$src$$constant;
duke@0 2811 %}
duke@0 2812
duke@0 2813 enc_class load_immP(rRegP dst, immP src)
duke@0 2814 %{
duke@0 2815 int dstenc = $dst$$reg;
duke@0 2816 if (dstenc < 8) {
duke@0 2817 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2818 } else {
duke@0 2819 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2820 dstenc -= 8;
duke@0 2821 }
duke@0 2822 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2823 // This next line should be generated from ADLC
duke@0 2824 if ($src->constant_is_oop()) {
duke@0 2825 emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
duke@0 2826 } else {
duke@0 2827 emit_d64(cbuf, $src$$constant);
duke@0 2828 }
duke@0 2829 %}
duke@0 2830
duke@0 2831 enc_class load_immF(regF dst, immF con)
duke@0 2832 %{
duke@0 2833 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 2834 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2835 emit_float_constant(cbuf, $con$$constant);
duke@0 2836 %}
duke@0 2837
duke@0 2838 enc_class load_immD(regD dst, immD con)
duke@0 2839 %{
duke@0 2840 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 2841 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2842 emit_double_constant(cbuf, $con$$constant);
duke@0 2843 %}
duke@0 2844
duke@0 2845 enc_class load_conF (regF dst, immF con) %{ // Load float constant
duke@0 2846 emit_opcode(cbuf, 0xF3);
duke@0 2847 if ($dst$$reg >= 8) {
duke@0 2848 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2849 }
duke@0 2850 emit_opcode(cbuf, 0x0F);
duke@0 2851 emit_opcode(cbuf, 0x10);
duke@0 2852 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2853 emit_float_constant(cbuf, $con$$constant);
duke@0 2854 %}
duke@0 2855
duke@0 2856 enc_class load_conD (regD dst, immD con) %{ // Load double constant
duke@0 2857 // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
duke@0 2858 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 2859 if ($dst$$reg >= 8) {
duke@0 2860 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2861 }
duke@0 2862 emit_opcode(cbuf, 0x0F);
duke@0 2863 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
duke@0 2864 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2865 emit_double_constant(cbuf, $con$$constant);
duke@0 2866 %}
duke@0 2867
duke@0 2868 // Encode a reg-reg copy. If it is useless, then empty encoding.
duke@0 2869 enc_class enc_copy(rRegI dst, rRegI src)
duke@0 2870 %{
duke@0 2871 encode_copy(cbuf, $dst$$reg, $src$$reg);
duke@0 2872 %}
duke@0 2873
duke@0 2874 // Encode xmm reg-reg copy. If it is useless, then empty encoding.
duke@0 2875 enc_class enc_CopyXD( RegD dst, RegD src ) %{
duke@0 2876 encode_CopyXD( cbuf, $dst$$reg, $src$$reg );
duke@0 2877 %}
duke@0 2878
duke@0 2879 enc_class enc_copy_always(rRegI dst, rRegI src)
duke@0 2880 %{
duke@0 2881 int srcenc = $src$$reg;
duke@0 2882 int dstenc = $dst$$reg;
duke@0 2883
duke@0 2884 if (dstenc < 8) {
duke@0 2885 if (srcenc >= 8) {
duke@0 2886 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2887 srcenc -= 8;
duke@0 2888 }
duke@0 2889 } else {
duke@0 2890 if (srcenc < 8) {
duke@0 2891 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2892 } else {
duke@0 2893 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2894 srcenc -= 8;
duke@0 2895 }
duke@0 2896 dstenc -= 8;
duke@0 2897 }
duke@0 2898
duke@0 2899 emit_opcode(cbuf, 0x8B);
duke@0 2900 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 2901 %}
duke@0 2902
duke@0 2903 enc_class enc_copy_wide(rRegL dst, rRegL src)
duke@0 2904 %{
duke@0 2905 int srcenc = $src$$reg;
duke@0 2906 int dstenc = $dst$$reg;
duke@0 2907
duke@0 2908 if (dstenc != srcenc) {
duke@0 2909 if (dstenc < 8) {
duke@0 2910 if (srcenc < 8) {
duke@0 2911 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2912 } else {
duke@0 2913 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2914 srcenc -= 8;
duke@0 2915 }
duke@0 2916 } else {
duke@0 2917 if (srcenc < 8) {
duke@0 2918 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 2919 } else {
duke@0 2920 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 2921 srcenc -= 8;
duke@0 2922 }
duke@0 2923 dstenc -= 8;
duke@0 2924 }
duke@0 2925 emit_opcode(cbuf, 0x8B);
duke@0 2926 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 2927 }
duke@0 2928 %}
duke@0 2929
duke@0 2930 enc_class Con32(immI src)
duke@0 2931 %{
duke@0 2932 // Output immediate
duke@0 2933 $$$emit32$src$$constant;
duke@0 2934 %}
duke@0 2935
duke@0 2936 enc_class Con64(immL src)
duke@0 2937 %{
duke@0 2938 // Output immediate
duke@0 2939 emit_d64($src$$constant);
duke@0 2940 %}
duke@0 2941
duke@0 2942 enc_class Con32F_as_bits(immF src)
duke@0 2943 %{
duke@0 2944 // Output Float immediate bits
duke@0 2945 jfloat jf = $src$$constant;
duke@0 2946 jint jf_as_bits = jint_cast(jf);
duke@0 2947 emit_d32(cbuf, jf_as_bits);
duke@0 2948 %}
duke@0 2949
duke@0 2950 enc_class Con16(immI src)
duke@0 2951 %{
duke@0 2952 // Output immediate
duke@0 2953 $$$emit16$src$$constant;
duke@0 2954 %}
duke@0 2955
duke@0 2956 // How is this different from Con32??? XXX
duke@0 2957 enc_class Con_d32(immI src)
duke@0 2958 %{
duke@0 2959 emit_d32(cbuf,$src$$constant);
duke@0 2960 %}
duke@0 2961
duke@0 2962 enc_class conmemref (rRegP t1) %{ // Con32(storeImmI)
duke@0 2963 // Output immediate memory reference
duke@0 2964 emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
duke@0 2965 emit_d32(cbuf, 0x00);
duke@0 2966 %}
duke@0 2967
duke@0 2968 enc_class jump_enc(rRegL switch_val, rRegI dest) %{
duke@0 2969 MacroAssembler masm(&cbuf);
duke@0 2970
duke@0 2971 Register switch_reg = as_Register($switch_val$$reg);
duke@0 2972 Register dest_reg = as_Register($dest$$reg);
duke@0 2973 address table_base = masm.address_table_constant(_index2label);
duke@0 2974
duke@0 2975 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 2976 // to do that and the compiler is using that register as one it can allocate.
duke@0 2977 // So we build it all by hand.
duke@0 2978 // Address index(noreg, switch_reg, Address::times_1);
duke@0 2979 // ArrayAddress dispatch(table, index);
duke@0 2980
duke@0 2981 Address dispatch(dest_reg, switch_reg, Address::times_1);
duke@0 2982
duke@0 2983 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 2984 masm.jmp(dispatch);
duke@0 2985 %}
duke@0 2986
duke@0 2987 enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
duke@0 2988 MacroAssembler masm(&cbuf);
duke@0 2989
duke@0 2990 Register switch_reg = as_Register($switch_val$$reg);
duke@0 2991 Register dest_reg = as_Register($dest$$reg);
duke@0 2992 address table_base = masm.address_table_constant(_index2label);
duke@0 2993
duke@0 2994 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 2995 // to do that and the compiler is using that register as one it can allocate.
duke@0 2996 // So we build it all by hand.
duke@0 2997 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
duke@0 2998 // ArrayAddress dispatch(table, index);
duke@0 2999
duke@0 3000 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
duke@0 3001
duke@0 3002 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 3003 masm.jmp(dispatch);
duke@0 3004 %}
duke@0 3005
duke@0 3006 enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
duke@0 3007 MacroAssembler masm(&cbuf);
duke@0 3008
duke@0 3009 Register switch_reg = as_Register($switch_val$$reg);
duke@0 3010 Register dest_reg = as_Register($dest$$reg);
duke@0 3011 address table_base = masm.address_table_constant(_index2label);
duke@0 3012
duke@0 3013 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 3014 // to do that and the compiler is using that register as one it can allocate.
duke@0 3015 // So we build it all by hand.
duke@0 3016 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
duke@0 3017 // ArrayAddress dispatch(table, index);
duke@0 3018
duke@0 3019 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
duke@0 3020 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 3021 masm.jmp(dispatch);
duke@0 3022
duke@0 3023 %}
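  // The three jump_enc* encodings above all emit the same jump-table dispatch
  // shape; roughly, in C-like terms (table_base comes from
  // address_table_constant, scale/offset from the matched operands):
  //
  //   dest = table_base;                                     // lea dest, [table]
  //   goto *(address*)(dest + switch_val * scale + offset);  // jmp qword [dest + ...]
  //
  // i.e. the table holds absolute branch targets, and dest doubles as the
  // scratch register because r10 is left to the register allocator here.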
duke@0 3024
duke@0 3025 enc_class lock_prefix()
duke@0 3026 %{
duke@0 3027 if (os::is_MP()) {
duke@0 3028 emit_opcode(cbuf, 0xF0); // lock
duke@0 3029 }
duke@0 3030 %}
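  // 0xF0 is the x86 LOCK prefix; it is emitted only when os::is_MP() reports
  // a multiprocessor, since on a uniprocessor the extra bus locking buys
  // nothing for these instruction sequences.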
duke@0 3031
duke@0 3032 enc_class REX_mem(memory mem)
duke@0 3033 %{
duke@0 3034 if ($mem$$base >= 8) {
duke@0 3035 if ($mem$$index < 8) {
duke@0 3036 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3037 } else {
duke@0 3038 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3039 }
duke@0 3040 } else {
duke@0 3041 if ($mem$$index >= 8) {
duke@0 3042 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3043 }
duke@0 3044 }
duke@0 3045 %}
duke@0 3046
duke@0 3047 enc_class REX_mem_wide(memory mem)
duke@0 3048 %{
duke@0 3049 if ($mem$$base >= 8) {
duke@0 3050 if ($mem$$index < 8) {
duke@0 3051 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3052 } else {
duke@0 3053 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 3054 }
duke@0 3055 } else {
duke@0 3056 if ($mem$$index < 8) {
duke@0 3057 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3058 } else {
duke@0 3059 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 3060 }
duke@0 3061 }
duke@0 3062 %}
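  // For memory operands the REX bits map as: REX.B extends the base register,
  // REX.X extends the index register, and the _wide variant adds REX.W for a
  // 64-bit operand size.  The nested tests in REX_mem / REX_mem_wide simply
  // pick the Assembler::REX_* constant carrying the right combination of
  // those bits for base >= 8 and/or index >= 8.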
duke@0 3063
duke@0 3064 // for byte regs
duke@0 3065 enc_class REX_breg(rRegI reg)
duke@0 3066 %{
duke@0 3067 if ($reg$$reg >= 4) {
duke@0 3068 emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3069 }
duke@0 3070 %}
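  // Why ">= 4" for byte registers: without any REX prefix, byte encodings 4-7
  // name the legacy high-byte registers AH/CH/DH/BH.  Emitting a REX prefix
  // (even the plain Assembler::REX byte, 0x40) switches them to
  // SPL/BPL/SIL/DIL, which is what the allocator expects; encodings >= 8
  // additionally need the REX.B (or REX.R) extension bit, as in the byte-reg
  // encodings that follow.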
duke@0 3071
duke@0 3072 // for byte regs
duke@0 3073 enc_class REX_reg_breg(rRegI dst, rRegI src)
duke@0 3074 %{
duke@0 3075 if ($dst$$reg < 8) {
duke@0 3076 if ($src$$reg >= 4) {
duke@0 3077 emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3078 }
duke@0 3079 } else {
duke@0 3080 if ($src$$reg < 8) {
duke@0 3081 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3082 } else {
duke@0 3083 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3084 }
duke@0 3085 }
duke@0 3086 %}
duke@0 3087
duke@0 3088 // for byte regs
duke@0 3089 enc_class REX_breg_mem(rRegI reg, memory mem)
duke@0 3090 %{
duke@0 3091 if ($reg$$reg < 8) {
duke@0 3092 if ($mem$$base < 8) {
duke@0 3093 if ($mem$$index >= 8) {
duke@0 3094 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3095 } else if ($reg$$reg >= 4) {
duke@0 3096 emit_opcode(cbuf, Assembler::REX);
duke@0 3097 }
duke@0 3098 } else {
duke@0 3099 if ($mem$$index < 8) {
duke@0 3100 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3101 } else {
duke@0 3102 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3103 }
duke@0 3104 }
duke@0 3105 } else {
duke@0 3106 if ($mem$$base < 8) {
duke@0 3107 if ($mem$$index < 8) {
duke@0 3108 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3109 } else {
duke@0 3110 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 3111 }
duke@0 3112 } else {
duke@0 3113 if ($mem$$index < 8) {
duke@0 3114 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3115 } else {
duke@0 3116 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 3117 }
duke@0 3118 }
duke@0 3119 }
duke@0 3120 %}
duke@0 3121
duke@0 3122 enc_class REX_reg(rRegI reg)
duke@0 3123 %{
duke@0 3124 if ($reg$$reg >= 8) {
duke@0 3125 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3126 }
duke@0 3127 %}
duke@0 3128
duke@0 3129 enc_class REX_reg_wide(rRegI reg)
duke@0 3130 %{
duke@0 3131 if ($reg$$reg < 8) {
duke@0 3132 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3133 } else {
duke@0 3134 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3135 }
duke@0 3136 %}
duke@0 3137
duke@0 3138 enc_class REX_reg_reg(rRegI dst, rRegI src)
duke@0 3139 %{
duke@0 3140 if ($dst$$reg < 8) {
duke@0 3141 if ($src$$reg >= 8) {
duke@0 3142 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3143 }
duke@0 3144 } else {
duke@0 3145 if ($src$$reg < 8) {
duke@0 3146 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3147 } else {
duke@0 3148 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3149 }
duke@0 3150 }
duke@0 3151 %}
duke@0 3152
duke@0 3153 enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
duke@0 3154 %{
duke@0 3155 if ($dst$$reg < 8) {
duke@0 3156 if ($src$$reg < 8) {
duke@0 3157 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3158 } else {
duke@0 3159 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3160 }
duke@0 3161 } else {
duke@0 3162 if ($src$$reg < 8) {
duke@0 3163 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3164 } else {
duke@0 3165 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3166 }
duke@0 3167 }
duke@0 3168 %}
duke@0 3169
duke@0 3170 enc_class REX_reg_mem(rRegI reg, memory mem)
duke@0 3171 %{
duke@0 3172 if ($reg$$reg < 8) {
duke@0 3173 if ($mem$$base < 8) {
duke@0 3174 if ($mem$$index >= 8) {
duke@0 3175 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3176 }
duke@0 3177 } else {
duke@0 3178 if ($mem$$index < 8) {
duke@0 3179 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3180 } else {
duke@0 3181 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3182 }
duke@0 3183 }
duke@0 3184 } else {
duke@0 3185 if ($mem$$base < 8) {
duke@0 3186 if ($mem$$index < 8) {
duke@0 3187 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3188 } else {
duke@0 3189 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 3190 }
duke@0 3191 } else {
duke@0 3192 if ($mem$$index < 8) {
duke@0 3193 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3194 } else {
duke@0 3195 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 3196 }
duke@0 3197 }
duke@0 3198 }
duke@0 3199 %}
duke@0 3200
duke@0 3201 enc_class REX_reg_mem_wide(rRegL reg, memory mem)
duke@0 3202 %{
duke@0 3203 if ($reg$$reg < 8) {
duke@0 3204 if ($mem$$base < 8) {
duke@0 3205 if ($mem$$index < 8) {
duke@0 3206 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3207 } else {
duke@0 3208 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 3209 }
duke@0 3210 } else {
duke@0 3211 if ($mem$$index < 8) {
duke@0 3212 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3213 } else {
duke@0 3214 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 3215 }
duke@0 3216 }
duke@0 3217 } else {
duke@0 3218 if ($mem$$base < 8) {
duke@0 3219 if ($mem$$index < 8) {
duke@0 3220 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3221 } else {
duke@0 3222 emit_opcode(cbuf, Assembler::REX_WRX);
duke@0 3223 }
duke@0 3224 } else {
duke@0 3225 if ($mem$$index < 8) {
duke@0 3226 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3227 } else {
duke@0 3228 emit_opcode(cbuf, Assembler::REX_WRXB);
duke@0 3229 }
duke@0 3230 }
duke@0 3231 }
duke@0 3232 %}
duke@0 3233
duke@0 3234 enc_class reg_mem(rRegI ereg, memory mem)
duke@0 3235 %{
duke@0 3236 // High registers are handled in encode_RegMem
duke@0 3237 int reg = $ereg$$reg;
duke@0 3238 int base = $mem$$base;
duke@0 3239 int index = $mem$$index;
duke@0 3240 int scale = $mem$$scale;
duke@0 3241 int disp = $mem$$disp;
duke@0 3242 bool disp_is_oop = $mem->disp_is_oop();
duke@0 3243
duke@0 3244 encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
duke@0 3245 %}
duke@0 3246
duke@0 3247 enc_class RM_opc_mem(immI rm_opcode, memory mem)
duke@0 3248 %{
duke@0 3249 int rm_byte_opcode = $rm_opcode$$constant;
duke@0 3250
duke@0 3251 // High registers are handled in encode_RegMem
duke@0 3252 int base = $mem$$base;
duke@0 3253 int index = $mem$$index;
duke@0 3254 int scale = $mem$$scale;
duke@0 3255 int displace = $mem$$disp;
duke@0 3256
duke@0 3257 bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
duke@0 3258 // working with static
duke@0 3259 // globals
duke@0 3260 encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
duke@0 3261 disp_is_oop);
duke@0 3262 %}
duke@0 3263
duke@0 3264 enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
duke@0 3265 %{
duke@0 3266 int reg_encoding = $dst$$reg;
duke@0 3267 int base = $src0$$reg; // 0xFFFFFFFF indicates no base
duke@0 3268 int index = 0x04; // 0x04 indicates no index
duke@0 3269 int scale = 0x00; // 0x00 indicates no scale
duke@0 3270 int displace = $src1$$constant; // 0x00 indicates no displacement
duke@0 3271 bool disp_is_oop = false;
duke@0 3272 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
duke@0 3273 disp_is_oop);
duke@0 3274 %}
duke@0 3275
duke@0 3276 enc_class neg_reg(rRegI dst)
duke@0 3277 %{
duke@0 3278 int dstenc = $dst$$reg;
duke@0 3279 if (dstenc >= 8) {
duke@0 3280 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3281 dstenc -= 8;
duke@0 3282 }
duke@0 3283 // NEG $dst
duke@0 3284 emit_opcode(cbuf, 0xF7);
duke@0 3285 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 3286 %}
duke@0 3287
duke@0 3288 enc_class neg_reg_wide(rRegI dst)
duke@0 3289 %{
duke@0 3290 int dstenc = $dst$$reg;
duke@0 3291 if (dstenc < 8) {
duke@0 3292 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3293 } else {
duke@0 3294 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3295 dstenc -= 8;
duke@0 3296 }
duke@0 3297 // NEG $dst
duke@0 3298 emit_opcode(cbuf, 0xF7);
duke@0 3299 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 3300 %}
duke@0 3301
duke@0 3302 enc_class setLT_reg(rRegI dst)
duke@0 3303 %{
duke@0 3304 int dstenc = $dst$$reg;
duke@0 3305 if (dstenc >= 8) {
duke@0 3306 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3307 dstenc -= 8;
duke@0 3308 } else if (dstenc >= 4) {
duke@0 3309 emit_opcode(cbuf, Assembler::REX);
duke@0 3310 }
duke@0 3311 // SETLT $dst
duke@0 3312 emit_opcode(cbuf, 0x0F);
duke@0 3313 emit_opcode(cbuf, 0x9C);
duke@0 3314 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 3315 %}
duke@0 3316
duke@0 3317 enc_class setNZ_reg(rRegI dst)
duke@0 3318 %{
duke@0 3319 int dstenc = $dst$$reg;
duke@0 3320 if (dstenc >= 8) {
duke@0 3321 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3322 dstenc -= 8;
duke@0 3323 } else if (dstenc >= 4) {
duke@0 3324 emit_opcode(cbuf, Assembler::REX);
duke@0 3325 }
duke@0 3326 // SETNZ $dst
duke@0 3327 emit_opcode(cbuf, 0x0F);
duke@0 3328 emit_opcode(cbuf, 0x95);
duke@0 3329 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 3330 %}
duke@0 3331
duke@0 3332 enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
duke@0 3333 rcx_RegI tmp)
duke@0 3334 %{
duke@0 3335 // cadd_cmpLT
duke@0 3336
duke@0 3337 int tmpReg = $tmp$$reg;
duke@0 3338
duke@0 3339 int penc = $p$$reg;
duke@0 3340 int qenc = $q$$reg;
duke@0 3341 int yenc = $y$$reg;
duke@0 3342
duke@0 3343 // subl $p,$q
duke@0 3344 if (penc < 8) {
duke@0 3345 if (qenc >= 8) {
duke@0 3346 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3347 }
duke@0 3348 } else {
duke@0 3349 if (qenc < 8) {
duke@0 3350 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3351 } else {
duke@0 3352 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3353 }
duke@0 3354 }
duke@0 3355 emit_opcode(cbuf, 0x2B);
duke@0 3356 emit_rm(cbuf, 0x3, penc & 7, qenc & 7);
duke@0 3357
duke@0 3358 // sbbl $tmp, $tmp
duke@0 3359 emit_opcode(cbuf, 0x1B);
duke@0 3360 emit_rm(cbuf, 0x3, tmpReg, tmpReg);
duke@0 3361
duke@0 3362 // andl $tmp, $y
duke@0 3363 if (yenc >= 8) {
duke@0 3364 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3365 }
duke@0 3366 emit_opcode(cbuf, 0x23);
duke@0 3367 emit_rm(cbuf, 0x3, tmpReg, yenc & 7);
duke@0 3368
duke@0 3369 // addl $p,$tmp
duke@0 3370 if (penc >= 8) {
duke@0 3371 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3372 }
duke@0 3373 emit_opcode(cbuf, 0x03);
duke@0 3374 emit_rm(cbuf, 0x3, penc & 7, tmpReg);
duke@0 3375 %}
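  // The four instructions above are the classic branchless conditional-add
  // idiom.  Roughly, as pseudocode (borrow is the carry flag produced by the
  // subtract):
  //
  //   p   -= q;                    // subl p, q      (CF = borrow)
  //   tmp  = borrow ? -1 : 0;      // sbbl tmp, tmp
  //   tmp &= y;                    // andl tmp, y
  //   p   += tmp;                  // addl p, tmp
  //
  // so the final p is (p - q), plus y exactly when the subtract borrowed.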
duke@0 3376
duke@0 3377 // Compare the longs and set -1, 0, or 1 into dst
duke@0 3378 enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
duke@0 3379 %{
duke@0 3380 int src1enc = $src1$$reg;
duke@0 3381 int src2enc = $src2$$reg;
duke@0 3382 int dstenc = $dst$$reg;
duke@0 3383
duke@0 3384 // cmpq $src1, $src2
duke@0 3385 if (src1enc < 8) {
duke@0 3386 if (src2enc < 8) {
duke@0 3387 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3388 } else {
duke@0 3389 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3390 }
duke@0 3391 } else {
duke@0 3392 if (src2enc < 8) {
duke@0 3393 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3394 } else {
duke@0 3395 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3396 }
duke@0 3397 }
duke@0 3398 emit_opcode(cbuf, 0x3B);
duke@0 3399 emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);
duke@0 3400
duke@0 3401 // movl $dst, -1
duke@0 3402 if (dstenc >= 8) {
duke@0 3403 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3404 }
duke@0 3405 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
duke@0 3406 emit_d32(cbuf, -1);
duke@0 3407
duke@0 3408 // jl,s done
duke@0 3409 emit_opcode(cbuf, 0x7C);
duke@0 3410 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
duke@0 3411
duke@0 3412 // setne $dst
duke@0 3413 if (dstenc >= 4) {
duke@0 3414 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3415 }
duke@0 3416 emit_opcode(cbuf, 0x0F);
duke@0 3417 emit_opcode(cbuf, 0x95);
duke@0 3418 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
duke@0 3419
duke@0 3420 // movzbl $dst, $dst
duke@0 3421 if (dstenc >= 4) {
duke@0 3422 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
duke@0 3423 }
duke@0 3424 emit_opcode(cbuf, 0x0F);
duke@0 3425 emit_opcode(cbuf, 0xB6);
duke@0 3426 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
duke@0 3427 %}
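  // Net effect of cmpl3_flag, roughly in C terms:
  //
  //   dst = -1;                          // movl dst, -1 (flags still from cmpq)
  //   if (!(src1 < src2)) {              // jl,s skips the tail when src1 < src2
  //     dst = (src1 != src2) ? 1 : 0;    // setne + movzbl
  //   }
  //
  // i.e. the usual signed three-way compare result -1 / 0 / +1; the hand
  // computed jump distance (0x06 vs 0x08) accounts for the extra REX bytes
  // the tail may need when dstenc >= 4.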
duke@0 3428
duke@0 3429 enc_class Push_ResultXD(regD dst) %{
duke@0 3430 int dstenc = $dst$$reg;
duke@0 3431
duke@0 3432 store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]
duke@0 3433
duke@0 3434 // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
duke@0 3435 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 3436 if (dstenc >= 8) {
duke@0 3437 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3438 }
duke@0 3439 emit_opcode (cbuf, 0x0F );
duke@0 3440 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12 );
duke@0 3441 encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);
duke@0 3442
duke@0 3443 // add rsp,8
duke@0 3444 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3445 emit_opcode(cbuf,0x83);
duke@0 3446 emit_rm(cbuf,0x3, 0x0, RSP_enc);
duke@0 3447 emit_d8(cbuf,0x08);
duke@0 3448 %}
duke@0 3449
duke@0 3450 enc_class Push_SrcXD(regD src) %{
duke@0 3451 int srcenc = $src$$reg;
duke@0 3452
duke@0 3453 // subq rsp,#8
duke@0 3454 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3455 emit_opcode(cbuf, 0x83);
duke@0 3456 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
duke@0 3457 emit_d8(cbuf, 0x8);
duke@0 3458
duke@0 3459 // movsd [rsp],src
duke@0 3460 emit_opcode(cbuf, 0xF2);
duke@0 3461 if (srcenc >= 8) {
duke@0 3462 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3463 }
duke@0 3464 emit_opcode(cbuf, 0x0F);
duke@0 3465 emit_opcode(cbuf, 0x11);
duke@0 3466 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);
duke@0 3467
duke@0 3468 // fldd [rsp]
duke@0 3469 emit_opcode(cbuf, 0x66);
duke@0 3470 emit_opcode(cbuf, 0xDD);
duke@0 3471 encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
duke@0 3472 %}
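  // Push_SrcXD and Push_ResultXD bracket an x87 operation by bouncing the
  // value through a stack slot, since there is no direct x87<->XMM move:
  // Push_SrcXD reserves a slot, stores the XMM source with movsd [rsp],src
  // and reloads it with fld; Push_ResultXD stores the x87 result with
  // fstp [rsp], reloads it into the XMM destination with movsd/movlpd, and
  // releases the slot.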
duke@0 3473
duke@0 3474
duke@0 3475 enc_class movq_ld(regD dst, memory mem) %{
duke@0 3476 MacroAssembler _masm(&cbuf);
twisti@624 3477 __ movq($dst$$XMMRegister, $mem$$Address);
duke@0 3478 %}
duke@0 3479
duke@0 3480 enc_class movq_st(memory mem, regD src) %{
duke@0 3481 MacroAssembler _masm(&cbuf);
twisti@624 3482 __ movq($mem$$Address, $src$$XMMRegister);
duke@0 3483 %}
duke@0 3484
duke@0 3485 enc_class pshufd_8x8(regF dst, regF src) %{
duke@0 3486 MacroAssembler _masm(&cbuf);
duke@0 3487
duke@0 3488 encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
duke@0 3489 __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
duke@0 3490 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
duke@0 3491 %}
duke@0 3492
duke@0 3493 enc_class pshufd_4x16(regF dst, regF src) %{
duke@0 3494 MacroAssembler _masm(&cbuf);
duke@0 3495
duke@0 3496 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
duke@0 3497 %}
duke@0 3498
duke@0 3499 enc_class pshufd(regD dst, regD src, int mode) %{
duke@0 3500 MacroAssembler _masm(&cbuf);
duke@0 3501
duke@0 3502 __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
duke@0 3503 %}
duke@0 3504
duke@0 3505 enc_class pxor(regD dst, regD src) %{
duke@0 3506 MacroAssembler _masm(&cbuf);
duke@0 3507
duke@0 3508 __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
duke@0 3509 %}
duke@0 3510
duke@0 3511 enc_class mov_i2x(regD dst, rRegI src) %{
duke@0 3512 MacroAssembler _masm(&cbuf);
duke@0 3513
duke@0 3514 __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
duke@0 3515 %}
duke@0 3516
duke@0 3517 // obj: object to lock
duke@0 3518 // box: box address (header location) -- killed
duke@0 3519 // tmp: rax -- killed
duke@0 3520 // scr: rbx -- killed
duke@0 3521 //
duke@0 3522 // What follows is a direct transliteration of fast_lock() and fast_unlock()
duke@0 3523 // from i486.ad. See that file for comments.
duke@0 3524 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
duke@0 3525 // use the shorter encoding. (Movl clears the high-order 32 bits).
duke@0 3526
duke@0 3527
duke@0 3528 enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
duke@0 3529 %{
duke@0 3530 Register objReg = as_Register((int)$obj$$reg);
duke@0 3531 Register boxReg = as_Register((int)$box$$reg);
duke@0 3532 Register tmpReg = as_Register($tmp$$reg);
duke@0 3533 Register scrReg = as_Register($scr$$reg);
duke@0 3534 MacroAssembler masm(&cbuf);
duke@0 3535
duke@0 3536 // Verify uniqueness of register assignments -- necessary but not sufficient
duke@0 3537 assert (objReg != boxReg && objReg != tmpReg &&
duke@0 3538 objReg != scrReg && tmpReg != scrReg, "invariant") ;
duke@0 3539
duke@0 3540 if (_counters != NULL) {
duke@0 3541 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
duke@0 3542 }
duke@0 3543 if (EmitSync & 1) {
never@304 3544 // Without cast to int32_t a movptr will destroy r10 which is typically obj
never@304 3545 masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
never@304 3546 masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
duke@0 3547 } else
duke@0 3548 if (EmitSync & 2) {
duke@0 3549 Label DONE_LABEL;
duke@0 3550 if (UseBiasedLocking) {
duke@0 3551 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
duke@0 3552 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
duke@0 3553 }
never@304 3554 // QQQ was movl...
never@304 3555 masm.movptr(tmpReg, 0x1);
never@304 3556 masm.orptr(tmpReg, Address(objReg, 0));
never@304 3557 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3558 if (os::is_MP()) {
duke@0 3559 masm.lock();
duke@0 3560 }
never@304 3561 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 3562 masm.jcc(Assembler::equal, DONE_LABEL);
duke@0 3563
duke@0 3564 // Recursive locking
never@304 3565 masm.subptr(tmpReg, rsp);
never@304 3566 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@304 3567 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3568
duke@0 3569 masm.bind(DONE_LABEL);
duke@0 3570 masm.nop(); // avoid branch to branch
duke@0 3571 } else {
duke@0 3572 Label DONE_LABEL, IsInflated, Egress;
duke@0 3573
never@304 3574 masm.movptr(tmpReg, Address(objReg, 0)) ;
never@304 3575 masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
never@304 3576 masm.jcc (Assembler::notZero, IsInflated) ;
never@304 3577
duke@0 3578 // it's stack-locked, biased or neutral
duke@0 3579 // TODO: optimize markword triage order to reduce the number of
duke@0 3580 // conditional branches in the most common cases.
duke@0 3581 // Beware -- there's a subtle invariant that fetch of the markword
duke@0 3582 // at [FETCH], below, will never observe a biased encoding (*101b).
duke@0 3583 // If this invariant is not held we'll suffer exclusion (safety) failure.
duke@0 3584
kvn@420 3585 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3586 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
never@304 3587 masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
duke@0 3588 }
duke@0 3589
never@304 3590 // was orq; will the 32-bit orl destroy the high-order bits?
never@304 3591 masm.orl (tmpReg, 1) ;
never@304 3592 masm.movptr(Address(boxReg, 0), tmpReg) ;
never@304 3593 if (os::is_MP()) { masm.lock(); }
never@304 3594 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 3595 if (_counters != NULL) {
duke@0 3596 masm.cond_inc32(Assembler::equal,
duke@0 3597 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3598 }
duke@0 3599 masm.jcc (Assembler::equal, DONE_LABEL);
duke@0 3600
duke@0 3601 // Recursive locking
never@304 3602 masm.subptr(tmpReg, rsp);
never@304 3603 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@304 3604 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3605 if (_counters != NULL) {
duke@0 3606 masm.cond_inc32(Assembler::equal,
duke@0 3607 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3608 }
duke@0 3609 masm.jmp (DONE_LABEL) ;
duke@0 3610
duke@0 3611 masm.bind (IsInflated) ;
duke@0 3612 // It's inflated
duke@0 3613
duke@0 3614 // TODO: someday avoid the ST-before-CAS penalty by
duke@0 3615 // relocating (deferring) the following ST.
duke@0 3616 // We should also think about trying a CAS without having
duke@0 3617 // fetched _owner. If the CAS is successful we may
duke@0 3618 // avoid an RTO->RTS upgrade on the $line.
never@304 3619 // Without cast to int32_t a movptr will destroy r10 which is typically obj
never@304 3620 masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
never@304 3621
never@304 3622 masm.mov (boxReg, tmpReg) ;
never@304 3623 masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
never@304 3624 masm.testptr(tmpReg, tmpReg) ;
never@304 3625 masm.jcc (Assembler::notZero, DONE_LABEL) ;
duke@0 3626
duke@0 3627 // It's inflated and appears unlocked
never@304 3628 if (os::is_MP()) { masm.lock(); }
never@304 3629 masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@0 3630 // Intentional fall-through into DONE_LABEL ...
duke@0 3631
duke@0 3632 masm.bind (DONE_LABEL) ;
duke@0 3633 masm.nop () ; // avoid jmp to jmp
duke@0 3634 }
duke@0 3635 %}
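  // Rough shape of the default (EmitSync == 0) fast-lock path emitted above,
  // as hedged pseudocode with the biased-locking and counter updates omitted
  // (tmp is rax, as cmpxchg requires):
  //
  //   mark = obj->mark;
  //   if (mark & 2) goto inflated;                    // monitor bit set
  //   box->displaced = mark | 1;                      // save unlocked mark word
  //   if (CAS(&obj->mark, mark | 1, box)) goto done;  // stack-lock succeeded
  //   tmp  = obj->mark - rsp;                         // cmpxchg left the mark in rax
  //   tmp &= 7 - os::vm_page_size();                  // zero iff mark points into our stack
  //   box->displaced = tmp;                           // => recursive stack-lock
  //   goto done;                                      // ZF from the and reports success
  // inflated:
  //   box->displaced = unused_mark;
  //   if (monitor->owner == NULL)
  //     CAS(&monitor->owner, NULL, r15_thread);       // try to grab the monitor
  // done:
  //   // ZF set => locked on the fast path; ZF clear => caller takes the slow path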
duke@0 3636
duke@0 3637 // obj: object to unlock
duke@0 3638 // box: box address (displaced header location), killed
duke@0 3639 // RBX: killed tmp; cannot be obj nor box
duke@0 3640 enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
duke@0 3641 %{
duke@0 3642
duke@0 3643 Register objReg = as_Register($obj$$reg);
duke@0 3644 Register boxReg = as_Register($box$$reg);
duke@0 3645 Register tmpReg = as_Register($tmp$$reg);
duke@0 3646 MacroAssembler masm(&cbuf);
duke@0 3647
never@304 3648 if (EmitSync & 4) {
never@304 3649 masm.cmpptr(rsp, 0) ;
duke@0 3650 } else
duke@0 3651 if (EmitSync & 8) {
duke@0 3652 Label DONE_LABEL;
duke@0 3653 if (UseBiasedLocking) {
duke@0 3654 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3655 }
duke@0 3656
duke@0 3657 // Check whether the displaced header is 0
duke@0 3658 //(=> recursive unlock)
never@304 3659 masm.movptr(tmpReg, Address(boxReg, 0));
never@304 3660 masm.testptr(tmpReg, tmpReg);
duke@0 3661 masm.jcc(Assembler::zero, DONE_LABEL);
duke@0 3662
duke@0 3663 // If not recursive lock, reset the header to displaced header
duke@0 3664 if (os::is_MP()) {
duke@0 3665 masm.lock();
duke@0 3666 }
never@304 3667 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3668 masm.bind(DONE_LABEL);
duke@0 3669 masm.nop(); // avoid branch to branch
duke@0 3670 } else {
duke@0 3671 Label DONE_LABEL, Stacked, CheckSucc ;
duke@0 3672
kvn@420 3673 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3674 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3675 }
never@304 3676
never@304 3677 masm.movptr(tmpReg, Address(objReg, 0)) ;
never@304 3678 masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
never@304 3679 masm.jcc (Assembler::zero, DONE_LABEL) ;
never@304 3680 masm.testl (tmpReg, 0x02) ;
never@304 3681 masm.jcc (Assembler::zero, Stacked) ;
never@304 3682
duke@0 3683 // It's inflated
never@304 3684 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
never@304 3685 masm.xorptr(boxReg, r15_thread) ;
never@304 3686 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
never@304 3687 masm.jcc (Assembler::notZero, DONE_LABEL) ;
never@304 3688 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
never@304 3689 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
never@304 3690 masm.jcc (Assembler::notZero, CheckSucc) ;
never@304 3691 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
never@304 3692 masm.jmp (DONE_LABEL) ;
never@304 3693
never@304 3694 if ((EmitSync & 65536) == 0) {
duke@0 3695 Label LSuccess, LGoSlowPath ;
duke@0 3696 masm.bind (CheckSucc) ;
never@304 3697 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3698 masm.jcc (Assembler::zero, LGoSlowPath) ;
duke@0 3699
duke@0 3700 // I'd much rather use lock:andl m->_owner, 0 as it's faster than
duke@0 3701 // the explicit ST;MEMBAR combination, but masm doesn't currently support
duke@0 3702 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
duke@0 3703 // are all faster when the write buffer is populated.
never@304 3704 masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3705 if (os::is_MP()) {
never@304 3706 masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
duke@0 3707 }
never@304 3708 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3709 masm.jcc (Assembler::notZero, LSuccess) ;
duke@0 3710
never@304 3711 masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX
duke@0 3712 if (os::is_MP()) { masm.lock(); }
never@304 3713 masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
duke@0 3714 masm.jcc (Assembler::notEqual, LSuccess) ;
duke@0 3715 // Intentional fall-through into slow-path
duke@0 3716
duke@0 3717 masm.bind (LGoSlowPath) ;
duke@0 3718 masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
duke@0 3719 masm.jmp (DONE_LABEL) ;
duke@0 3720
duke@0 3721 masm.bind (LSuccess) ;
duke@0 3722 masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success
duke@0 3723 masm.jmp (DONE_LABEL) ;
duke@0 3724 }
duke@0 3725
never@304 3726 masm.bind (Stacked) ;
never@304 3727 masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
never@304 3728 if (os::is_MP()) { masm.lock(); }
never@304 3729 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3730
duke@0 3731 if (EmitSync & 65536) {
duke@0 3732 masm.bind (CheckSucc) ;
duke@0 3733 }
duke@0 3734 masm.bind(DONE_LABEL);
duke@0 3735 if (EmitSync & 32768) {
duke@0 3736 masm.nop(); // avoid branch to branch
duke@0 3737 }
duke@0 3738 }
duke@0 3739 %}
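  // Rough shape of the default (EmitSync == 0) fast-unlock path above, again
  // as hedged pseudocode (box is rax; biased-locking exit omitted):
  //
  //   if (box->displaced == 0) goto done;            // recursive stack-lock exit
  //   if (obj->mark & 2) goto inflated;
  //   CAS(&obj->mark, box, box->displaced);          // restore displaced mark word
  //   goto done;
  // inflated:
  //   if (monitor->recursions != 0 || monitor->owner != r15_thread)
  //     goto done;                                   // ZF clear => slow path
  //   if (monitor->cxq == NULL && monitor->EntryList == NULL) {
  //     monitor->owner = NULL; goto done;
  //   }
  //   // otherwise fall into the successor-check / retry sequence coded above
  // done:
  //   // ZF set => unlocked on the fast path; ZF clear => caller takes the slow path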
duke@0 3740
rasbold@169 3741
duke@0 3742 enc_class enc_rethrow()
duke@0 3743 %{
duke@0 3744 cbuf.set_inst_mark();
duke@0 3745 emit_opcode(cbuf, 0xE9); // jmp entry
duke@0 3746 emit_d32_reloc(cbuf,
duke@0 3747 (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4),
duke@0 3748 runtime_call_Relocation::spec(),
duke@0 3749 RELOC_DISP32);
duke@0 3750 %}
duke@0 3751
duke@0 3752 enc_class absF_encoding(regF dst)
duke@0 3753 %{
duke@0 3754 int dstenc = $dst$$reg;
never@304 3755 address signmask_address = (address) StubRoutines::x86::float_sign_mask();
duke@0 3756
duke@0 3757 cbuf.set_inst_mark();
duke@0 3758 if (dstenc >= 8) {
duke@0 3759 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3760 dstenc -= 8;
duke@0 3761 }
duke@0 3762 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3763 emit_opcode(cbuf, 0x0F);
duke@0 3764 emit_opcode(cbuf, 0x54);
duke@0 3765 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3766 emit_d32_reloc(cbuf, signmask_address);
duke@0 3767 %}
duke@0 3768
duke@0 3769 enc_class absD_encoding(regD dst)
duke@0 3770 %{
duke@0 3771 int dstenc = $dst$$reg;
never@304 3772 address signmask_address = (address) StubRoutines::x86::double_sign_mask();
duke@0 3773
duke@0 3774 cbuf.set_inst_mark();
duke@0 3775 emit_opcode(cbuf, 0x66);
duke@0 3776 if (dstenc >= 8) {
duke@0 3777 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3778 dstenc -= 8;
duke@0 3779 }
duke@0 3780 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3781 emit_opcode(cbuf, 0x0F);
duke@0 3782 emit_opcode(cbuf, 0x54);
duke@0 3783 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3784 emit_d32_reloc(cbuf, signmask_address);
duke@0 3785 %}
duke@0 3786
duke@0 3787 enc_class negF_encoding(regF dst)
duke@0 3788 %{
duke@0 3789 int dstenc = $dst$$reg;
never@304 3790 address signflip_address = (address) StubRoutines::x86::float_sign_flip();
duke@0 3791
duke@0 3792 cbuf.set_inst_mark();
duke@0 3793 if (dstenc >= 8) {
duke@0 3794 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3795 dstenc -= 8;
duke@0 3796 }
duke@0 3797 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3798 emit_opcode(cbuf, 0x0F);
duke@0 3799 emit_opcode(cbuf, 0x57);
duke@0 3800 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3801 emit_d32_reloc(cbuf, signflip_address);
duke@0 3802 %}
duke@0 3803
duke@0 3804 enc_class negD_encoding(regD dst)
duke@0 3805 %{
duke@0 3806 int dstenc = $dst$$reg;
never@304 3807 address signflip_address = (address) StubRoutines::x86::double_sign_flip();
duke@0 3808
duke@0 3809 cbuf.set_inst_mark();
duke@0 3810 emit_opcode(cbuf, 0x66);
duke@0 3811 if (dstenc >= 8) {
duke@0 3812 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3813 dstenc -= 8;
duke@0 3814 }
duke@0 3815 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3816 emit_opcode(cbuf, 0x0F);
duke@0 3817 emit_opcode(cbuf, 0x57);
duke@0 3818 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101
duke@0 3819 emit_d32_reloc(cbuf, signflip_address);
duke@0 3820 %}
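  // The four encodings above implement float/double abs and neg as a packed
  // logical op against a RIP-relative constant: opcode 0x0F 0x54 is ANDPS
  // (abs clears the sign bit using float_sign_mask / double_sign_mask) and
  // 0x0F 0x57 is XORPS (neg flips the sign bit using the *_sign_flip
  // constants); the leading 0x66 prefix in the D variants selects the
  // packed-double forms ANDPD / XORPD.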
duke@0 3821
duke@0 3822 enc_class f2i_fixup(rRegI dst, regF src)
duke@0 3823 %{
duke@0 3824 int dstenc = $dst$$reg;
duke@0 3825 int srcenc = $src$$reg;
duke@0 3826
duke@0 3827 // cmpl $dst, #0x80000000
duke@0 3828 if (dstenc >= 8) {
duke@0 3829 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3830 }
duke@0 3831 emit_opcode(cbuf, 0x81);
duke@0 3832 emit_rm(cbuf, 0x3, 0x7, dstenc & 7);
duke@0 3833 emit_d32(cbuf, 0x80000000);
duke@0 3834
duke@0 3835 // jne,s done
duke@0 3836 emit_opcode(cbuf, 0x75);
duke@0 3837 if (srcenc < 8 && dstenc < 8) {
duke@0 3838 emit_d8(cbuf, 0xF);
duke@0 3839 } else if (srcenc >= 8 && dstenc >= 8) {
duke@0 3840 emit_d8(cbuf, 0x11);
duke@0 3841 } else {
duke@0 3842 emit_d8(cbuf, 0x10);
duke@0 3843 }
duke@0 3844
duke@0 3845 // subq rsp, #8
duke@0 3846 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3847 emit_opcode(cbuf, 0x83);
duke@0 3848 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
duke@0 3849 emit_d8(cbuf, 8);
duke@0 3850
duke@0 3851 // movss [rsp], $src
duke@0 3852 emit_opcode(cbuf, 0xF3);
duke@0 3853 if (srcenc >= 8) {
duke@0 3854 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3855 }
duke@0 3856 emit_opcode(cbuf, 0x0F);
duke@0 3857 emit_opcode(cbuf, 0x11);
duke@0 3858 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes
duke@0 3859
duke@0 3860 // call f2i_fixup
duke@0 3861 cbuf.set_inst_mark();
duke@0 3862 emit_opcode(cbuf, 0xE8);
duke@0 3863 emit_d32_reloc(cbuf,
duke@0 3864 (int)
never@304 3865 (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
duke@0 3866 runtime_call_Relocation::spec(),
duke@0 3867 RELOC_DISP32);
duke@0 3868
duke@0 3869 // popq $dst
duke@0 3870 if (dstenc >= 8) {
duke@0 3871 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3872 }
duke@0 3873 emit_opcode(cbuf, 0x58 | (dstenc & 7));
duke@0 3874
duke@0 3875 // done:
duke@0 3876 %}
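  // The compare against 0x80000000 above catches the one awkward cvttss2si
  // case: on NaN or out-of-range input the hardware produces the "integer
  // indefinite" value 0x80000000.  Only then is the original float spilled
  // to [rsp] and the f2i_fixup stub called, which is expected to write the
  // Java-specified result (0 for NaN, Integer.MIN_VALUE / MAX_VALUE for
  // overflow) back into that slot, where the trailing popq picks it up.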
duke@0 3877
duke@0 3878 enc_class f2l_fixup(rRegL dst, regF src)
duke@0 3879 %{
duke@0 3880 int dstenc = $dst$$reg;
duke@0 3881 int srcenc = $src$$reg;
never@304 3882 address const_address = (address) StubRoutines::x86::double_sign_flip();
duke@0 3883
duke@0 3884 // cmpq $dst, [0x8000000000000000]
duke@0 3885 cbuf.set_inst_mark();
duke@0 3886 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 3887 emit_opcode(cbuf, 0x39);
duke@0 3888 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 3889 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101
duke@0 3890 emit_d32_reloc(cbuf, const_address);
duke@0 3891
duke@0 3892
duke@0 3893 // jne,s done
duke@0 3894 emit_opcode(cbuf, 0x75);
duke@0 3895 if (srcenc < 8 && dstenc < 8) {
duke@0 3896 emit_d8(cbuf, 0xF);
duke@0 3897 } else if (srcenc >= 8 && dstenc >= 8) {
duke@0 3898 emit_d8(cbuf, 0x11);
duke@0 3899 } else {
duke@0 3900 emit_d8(cbuf, 0x10);
duke@0 3901 }
duke@0 3902
duke@0 3903 // subq rsp, #8
duke@0 3904 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3905 emit_opcode(cbuf, 0x83);
duke@0 3906 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
duke@0 3907 emit_d8(cbuf, 8);
duke@0 3908
duke@0 3909 // movss [rsp], $src
duke@0 3910 emit_opcode(cbuf, 0xF3);
duke@0 3911 if (srcenc >= 8) {