annotate src/cpu/x86/vm/x86_64.ad @ 2953:127b3692c168

7116452: Add support for AVX instructions
Summary: Added support for AVX extension to the x86 instruction set.
Reviewed-by: never
author kvn
date Wed, 14 Dec 2011 14:54:38 -0800
parents db2e64ca2d5a
children 65149e74c706
rev   line source
duke@0 1 //
kvn@2167 2 // Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
duke@0 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 //
duke@0 5 // This code is free software; you can redistribute it and/or modify it
duke@0 6 // under the terms of the GNU General Public License version 2 only, as
duke@0 7 // published by the Free Software Foundation.
duke@0 8 //
duke@0 9 // This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 // version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 // accompanied this code).
duke@0 14 //
duke@0 15 // You should have received a copy of the GNU General Public License version
duke@0 16 // 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 //
trims@1472 19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 // or visit www.oracle.com if you need additional information or have any
trims@1472 21 // questions.
duke@0 22 //
duke@0 23 //
duke@0 24
duke@0 25 // AMD64 Architecture Description File
duke@0 26
duke@0 27 //----------REGISTER DEFINITION BLOCK------------------------------------------
duke@0 28 // This information is used by the matcher and the register allocator to
duke@0 29 // describe individual registers and classes of registers within the target
duke@0 30 // architecture.
duke@0 31
duke@0 32 register %{
duke@0 33 //----------Architecture Description Register Definitions----------------------
duke@0 34 // General Registers
duke@0 35 // "reg_def" name ( register save type, C convention save type,
duke@0 36 // ideal register type, encoding );
duke@0 37 // Register Save Types:
duke@0 38 //
duke@0 39 // NS = No-Save: The register allocator assumes that these registers
duke@0 40 // can be used without saving upon entry to the method, &
duke@0 41 // that they do not need to be saved at call sites.
duke@0 42 //
duke@0 43 // SOC = Save-On-Call: The register allocator assumes that these registers
duke@0 44 // can be used without saving upon entry to the method,
duke@0 45 // but that they must be saved at call sites.
duke@0 46 //
duke@0 47 // SOE = Save-On-Entry: The register allocator assumes that these registers
duke@0 48 // must be saved before using them upon entry to the
duke@0 49 // method, but they do not need to be saved at call
duke@0 50 // sites.
duke@0 51 //
duke@0 52 // AS = Always-Save: The register allocator assumes that these registers
duke@0 53 // must be saved before using them upon entry to the
duke@0 54 // method, & that they must be saved at call sites.
duke@0 55 //
duke@0 56 // Ideal Register Type is used to determine how to save & restore a
duke@0 57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
duke@0 58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
duke@0 59 //
duke@0 60 // The encoding number is the actual bit-pattern placed into the opcodes.
duke@0 61
duke@0 62 // General Registers
duke@0 63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
duke@0 64 // used as byte registers)
duke@0 65
duke@0 66 // RBX, RSI, and RDI were previously set as save-on-entry for Java code.
duke@0 67 // SOE was turned off in Java code due to the frequent use of uncommon traps.
duke@0 68 // Now that the allocator is better, RSI and RDI are turned back on as SOE registers.
duke@0 69
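// A worked reading of the first few definitions below, as a sketch based only on
// the conventions described above:
//
//   reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
//
// - SOC (register save type): compiled Java code treats RBX as save-on-call, so the
//   allocator may use it freely but must assume calls clobber it.
// - SOE (C convention save type): the native C ABI treats RBX as callee-saved.
// - Op_RegI: spills of this register are emitted as LoadI/StoreI.
// - 3: the hardware encoding placed into instruction opcodes.
// - rbx->as_VMReg(): the VM-level register being described; the companion RBX_H
//   entry names the upper half of the 64-bit register so pairs stay together.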
duke@0 70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
duke@0 71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
duke@0 72
duke@0 73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
duke@0 74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
duke@0 75
duke@0 76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
duke@0 77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
duke@0 78
duke@0 79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
duke@0 80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
duke@0 81
duke@0 82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
duke@0 83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
duke@0 84
duke@0 85 // Now that adapter frames are gone, RBP is always saved and restored by the prolog/epilog code.
duke@0 86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
duke@0 87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
duke@0 88
duke@0 89 #ifdef _WIN64
duke@0 90
duke@0 91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
duke@0 92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 93
duke@0 94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
duke@0 95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 96
duke@0 97 #else
duke@0 98
duke@0 99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
duke@0 100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 101
duke@0 102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
duke@0 103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 104
duke@0 105 #endif
duke@0 106
duke@0 107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
duke@0 108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
duke@0 109
duke@0 110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
duke@0 111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
duke@0 112
duke@0 113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
duke@0 114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
duke@0 115
duke@0 116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
duke@0 117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
duke@0 118
duke@0 119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
duke@0 120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
duke@0 121
duke@0 122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
duke@0 123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
duke@0 124
duke@0 125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
duke@0 126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
duke@0 127
duke@0 128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
duke@0 129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
duke@0 130
duke@0 131
duke@0 132 // Floating Point Registers
duke@0 133
duke@0 134 // XMM registers. 128-bit registers of 4 words each, labeled (a)-(d).
duke@0 135 // Word (a) in each register holds a Float; words (a)-(b) hold a Double. We
duke@0 136 // currently do not use the SIMD capabilities, so registers (c)-(d) are
duke@0 137 // unused at the moment.
duke@0 138 // XMM8-XMM15 must be encoded with REX.
duke@0 139 // Linux ABI: No register preserved across function calls
duke@0 140 // XMM0-XMM7 might hold parameters
duke@0 141 // Windows ABI: XMM6-XMM15 preserved across function calls
duke@0 142 // XMM0-XMM3 might hold parameters
duke@0 143
duke@0 144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
duke@0 145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
duke@0 146
duke@0 147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
duke@0 148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
duke@0 149
duke@0 150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
duke@0 151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
duke@0 152
duke@0 153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
duke@0 154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
duke@0 155
duke@0 156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
duke@0 157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
duke@0 158
duke@0 159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
duke@0 160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
duke@0 161
duke@0 162 #ifdef _WIN64
duke@0 163
duke@0 164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
duke@0 165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 166
duke@0 167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
duke@0 168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 169
duke@0 170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
duke@0 171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 172
duke@0 173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
duke@0 174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 175
duke@0 176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
duke@0 177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 178
duke@0 179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
duke@0 180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 181
duke@0 182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
duke@0 183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 184
duke@0 185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
duke@0 186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 187
duke@0 188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
duke@0 189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 190
duke@0 191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
duke@0 192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 193
duke@0 194 #else
duke@0 195
duke@0 196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
duke@0 197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 198
duke@0 199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
duke@0 200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 201
duke@0 202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
duke@0 203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 204
duke@0 205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
duke@0 206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 207
duke@0 208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
duke@0 209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 210
duke@0 211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
duke@0 212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 213
duke@0 214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
duke@0 215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 216
duke@0 217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
duke@0 218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 219
duke@0 220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
duke@0 221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 222
duke@0 223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
duke@0 224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 225
duke@0 226 #endif // _WIN64
duke@0 227
duke@0 228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
duke@0 229
duke@0 230 // Specify priority of register selection within phases of register
duke@0 231 // allocation. Highest priority is first. A useful heuristic is to
duke@0 232 // give registers a low priority when they are required by machine
duke@0 233 // instructions, like EAX and EDX on I486, and choose no-save registers
duke@0 234 // before save-on-call, & save-on-call before save-on-entry. Registers
duke@0 235 // which participate in fixed calling sequences should come last.
duke@0 236 // Registers which are used as pairs must fall on an even boundary.
duke@0 237
duke@0 238 alloc_class chunk0(R10, R10_H,
duke@0 239 R11, R11_H,
duke@0 240 R8, R8_H,
duke@0 241 R9, R9_H,
duke@0 242 R12, R12_H,
duke@0 243 RCX, RCX_H,
duke@0 244 RBX, RBX_H,
duke@0 245 RDI, RDI_H,
duke@0 246 RDX, RDX_H,
duke@0 247 RSI, RSI_H,
duke@0 248 RAX, RAX_H,
duke@0 249 RBP, RBP_H,
duke@0 250 R13, R13_H,
duke@0 251 R14, R14_H,
duke@0 252 R15, R15_H,
duke@0 253 RSP, RSP_H);
duke@0 254
duke@0 255 // XXX probably use 8-15 first on Linux
duke@0 256 alloc_class chunk1(XMM0, XMM0_H,
duke@0 257 XMM1, XMM1_H,
duke@0 258 XMM2, XMM2_H,
duke@0 259 XMM3, XMM3_H,
duke@0 260 XMM4, XMM4_H,
duke@0 261 XMM5, XMM5_H,
duke@0 262 XMM6, XMM6_H,
duke@0 263 XMM7, XMM7_H,
duke@0 264 XMM8, XMM8_H,
duke@0 265 XMM9, XMM9_H,
duke@0 266 XMM10, XMM10_H,
duke@0 267 XMM11, XMM11_H,
duke@0 268 XMM12, XMM12_H,
duke@0 269 XMM13, XMM13_H,
duke@0 270 XMM14, XMM14_H,
duke@0 271 XMM15, XMM15_H);
duke@0 272
duke@0 273 alloc_class chunk2(RFLAGS);
duke@0 274
duke@0 275
duke@0 276 //----------Architecture Description Register Classes--------------------------
duke@0 277 // Several register classes are automatically defined based upon information in
duke@0 278 // this architecture description.
duke@0 279 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
duke@0 280 // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
duke@0 281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
duke@0 282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
duke@0 283 //
duke@0 284
duke@0 285 // Class for all pointer registers (including RSP)
duke@0 286 reg_class any_reg(RAX, RAX_H,
duke@0 287 RDX, RDX_H,
duke@0 288 RBP, RBP_H,
duke@0 289 RDI, RDI_H,
duke@0 290 RSI, RSI_H,
duke@0 291 RCX, RCX_H,
duke@0 292 RBX, RBX_H,
duke@0 293 RSP, RSP_H,
duke@0 294 R8, R8_H,
duke@0 295 R9, R9_H,
duke@0 296 R10, R10_H,
duke@0 297 R11, R11_H,
duke@0 298 R12, R12_H,
duke@0 299 R13, R13_H,
duke@0 300 R14, R14_H,
duke@0 301 R15, R15_H);
duke@0 302
duke@0 303 // Class for all pointer registers except RSP
duke@0 304 reg_class ptr_reg(RAX, RAX_H,
duke@0 305 RDX, RDX_H,
duke@0 306 RBP, RBP_H,
duke@0 307 RDI, RDI_H,
duke@0 308 RSI, RSI_H,
duke@0 309 RCX, RCX_H,
duke@0 310 RBX, RBX_H,
duke@0 311 R8, R8_H,
duke@0 312 R9, R9_H,
duke@0 313 R10, R10_H,
duke@0 314 R11, R11_H,
duke@0 315 R13, R13_H,
duke@0 316 R14, R14_H);
duke@0 317
duke@0 318 // Class for all pointer registers except RAX and RSP
duke@0 319 reg_class ptr_no_rax_reg(RDX, RDX_H,
duke@0 320 RBP, RBP_H,
duke@0 321 RDI, RDI_H,
duke@0 322 RSI, RSI_H,
duke@0 323 RCX, RCX_H,
duke@0 324 RBX, RBX_H,
duke@0 325 R8, R8_H,
duke@0 326 R9, R9_H,
duke@0 327 R10, R10_H,
duke@0 328 R11, R11_H,
duke@0 329 R13, R13_H,
duke@0 330 R14, R14_H);
duke@0 331
duke@0 332 reg_class ptr_no_rbp_reg(RDX, RDX_H,
duke@0 333 RAX, RAX_H,
duke@0 334 RDI, RDI_H,
duke@0 335 RSI, RSI_H,
duke@0 336 RCX, RCX_H,
duke@0 337 RBX, RBX_H,
duke@0 338 R8, R8_H,
duke@0 339 R9, R9_H,
duke@0 340 R10, R10_H,
duke@0 341 R11, R11_H,
duke@0 342 R13, R13_H,
duke@0 343 R14, R14_H);
duke@0 344
duke@0 345 // Class for all pointer registers except RAX, RBX and RSP
duke@0 346 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
duke@0 347 RBP, RBP_H,
duke@0 348 RDI, RDI_H,
duke@0 349 RSI, RSI_H,
duke@0 350 RCX, RCX_H,
duke@0 351 R8, R8_H,
duke@0 352 R9, R9_H,
duke@0 353 R10, R10_H,
duke@0 354 R11, R11_H,
duke@0 355 R13, R13_H,
duke@0 356 R14, R14_H);
duke@0 357
duke@0 358 // Singleton class for RAX pointer register
duke@0 359 reg_class ptr_rax_reg(RAX, RAX_H);
duke@0 360
duke@0 361 // Singleton class for RBX pointer register
duke@0 362 reg_class ptr_rbx_reg(RBX, RBX_H);
duke@0 363
duke@0 364 // Singleton class for RSI pointer register
duke@0 365 reg_class ptr_rsi_reg(RSI, RSI_H);
duke@0 366
duke@0 367 // Singleton class for RDI pointer register
duke@0 368 reg_class ptr_rdi_reg(RDI, RDI_H);
duke@0 369
duke@0 370 // Singleton class for RBP pointer register
duke@0 371 reg_class ptr_rbp_reg(RBP, RBP_H);
duke@0 372
duke@0 373 // Singleton class for stack pointer
duke@0 374 reg_class ptr_rsp_reg(RSP, RSP_H);
duke@0 375
duke@0 376 // Singleton class for TLS pointer
duke@0 377 reg_class ptr_r15_reg(R15, R15_H);
duke@0 378
duke@0 379 // Class for all long registers (except RSP)
duke@0 380 reg_class long_reg(RAX, RAX_H,
duke@0 381 RDX, RDX_H,
duke@0 382 RBP, RBP_H,
duke@0 383 RDI, RDI_H,
duke@0 384 RSI, RSI_H,
duke@0 385 RCX, RCX_H,
duke@0 386 RBX, RBX_H,
duke@0 387 R8, R8_H,
duke@0 388 R9, R9_H,
duke@0 389 R10, R10_H,
duke@0 390 R11, R11_H,
duke@0 391 R13, R13_H,
duke@0 392 R14, R14_H);
duke@0 393
duke@0 394 // Class for all long registers except RAX, RDX (and RSP)
duke@0 395 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
duke@0 396 RDI, RDI_H,
duke@0 397 RSI, RSI_H,
duke@0 398 RCX, RCX_H,
duke@0 399 RBX, RBX_H,
duke@0 400 R8, R8_H,
duke@0 401 R9, R9_H,
duke@0 402 R10, R10_H,
duke@0 403 R11, R11_H,
duke@0 404 R13, R13_H,
duke@0 405 R14, R14_H);
duke@0 406
duke@0 407 // Class for all long registers except RCX (and RSP)
duke@0 408 reg_class long_no_rcx_reg(RBP, RBP_H,
duke@0 409 RDI, RDI_H,
duke@0 410 RSI, RSI_H,
duke@0 411 RAX, RAX_H,
duke@0 412 RDX, RDX_H,
duke@0 413 RBX, RBX_H,
duke@0 414 R8, R8_H,
duke@0 415 R9, R9_H,
duke@0 416 R10, R10_H,
duke@0 417 R11, R11_H,
duke@0 418 R13, R13_H,
duke@0 419 R14, R14_H);
duke@0 420
duke@0 421 // Class for all long registers except RAX (and RSP)
duke@0 422 reg_class long_no_rax_reg(RBP, RBP_H,
duke@0 423 RDX, RDX_H,
duke@0 424 RDI, RDI_H,
duke@0 425 RSI, RSI_H,
duke@0 426 RCX, RCX_H,
duke@0 427 RBX, RBX_H,
duke@0 428 R8, R8_H,
duke@0 429 R9, R9_H,
duke@0 430 R10, R10_H,
duke@0 431 R11, R11_H,
duke@0 432 R13, R13_H,
duke@0 433 R14, R14_H);
duke@0 434
duke@0 435 // Singleton class for RAX long register
duke@0 436 reg_class long_rax_reg(RAX, RAX_H);
duke@0 437
duke@0 438 // Singleton class for RCX long register
duke@0 439 reg_class long_rcx_reg(RCX, RCX_H);
duke@0 440
duke@0 441 // Singleton class for RDX long register
duke@0 442 reg_class long_rdx_reg(RDX, RDX_H);
duke@0 443
duke@0 444 // Class for all int registers (except RSP)
duke@0 445 reg_class int_reg(RAX,
duke@0 446 RDX,
duke@0 447 RBP,
duke@0 448 RDI,
duke@0 449 RSI,
duke@0 450 RCX,
duke@0 451 RBX,
duke@0 452 R8,
duke@0 453 R9,
duke@0 454 R10,
duke@0 455 R11,
duke@0 456 R13,
duke@0 457 R14);
duke@0 458
duke@0 459 // Class for all int registers except RCX (and RSP)
duke@0 460 reg_class int_no_rcx_reg(RAX,
duke@0 461 RDX,
duke@0 462 RBP,
duke@0 463 RDI,
duke@0 464 RSI,
duke@0 465 RBX,
duke@0 466 R8,
duke@0 467 R9,
duke@0 468 R10,
duke@0 469 R11,
duke@0 470 R13,
duke@0 471 R14);
duke@0 472
duke@0 473 // Class for all int registers except RAX, RDX (and RSP)
duke@0 474 reg_class int_no_rax_rdx_reg(RBP,
never@304 475 RDI,
duke@0 476 RSI,
duke@0 477 RCX,
duke@0 478 RBX,
duke@0 479 R8,
duke@0 480 R9,
duke@0 481 R10,
duke@0 482 R11,
duke@0 483 R13,
duke@0 484 R14);
duke@0 485
duke@0 486 // Singleton class for RAX int register
duke@0 487 reg_class int_rax_reg(RAX);
duke@0 488
duke@0 489 // Singleton class for RBX int register
duke@0 490 reg_class int_rbx_reg(RBX);
duke@0 491
duke@0 492 // Singleton class for RCX int register
duke@0 493 reg_class int_rcx_reg(RCX);
duke@0 494
duke@0 495 // Singleton class for RDX int register
duke@0 496 reg_class int_rdx_reg(RDX);
duke@0 497
duke@0 498 // Singleton class for RDI int register
duke@0 499 reg_class int_rdi_reg(RDI);
duke@0 500
duke@0 501 // Singleton class for instruction pointer
duke@0 502 // reg_class ip_reg(RIP);
duke@0 503
duke@0 504 // Singleton class for condition codes
duke@0 505 reg_class int_flags(RFLAGS);
duke@0 506
duke@0 507 // Class for all float registers
duke@0 508 reg_class float_reg(XMM0,
duke@0 509 XMM1,
duke@0 510 XMM2,
duke@0 511 XMM3,
duke@0 512 XMM4,
duke@0 513 XMM5,
duke@0 514 XMM6,
duke@0 515 XMM7,
duke@0 516 XMM8,
duke@0 517 XMM9,
duke@0 518 XMM10,
duke@0 519 XMM11,
duke@0 520 XMM12,
duke@0 521 XMM13,
duke@0 522 XMM14,
duke@0 523 XMM15);
duke@0 524
duke@0 525 // Class for all double registers
duke@0 526 reg_class double_reg(XMM0, XMM0_H,
duke@0 527 XMM1, XMM1_H,
duke@0 528 XMM2, XMM2_H,
duke@0 529 XMM3, XMM3_H,
duke@0 530 XMM4, XMM4_H,
duke@0 531 XMM5, XMM5_H,
duke@0 532 XMM6, XMM6_H,
duke@0 533 XMM7, XMM7_H,
duke@0 534 XMM8, XMM8_H,
duke@0 535 XMM9, XMM9_H,
duke@0 536 XMM10, XMM10_H,
duke@0 537 XMM11, XMM11_H,
duke@0 538 XMM12, XMM12_H,
duke@0 539 XMM13, XMM13_H,
duke@0 540 XMM14, XMM14_H,
duke@0 541 XMM15, XMM15_H);
duke@0 542 %}
duke@0 543
duke@0 544
duke@0 545 //----------SOURCE BLOCK-------------------------------------------------------
duke@0 546 // This is a block of C++ code which provides values, functions, and
duke@0 547 // definitions necessary in the rest of the architecture description
duke@0 548 source %{
never@304 549 #define RELOC_IMM64 Assembler::imm_operand
duke@0 550 #define RELOC_DISP32 Assembler::disp32_operand
duke@0 551
duke@0 552 #define __ _masm.
duke@0 553
twisti@1137 554 static int preserve_SP_size() {
kvn@2953 555 return 3; // rex.w, op, rm(reg/reg)
twisti@1137 556 }
twisti@1137 557
duke@0 558 // !!!!! Special hack to get all types of calls to specify the byte offset
duke@0 559 // from the start of the call to the point where the return address
duke@0 560 // will point.
duke@0 561 int MachCallStaticJavaNode::ret_addr_offset()
duke@0 562 {
twisti@1137 563 int offset = 5; // 5 bytes from start of call to where return address points
twisti@1137 564 if (_method_handle_invoke)
twisti@1137 565 offset += preserve_SP_size();
twisti@1137 566 return offset;
duke@0 567 }
duke@0 568
duke@0 569 int MachCallDynamicJavaNode::ret_addr_offset()
duke@0 570 {
duke@0 571 return 15; // 15 bytes from start of call to where return address points
duke@0 572 }
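// A worked example of the offsets above (byte counts only, as a sketch):
//
//   call rel32                     E8 + disp32               -> 5 bytes
//   mov rbp, rsp                   REX.W + op + ModRM        -> 3 bytes (preserve_SP_size)
//   mov rax, imm64 ; call rel32    (REX.W + B8 + imm64) + 5  -> 10 + 5 = 15 bytes
//
// so a static Java call returns 5 (plus 3 for a method handle invoke that first
// preserves RSP in RBP), and a dynamic Java call returns 15.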
duke@0 573
duke@0 574 // In os_cpu .ad file
duke@0 575 // int MachCallRuntimeNode::ret_addr_offset()
duke@0 576
iveresov@2251 577 // Indicate whether the safepoint node needs the polling page as an input;
iveresov@2251 578 // it does if the polling page is more than disp32 away.
duke@0 579 bool SafePointNode::needs_polling_address_input()
duke@0 580 {
iveresov@2251 581 return Assembler::is_polling_page_far();
duke@0 582 }
duke@0 583
duke@0 584 //
duke@0 585 // Compute padding required for nodes which need alignment
duke@0 586 //
duke@0 587
duke@0 588 // The address of the call instruction needs to be 4-byte aligned to
duke@0 589 // ensure that it does not span a cache line so that it can be patched.
duke@0 590 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
duke@0 591 {
duke@0 592 current_offset += 1; // skip call opcode byte
duke@0 593 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 594 }
duke@0 595
duke@0 596 // The address of the call instruction needs to be 4-byte aligned to
duke@0 597 // ensure that it does not span a cache line so that it can be patched.
twisti@1137 598 int CallStaticJavaHandleNode::compute_padding(int current_offset) const
twisti@1137 599 {
twisti@1137 600 current_offset += preserve_SP_size(); // skip mov rbp, rsp
twisti@1137 601 current_offset += 1; // skip call opcode byte
twisti@1137 602 return round_to(current_offset, alignment_required()) - current_offset;
twisti@1137 603 }
twisti@1137 604
twisti@1137 605 // The address of the call instruction needs to be 4-byte aligned to
twisti@1137 606 // ensure that it does not span a cache line so that it can be patched.
duke@0 607 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
duke@0 608 {
duke@0 609 current_offset += 11; // skip movq instruction + call opcode byte
duke@0 610 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 611 }
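// A worked example of the padding computation above, assuming alignment_required()
// is 4 (a sketch): for CallDynamicJavaDirectNode with current_offset == 30,
//   30 + 11 = 41                    // 10-byte movq + 1-byte call opcode precede the disp32
//   round_to(41, 4) - 41 = 3        // 3 bytes of padding requested
// so the 4-byte displacement that follows starts on a 4-byte boundary and therefore
// cannot straddle a cache line, which keeps the call patchable.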
duke@0 612
duke@0 613 #ifndef PRODUCT
duke@0 614 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 615 {
duke@0 616 st->print("INT3");
duke@0 617 }
duke@0 618 #endif
duke@0 619
duke@0 620 // EMIT_RM()
twisti@1668 621 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
duke@0 622 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
twisti@1668 623 cbuf.insts()->emit_int8(c);
duke@0 624 }
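// For example (a sketch of the packing arithmetic): emit_rm(cbuf, 0x3, 0x0, 0x1)
// emits the single ModRM byte
//   (0x3 << 6) | (0x0 << 3) | 0x1 == 0xC1
// i.e. mod=11 (register direct), reg=000 (rax), r/m=001 (rcx).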
duke@0 625
duke@0 626 // EMIT_CC()
twisti@1668 627 void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
duke@0 628 unsigned char c = (unsigned char) (f1 | f2);
twisti@1668 629 cbuf.insts()->emit_int8(c);
duke@0 630 }
duke@0 631
duke@0 632 // EMIT_OPCODE()
twisti@1668 633 void emit_opcode(CodeBuffer &cbuf, int code) {
twisti@1668 634 cbuf.insts()->emit_int8((unsigned char) code);
duke@0 635 }
duke@0 636
duke@0 637 // EMIT_OPCODE() w/ relocation information
duke@0 638 void emit_opcode(CodeBuffer &cbuf,
duke@0 639 int code, relocInfo::relocType reloc, int offset, int format)
duke@0 640 {
twisti@1668 641 cbuf.relocate(cbuf.insts_mark() + offset, reloc, format);
duke@0 642 emit_opcode(cbuf, code);
duke@0 643 }
duke@0 644
duke@0 645 // EMIT_D8()
twisti@1668 646 void emit_d8(CodeBuffer &cbuf, int d8) {
twisti@1668 647 cbuf.insts()->emit_int8((unsigned char) d8);
duke@0 648 }
duke@0 649
duke@0 650 // EMIT_D16()
twisti@1668 651 void emit_d16(CodeBuffer &cbuf, int d16) {
twisti@1668 652 cbuf.insts()->emit_int16(d16);
duke@0 653 }
duke@0 654
duke@0 655 // EMIT_D32()
twisti@1668 656 void emit_d32(CodeBuffer &cbuf, int d32) {
twisti@1668 657 cbuf.insts()->emit_int32(d32);
duke@0 658 }
duke@0 659
duke@0 660 // EMIT_D64()
twisti@1668 661 void emit_d64(CodeBuffer &cbuf, int64_t d64) {
twisti@1668 662 cbuf.insts()->emit_int64(d64);
duke@0 663 }
duke@0 664
duke@0 665 // emit 32 bit value and construct relocation entry from relocInfo::relocType
duke@0 666 void emit_d32_reloc(CodeBuffer& cbuf,
duke@0 667 int d32,
duke@0 668 relocInfo::relocType reloc,
duke@0 669 int format)
duke@0 670 {
duke@0 671 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
twisti@1668 672 cbuf.relocate(cbuf.insts_mark(), reloc, format);
twisti@1668 673 cbuf.insts()->emit_int32(d32);
duke@0 674 }
duke@0 675
duke@0 676 // emit 32 bit value and construct relocation entry from RelocationHolder
twisti@1668 677 void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) {
duke@0 678 #ifdef ASSERT
duke@0 679 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 680 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
jrose@989 681 assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
duke@0 682 }
duke@0 683 #endif
twisti@1668 684 cbuf.relocate(cbuf.insts_mark(), rspec, format);
twisti@1668 685 cbuf.insts()->emit_int32(d32);
duke@0 686 }
duke@0 687
duke@0 688 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
twisti@1668 689 address next_ip = cbuf.insts_end() + 4;
duke@0 690 emit_d32_reloc(cbuf, (int) (addr - next_ip),
duke@0 691 external_word_Relocation::spec(addr),
duke@0 692 RELOC_DISP32);
duke@0 693 }
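// A worked example of the displacement above (a sketch): with insts_end() at
// 0x1000 and addr == 0x2000, next_ip is 0x1004 and the emitted disp32 is
// 0x2000 - 0x1004 = 0x0FFC. At run time the CPU adds the disp32 to the address of
// the next instruction, yielding 0x2000 again; the relocation entry allows the
// displacement to be recomputed if the code is moved.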
duke@0 694
duke@0 695
duke@0 696 // emit 64 bit value and construct relocation entry from relocInfo::relocType
twisti@1668 697 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) {
twisti@1668 698 cbuf.relocate(cbuf.insts_mark(), reloc, format);
twisti@1668 699 cbuf.insts()->emit_int64(d64);
duke@0 700 }
duke@0 701
duke@0 702 // emit 64 bit value and construct relocation entry from RelocationHolder
twisti@1668 703 void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) {
duke@0 704 #ifdef ASSERT
duke@0 705 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 706 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
jrose@989 707 assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
jrose@989 708 "cannot embed scavengable oops in code");
duke@0 709 }
duke@0 710 #endif
twisti@1668 711 cbuf.relocate(cbuf.insts_mark(), rspec, format);
twisti@1668 712 cbuf.insts()->emit_int64(d64);
duke@0 713 }
duke@0 714
duke@0 715 // Access stack slot for load or store
duke@0 716 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
duke@0 717 {
duke@0 718 emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src])
duke@0 719 if (-0x80 <= disp && disp < 0x80) {
duke@0 720 emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
duke@0 721 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 722 emit_d8(cbuf, disp); // Displacement // R/M byte
duke@0 723 } else {
duke@0 724 emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
duke@0 725 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 726 emit_d32(cbuf, disp); // Displacement // R/M byte
duke@0 727 }
duke@0 728 }
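// For example (a sketch, using FILD m32int = DB /0, so rm_field is the opcode
// extension 0): store_to_stackslot(cbuf, 0xDB, 0x0, 8) emits
//   DB 44 24 08              fild dword ptr [rsp + 8]      (disp8 form)
// while a displacement of 0x100 takes the 32-bit form:
//   DB 84 24 00 01 00 00     fild dword ptr [rsp + 0x100]  (disp32 form)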
duke@0 729
duke@0 730 // emit_reg_mem(rRegI ereg, memory mem)
duke@0 731 void encode_RegMem(CodeBuffer &cbuf,
duke@0 732 int reg,
duke@0 733 int base, int index, int scale, int disp, bool disp_is_oop)
duke@0 734 {
duke@0 735 assert(!disp_is_oop, "cannot have disp");
duke@0 736 int regenc = reg & 7;
duke@0 737 int baseenc = base & 7;
duke@0 738 int indexenc = index & 7;
duke@0 739
duke@0 740 // There is no index & no scale, use form without SIB byte
duke@0 741 if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
duke@0 742 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 743 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 744 emit_rm(cbuf, 0x0, regenc, baseenc); // *
duke@0 745 } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 746 // If 8-bit displacement, mode 0x1
duke@0 747 emit_rm(cbuf, 0x1, regenc, baseenc); // *
duke@0 748 emit_d8(cbuf, disp);
duke@0 749 } else {
duke@0 750 // If 32-bit displacement
duke@0 751 if (base == -1) { // Special flag for absolute address
duke@0 752 emit_rm(cbuf, 0x0, regenc, 0x5); // *
duke@0 753 if (disp_is_oop) {
duke@0 754 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 755 } else {
duke@0 756 emit_d32(cbuf, disp);
duke@0 757 }
duke@0 758 } else {
duke@0 759 // Normal base + offset
duke@0 760 emit_rm(cbuf, 0x2, regenc, baseenc); // *
duke@0 761 if (disp_is_oop) {
duke@0 762 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 763 } else {
duke@0 764 emit_d32(cbuf, disp);
duke@0 765 }
duke@0 766 }
duke@0 767 }
duke@0 768 } else {
duke@0 769 // Else, encode with the SIB byte
duke@0 770 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 771 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 772 // If no displacement
duke@0 773 emit_rm(cbuf, 0x0, regenc, 0x4); // *
duke@0 774 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 775 } else {
duke@0 776 if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 777 // If 8-bit displacement, mode 0x1
duke@0 778 emit_rm(cbuf, 0x1, regenc, 0x4); // *
duke@0 779 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 780 emit_d8(cbuf, disp);
duke@0 781 } else {
duke@0 782 // If 32-bit displacement
duke@0 783 if (base == 0x04 ) {
duke@0 784 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 785 emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
duke@0 786 } else {
duke@0 787 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 788 emit_rm(cbuf, scale, indexenc, baseenc); // *
duke@0 789 }
duke@0 790 if (disp_is_oop) {
duke@0 791 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 792 } else {
duke@0 793 emit_d32(cbuf, disp);
duke@0 794 }
duke@0 795 }
duke@0 796 }
duke@0 797 }
duke@0 798 }
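// A worked example of the SIB path above (a sketch):
//   encode_RegMem(cbuf, RAX_enc, RSP_enc, 0x4, 0, 8, false)   // rax, [rsp + 8]
// emits
//   44    ModRM: mod=01 (disp8), reg=000 (rax), r/m=100 (SIB follows)
//   24    SIB:   scale=00, index=100 (none), base=100 (rsp)
//   08    8-bit displacement
// which is the same 0x44 0x24 pattern hand-emitted by the spill-copy code below.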
duke@0 799
never@2545 800 // This could be in MacroAssembler but it's fairly C2 specific
never@2545 801 void emit_cmpfp_fixup(MacroAssembler& _masm) {
never@2545 802 Label exit;
never@2545 803 __ jccb(Assembler::noParity, exit);
never@2545 804 __ pushf();
kvn@2953 805 //
kvn@2953 806 // comiss/ucomiss instructions set ZF,PF,CF flags and
kvn@2953 807 // zero OF,AF,SF for NaN values.
kvn@2953 808 // Fixup flags by zeroing ZF,PF so that compare of NaN
kvn@2953 809 // values returns 'less than' result (CF is set).
kvn@2953 810 // Leave the rest of flags unchanged.
kvn@2953 811 //
kvn@2953 812 // 7 6 5 4 3 2 1 0
kvn@2953 813 // |S|Z|r|A|r|P|r|C| (r - reserved bit)
kvn@2953 814 // 0 0 1 0 1 0 1 1 (0x2B)
kvn@2953 815 //
never@2545 816 __ andq(Address(rsp, 0), 0xffffff2b);
never@2545 817 __ popf();
never@2545 818 __ bind(exit);
kvn@2953 819 }
kvn@2953 820
kvn@2953 821 void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
kvn@2953 822 Label done;
kvn@2953 823 __ movl(dst, -1);
kvn@2953 824 __ jcc(Assembler::parity, done);
kvn@2953 825 __ jcc(Assembler::below, done);
kvn@2953 826 __ setb(Assembler::notEqual, dst);
kvn@2953 827 __ movzbl(dst, dst);
kvn@2953 828 __ bind(done);
never@2545 829 }
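// A minimal usage sketch (assuming the usual C2 pattern; the compare instruct
// definitions later in this file are the real users): a three-way double compare
// could be emitted as
//
//   MacroAssembler _masm(&cbuf);
//   __ ucomisd(xmm0, xmm1);     // sets ZF/PF/CF; unordered (NaN) sets all three
//   emit_cmpfp3(_masm, rax);    // rax = -1 (less or unordered), 0 (equal), 1 (greater)
//
// emit_cmpfp3 starts from -1 and keeps it when PF (NaN) or CF (below) is set;
// otherwise setne/movzbl produce 0 for equal and 1 for greater.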
never@2545 830
duke@0 831
duke@0 832 //=============================================================================
twisti@1915 833 const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
twisti@1915 834
twisti@2875 835 int Compile::ConstantTable::calculate_table_base_offset() const {
twisti@2875 836 return 0; // absolute addressing, no offset
twisti@2875 837 }
twisti@2875 838
twisti@1915 839 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
twisti@1915 840 // Empty encoding
twisti@1915 841 }
twisti@1915 842
twisti@1915 843 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
twisti@1915 844 return 0;
twisti@1915 845 }
twisti@1915 846
twisti@1915 847 #ifndef PRODUCT
twisti@1915 848 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
twisti@1915 849 st->print("# MachConstantBaseNode (empty encoding)");
twisti@1915 850 }
twisti@1915 851 #endif
twisti@1915 852
twisti@1915 853
twisti@1915 854 //=============================================================================
duke@0 855 #ifndef PRODUCT
duke@0 856 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 857 {
duke@0 858 Compile* C = ra_->C;
duke@0 859
duke@0 860 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 861 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 862 // Remove wordSize for return adr already pushed
duke@0 863 // and another for the RBP we are going to save
duke@0 864 framesize -= 2*wordSize;
duke@0 865 bool need_nop = true;
duke@0 866
duke@0 867 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 868 // We require that their callers must bang for them. But be
duke@0 869 // careful, because some VM calls (such as call site linkage) can
duke@0 870 // use several kilobytes of stack. But the stack safety zone should
duke@0 871 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 872 if (C->need_stack_bang(framesize)) {
duke@0 873 st->print_cr("# stack bang"); st->print("\t");
duke@0 874 need_nop = false;
duke@0 875 }
duke@0 876 st->print_cr("pushq rbp"); st->print("\t");
duke@0 877
duke@0 878 if (VerifyStackAtCalls) {
duke@0 879 // Majik cookie to verify stack depth
duke@0 880 st->print_cr("pushq 0xffffffffbadb100d"
duke@0 881 "\t# Majik cookie for stack depth check");
duke@0 882 st->print("\t");
duke@0 883 framesize -= wordSize; // Remove 2 for cookie
duke@0 884 need_nop = false;
duke@0 885 }
duke@0 886
duke@0 887 if (framesize) {
duke@0 888 st->print("subq rsp, #%d\t# Create frame", framesize);
duke@0 889 if (framesize < 0x80 && need_nop) {
duke@0 890 st->print("\n\tnop\t# nop for patch_verified_entry");
duke@0 891 }
duke@0 892 }
duke@0 893 }
duke@0 894 #endif
duke@0 895
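// A worked example of the frame size arithmetic used in format() above and in
// emit() below (a sketch): with C->frame_slots() == 8 and 4-byte stack slots,
//   framesize = 8 << LogBytesPerInt = 32     // 16-byte aligned, as asserted
//   framesize -= 2 * wordSize       = 16     // return address already pushed by the
//                                            // caller's call, RBP pushed explicitly here
// so only 16 bytes remain to be allocated, using the sign-extended 8-bit immediate
// form of subq since 16 < 0x80.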
duke@0 896 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 897 {
duke@0 898 Compile* C = ra_->C;
duke@0 899
duke@0 900 // WARNING: Initial instruction MUST be 5 bytes or longer so that
duke@0 901 // NativeJump::patch_verified_entry will be able to patch out the entry
duke@0 902 // code safely. The fldcw is ok at 6 bytes, the push to verify stack
duke@0 903 // depth is ok at 5 bytes, the frame allocation can be either 3 or
duke@0 904 // 6 bytes. So if we don't do the fldcw or the push then we must
duke@0 905 // use the 6 byte frame allocation even if we have no frame. :-(
duke@0 906 // If method sets FPU control word do it now
duke@0 907
duke@0 908 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 909 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 910 // Remove wordSize for return adr already pushed
duke@0 911 // and another for the RBP we are going to save
duke@0 912 framesize -= 2*wordSize;
duke@0 913 bool need_nop = true;
duke@0 914
duke@0 915 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 916 // We require that their callers must bang for them. But be
duke@0 917 // careful, because some VM calls (such as call site linkage) can
duke@0 918 // use several kilobytes of stack. But the stack safety zone should
duke@0 919 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 920 if (C->need_stack_bang(framesize)) {
duke@0 921 MacroAssembler masm(&cbuf);
duke@0 922 masm.generate_stack_overflow_check(framesize);
duke@0 923 need_nop = false;
duke@0 924 }
duke@0 925
duke@0 926 // We always push rbp so that on return to interpreter rbp will be
duke@0 927 // restored correctly and we can correct the stack.
duke@0 928 emit_opcode(cbuf, 0x50 | RBP_enc);
duke@0 929
duke@0 930 if (VerifyStackAtCalls) {
duke@0 931 // Majik cookie to verify stack depth
duke@0 932 emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
duke@0 933 emit_d32(cbuf, 0xbadb100d);
duke@0 934 framesize -= wordSize; // Remove 2 for cookie
duke@0 935 need_nop = false;
duke@0 936 }
duke@0 937
duke@0 938 if (framesize) {
duke@0 939 emit_opcode(cbuf, Assembler::REX_W);
duke@0 940 if (framesize < 0x80) {
duke@0 941 emit_opcode(cbuf, 0x83); // sub SP,#framesize
duke@0 942 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 943 emit_d8(cbuf, framesize);
duke@0 944 if (need_nop) {
duke@0 945 emit_opcode(cbuf, 0x90); // nop
duke@0 946 }
duke@0 947 } else {
duke@0 948 emit_opcode(cbuf, 0x81); // sub SP,#framesize
duke@0 949 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 950 emit_d32(cbuf, framesize);
duke@0 951 }
duke@0 952 }
duke@0 953
twisti@1668 954 C->set_frame_complete(cbuf.insts_size());
duke@0 955
duke@0 956 #ifdef ASSERT
duke@0 957 if (VerifyStackAtCalls) {
duke@0 958 Label L;
duke@0 959 MacroAssembler masm(&cbuf);
never@304 960 masm.push(rax);
never@304 961 masm.mov(rax, rsp);
never@304 962 masm.andptr(rax, StackAlignmentInBytes-1);
never@304 963 masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
never@304 964 masm.pop(rax);
duke@0 965 masm.jcc(Assembler::equal, L);
duke@0 966 masm.stop("Stack is not properly aligned!");
duke@0 967 masm.bind(L);
duke@0 968 }
duke@0 969 #endif
twisti@2875 970
twisti@2875 971 if (C->has_mach_constant_base_node()) {
twisti@2875 972 // NOTE: We set the table base offset here because users might be
twisti@2875 973 // emitted before MachConstantBaseNode.
twisti@2875 974 Compile::ConstantTable& constant_table = C->constant_table();
twisti@2875 975 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
twisti@2875 976 }
duke@0 977 }
duke@0 978
duke@0 979 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
duke@0 980 {
duke@0 981 return MachNode::size(ra_); // too many variables; just compute it
duke@0 982 // the hard way
duke@0 983 }
duke@0 984
duke@0 985 int MachPrologNode::reloc() const
duke@0 986 {
duke@0 987 return 0; // a large enough number
duke@0 988 }
duke@0 989
duke@0 990 //=============================================================================
duke@0 991 #ifndef PRODUCT
duke@0 992 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 993 {
duke@0 994 Compile* C = ra_->C;
duke@0 995 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 996 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 997 // Remove word for return adr already pushed
duke@0 998 // and RBP
duke@0 999 framesize -= 2*wordSize;
duke@0 1000
duke@0 1001 if (framesize) {
iveresov@2251 1002 st->print_cr("addq rsp, %d\t# Destroy frame", framesize);
duke@0 1003 st->print("\t");
duke@0 1004 }
duke@0 1005
iveresov@2251 1006 st->print_cr("popq rbp");
duke@0 1007 if (do_polling() && C->is_method_compilation()) {
duke@0 1008 st->print("\t");
iveresov@2251 1009 if (Assembler::is_polling_page_far()) {
iveresov@2251 1010 st->print_cr("movq rscratch1, #polling_page_address\n\t"
iveresov@2251 1011 "testl rax, [rscratch1]\t"
iveresov@2251 1012 "# Safepoint: poll for GC");
iveresov@2251 1013 } else {
iveresov@2251 1014 st->print_cr("testl rax, [rip + #offset_to_poll_page]\t"
iveresov@2251 1015 "# Safepoint: poll for GC");
iveresov@2251 1016 }
duke@0 1017 }
duke@0 1018 }
duke@0 1019 #endif
duke@0 1020
duke@0 1021 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1022 {
duke@0 1023 Compile* C = ra_->C;
duke@0 1024 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1025 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1026 // Remove word for return adr already pushed
duke@0 1027 // and RBP
duke@0 1028 framesize -= 2*wordSize;
duke@0 1029
duke@0 1030 // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
duke@0 1031
duke@0 1032 if (framesize) {
duke@0 1033 emit_opcode(cbuf, Assembler::REX_W);
duke@0 1034 if (framesize < 0x80) {
duke@0 1035 emit_opcode(cbuf, 0x83); // addq rsp, #framesize
duke@0 1036 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1037 emit_d8(cbuf, framesize);
duke@0 1038 } else {
duke@0 1039 emit_opcode(cbuf, 0x81); // addq rsp, #framesize
duke@0 1040 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1041 emit_d32(cbuf, framesize);
duke@0 1042 }
duke@0 1043 }
duke@0 1044
duke@0 1045 // popq rbp
duke@0 1046 emit_opcode(cbuf, 0x58 | RBP_enc);
duke@0 1047
duke@0 1048 if (do_polling() && C->is_method_compilation()) {
iveresov@2251 1049 MacroAssembler _masm(&cbuf);
iveresov@2251 1050 AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
iveresov@2251 1051 if (Assembler::is_polling_page_far()) {
iveresov@2251 1052 __ lea(rscratch1, polling_page);
iveresov@2251 1053 __ relocate(relocInfo::poll_return_type);
iveresov@2251 1054 __ testl(rax, Address(rscratch1, 0));
iveresov@2251 1055 } else {
iveresov@2251 1056 __ testl(rax, polling_page);
iveresov@2251 1057 }
duke@0 1058 }
duke@0 1059 }
duke@0 1060
duke@0 1061 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
duke@0 1062 {
iveresov@2251 1063 return MachNode::size(ra_); // too many variables; just compute it
iveresov@2251 1064 // the hard way
duke@0 1065 }
duke@0 1066
duke@0 1067 int MachEpilogNode::reloc() const
duke@0 1068 {
duke@0 1069 return 2; // a large enough number
duke@0 1070 }
duke@0 1071
duke@0 1072 const Pipeline* MachEpilogNode::pipeline() const
duke@0 1073 {
duke@0 1074 return MachNode::pipeline_class();
duke@0 1075 }
duke@0 1076
duke@0 1077 int MachEpilogNode::safepoint_offset() const
duke@0 1078 {
duke@0 1079 return 0;
duke@0 1080 }
duke@0 1081
duke@0 1082 //=============================================================================
duke@0 1083
duke@0 1084 enum RC {
duke@0 1085 rc_bad,
duke@0 1086 rc_int,
duke@0 1087 rc_float,
duke@0 1088 rc_stack
duke@0 1089 };
duke@0 1090
duke@0 1091 static enum RC rc_class(OptoReg::Name reg)
duke@0 1092 {
duke@0 1093 if (!OptoReg::is_valid(reg)) return rc_bad;
duke@0 1094
duke@0 1095 if (OptoReg::is_stack(reg)) return rc_stack;
duke@0 1096
duke@0 1097 VMReg r = OptoReg::as_VMReg(reg);
duke@0 1098
duke@0 1099 if (r->is_Register()) return rc_int;
duke@0 1100
duke@0 1101 assert(r->is_XMMRegister(), "must be");
duke@0 1102 return rc_float;
duke@0 1103 }
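// For example (a sketch of how this classification drives the copy code below):
// the OptoReg assigned to rax classifies as rc_int, an XMM register as rc_float,
// and a spill slot as rc_stack, so a copy from a stack slot into xmm0 takes the
// (src_first_rc == rc_stack, dst_first_rc == rc_float) branch and emits a
// movss/movsd load from [rsp + offset].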
duke@0 1104
duke@0 1105 uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
duke@0 1106 PhaseRegAlloc* ra_,
duke@0 1107 bool do_size,
duke@0 1108 outputStream* st) const
duke@0 1109 {
duke@0 1110
duke@0 1111 // Get registers to move
duke@0 1112 OptoReg::Name src_second = ra_->get_reg_second(in(1));
duke@0 1113 OptoReg::Name src_first = ra_->get_reg_first(in(1));
duke@0 1114 OptoReg::Name dst_second = ra_->get_reg_second(this);
duke@0 1115 OptoReg::Name dst_first = ra_->get_reg_first(this);
duke@0 1116
duke@0 1117 enum RC src_second_rc = rc_class(src_second);
duke@0 1118 enum RC src_first_rc = rc_class(src_first);
duke@0 1119 enum RC dst_second_rc = rc_class(dst_second);
duke@0 1120 enum RC dst_first_rc = rc_class(dst_first);
duke@0 1121
duke@0 1122 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
duke@0 1123 "must move at least 1 register" );
duke@0 1124
duke@0 1125 if (src_first == dst_first && src_second == dst_second) {
duke@0 1126 // Self copy, no move
duke@0 1127 return 0;
duke@0 1128 } else if (src_first_rc == rc_stack) {
duke@0 1129 // mem ->
duke@0 1130 if (dst_first_rc == rc_stack) {
duke@0 1131 // mem -> mem
duke@0 1132 assert(src_second != dst_first, "overlap");
duke@0 1133 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1134 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1135 // 64-bit
duke@0 1136 int src_offset = ra_->reg2offset(src_first);
duke@0 1137 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1138 if (cbuf) {
duke@0 1139 emit_opcode(*cbuf, 0xFF);
duke@0 1140 encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);
duke@0 1141
duke@0 1142 emit_opcode(*cbuf, 0x8F);
duke@0 1143 encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);
duke@0 1144
duke@0 1145 #ifndef PRODUCT
duke@0 1146 } else if (!do_size) {
duke@0 1147 st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
duke@0 1148 "popq [rsp + #%d]",
duke@0 1149 src_offset,
duke@0 1150 dst_offset);
duke@0 1151 #endif
duke@0 1152 }
duke@0 1153 return
duke@0 1154 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
duke@0 1155 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
duke@0 1156 } else {
duke@0 1157 // 32-bit
duke@0 1158 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1159 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1160 // No pushl/popl, so:
duke@0 1161 int src_offset = ra_->reg2offset(src_first);
duke@0 1162 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1163 if (cbuf) {
duke@0 1164 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1165 emit_opcode(*cbuf, 0x89);
duke@0 1166 emit_opcode(*cbuf, 0x44);
duke@0 1167 emit_opcode(*cbuf, 0x24);
duke@0 1168 emit_opcode(*cbuf, 0xF8);
duke@0 1169
duke@0 1170 emit_opcode(*cbuf, 0x8B);
duke@0 1171 encode_RegMem(*cbuf,
duke@0 1172 RAX_enc,
duke@0 1173 RSP_enc, 0x4, 0, src_offset,
duke@0 1174 false);
duke@0 1175
duke@0 1176 emit_opcode(*cbuf, 0x89);
duke@0 1177 encode_RegMem(*cbuf,
duke@0 1178 RAX_enc,
duke@0 1179 RSP_enc, 0x4, 0, dst_offset,
duke@0 1180 false);
duke@0 1181
duke@0 1182 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1183 emit_opcode(*cbuf, 0x8B);
duke@0 1184 emit_opcode(*cbuf, 0x44);
duke@0 1185 emit_opcode(*cbuf, 0x24);
duke@0 1186 emit_opcode(*cbuf, 0xF8);
duke@0 1187
duke@0 1188 #ifndef PRODUCT
duke@0 1189 } else if (!do_size) {
duke@0 1190 st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
duke@0 1191 "movl rax, [rsp + #%d]\n\t"
duke@0 1192 "movl [rsp + #%d], rax\n\t"
duke@0 1193 "movq rax, [rsp - #8]",
duke@0 1194 src_offset,
duke@0 1195 dst_offset);
duke@0 1196 #endif
duke@0 1197 }
duke@0 1198 return
duke@0 1199 5 + // movq
duke@0 1200 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1201 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1202 5; // movq
duke@0 1203 }
duke@0 1204 } else if (dst_first_rc == rc_int) {
duke@0 1205 // mem -> gpr
duke@0 1206 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1207 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1208 // 64-bit
duke@0 1209 int offset = ra_->reg2offset(src_first);
duke@0 1210 if (cbuf) {
duke@0 1211 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1212 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1213 } else {
duke@0 1214 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1215 }
duke@0 1216 emit_opcode(*cbuf, 0x8B);
duke@0 1217 encode_RegMem(*cbuf,
duke@0 1218 Matcher::_regEncode[dst_first],
duke@0 1219 RSP_enc, 0x4, 0, offset,
duke@0 1220 false);
duke@0 1221 #ifndef PRODUCT
duke@0 1222 } else if (!do_size) {
duke@0 1223 st->print("movq %s, [rsp + #%d]\t# spill",
duke@0 1224 Matcher::regName[dst_first],
duke@0 1225 offset);
duke@0 1226 #endif
duke@0 1227 }
duke@0 1228 return
duke@0 1229 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1230 } else {
duke@0 1231 // 32-bit
duke@0 1232 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1233 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1234 int offset = ra_->reg2offset(src_first);
duke@0 1235 if (cbuf) {
duke@0 1236 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1237 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1238 }
duke@0 1239 emit_opcode(*cbuf, 0x8B);
duke@0 1240 encode_RegMem(*cbuf,
duke@0 1241 Matcher::_regEncode[dst_first],
duke@0 1242 RSP_enc, 0x4, 0, offset,
duke@0 1243 false);
duke@0 1244 #ifndef PRODUCT
duke@0 1245 } else if (!do_size) {
duke@0 1246 st->print("movl %s, [rsp + #%d]\t# spill",
duke@0 1247 Matcher::regName[dst_first],
duke@0 1248 offset);
duke@0 1249 #endif
duke@0 1250 }
duke@0 1251 return
duke@0 1252 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1253 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1254 ? 3
duke@0 1255 : 4); // REX
duke@0 1256 }
duke@0 1257 } else if (dst_first_rc == rc_float) {
duke@0 1258 // mem -> xmm
duke@0 1259 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1260 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1261 // 64-bit
duke@0 1262 int offset = ra_->reg2offset(src_first);
duke@0 1263 if (cbuf) {
kvn@2953 1264 MacroAssembler _masm(cbuf);
kvn@2953 1265 __ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
duke@0 1266 #ifndef PRODUCT
duke@0 1267 } else if (!do_size) {
duke@0 1268 st->print("%s %s, [rsp + #%d]\t# spill",
duke@0 1269 UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
duke@0 1270 Matcher::regName[dst_first],
duke@0 1271 offset);
duke@0 1272 #endif
duke@0 1273 }
duke@0 1274 return
duke@0 1275 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
kvn@2953 1276 ((Matcher::_regEncode[dst_first] >= 8)
kvn@2953 1277 ? 6
kvn@2953 1278 : (5 + ((UseAVX>0)?1:0))); // REX
duke@0 1279 } else {
duke@0 1280 // 32-bit
duke@0 1281 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1282 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1283 int offset = ra_->reg2offset(src_first);
duke@0 1284 if (cbuf) {
kvn@2953 1285 MacroAssembler _masm(cbuf);
kvn@2953 1286 __ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
duke@0 1287 #ifndef PRODUCT
duke@0 1288 } else if (!do_size) {
duke@0 1289 st->print("movss %s, [rsp + #%d]\t# spill",
duke@0 1290 Matcher::regName[dst_first],
duke@0 1291 offset);
duke@0 1292 #endif
duke@0 1293 }
duke@0 1294 return
duke@0 1295 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
kvn@2953 1296 ((Matcher::_regEncode[dst_first] >= 8)
kvn@2953 1297 ? 6
kvn@2953 1298 : (5 + ((UseAVX>0)?1:0))); // REX
duke@0 1299 }
duke@0 1300 }
duke@0 1301 } else if (src_first_rc == rc_int) {
duke@0 1302 // gpr ->
duke@0 1303 if (dst_first_rc == rc_stack) {
duke@0 1304 // gpr -> mem
duke@0 1305 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1306 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1307 // 64-bit
duke@0 1308 int offset = ra_->reg2offset(dst_first);
duke@0 1309 if (cbuf) {
duke@0 1310 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1311 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1312 } else {
duke@0 1313 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1314 }
duke@0 1315 emit_opcode(*cbuf, 0x89);
duke@0 1316 encode_RegMem(*cbuf,
duke@0 1317 Matcher::_regEncode[src_first],
duke@0 1318 RSP_enc, 0x4, 0, offset,
duke@0 1319 false);
duke@0 1320 #ifndef PRODUCT
duke@0 1321 } else if (!do_size) {
duke@0 1322 st->print("movq [rsp + #%d], %s\t# spill",
duke@0 1323 offset,
duke@0 1324 Matcher::regName[src_first]);
duke@0 1325 #endif
duke@0 1326 }
duke@0 1327 return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1328 } else {
duke@0 1329 // 32-bit
duke@0 1330 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1331 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1332 int offset = ra_->reg2offset(dst_first);
duke@0 1333 if (cbuf) {
duke@0 1334 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1335 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1336 }
duke@0 1337 emit_opcode(*cbuf, 0x89);
duke@0 1338 encode_RegMem(*cbuf,
duke@0 1339 Matcher::_regEncode[src_first],
duke@0 1340 RSP_enc, 0x4, 0, offset,
duke@0 1341 false);
duke@0 1342 #ifndef PRODUCT
duke@0 1343 } else if (!do_size) {
duke@0 1344 st->print("movl [rsp + #%d], %s\t# spill",
duke@0 1345 offset,
duke@0 1346 Matcher::regName[src_first]);
duke@0 1347 #endif
duke@0 1348 }
duke@0 1349 return
duke@0 1350 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1351 ((Matcher::_regEncode[src_first] < 8)
duke@0 1352 ? 3
duke@0 1353 : 4); // REX
duke@0 1354 }
duke@0 1355 } else if (dst_first_rc == rc_int) {
duke@0 1356 // gpr -> gpr
duke@0 1357 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1358 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1359 // 64-bit
duke@0 1360 if (cbuf) {
duke@0 1361 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1362 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1363 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1364 } else {
duke@0 1365 emit_opcode(*cbuf, Assembler::REX_WB);
duke@0 1366 }
duke@0 1367 } else {
duke@0 1368 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1369 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1370 } else {
duke@0 1371 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1372 }
duke@0 1373 }
duke@0 1374 emit_opcode(*cbuf, 0x8B);
duke@0 1375 emit_rm(*cbuf, 0x3,
duke@0 1376 Matcher::_regEncode[dst_first] & 7,
duke@0 1377 Matcher::_regEncode[src_first] & 7);
duke@0 1378 #ifndef PRODUCT
duke@0 1379 } else if (!do_size) {
duke@0 1380 st->print("movq %s, %s\t# spill",
duke@0 1381 Matcher::regName[dst_first],
duke@0 1382 Matcher::regName[src_first]);
duke@0 1383 #endif
duke@0 1384 }
duke@0 1385 return 3; // REX
duke@0 1386 } else {
duke@0 1387 // 32-bit
duke@0 1388 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1389 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1390 if (cbuf) {
duke@0 1391 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1392 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1393 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1394 }
duke@0 1395 } else {
duke@0 1396 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1397 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1398 } else {
duke@0 1399 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1400 }
duke@0 1401 }
duke@0 1402 emit_opcode(*cbuf, 0x8B);
duke@0 1403 emit_rm(*cbuf, 0x3,
duke@0 1404 Matcher::_regEncode[dst_first] & 7,
duke@0 1405 Matcher::_regEncode[src_first] & 7);
duke@0 1406 #ifndef PRODUCT
duke@0 1407 } else if (!do_size) {
duke@0 1408 st->print("movl %s, %s\t# spill",
duke@0 1409 Matcher::regName[dst_first],
duke@0 1410 Matcher::regName[src_first]);
duke@0 1411 #endif
duke@0 1412 }
duke@0 1413 return
duke@0 1414 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1415 ? 2
duke@0 1416 : 3; // REX
duke@0 1417 }
duke@0 1418 } else if (dst_first_rc == rc_float) {
duke@0 1419 // gpr -> xmm
duke@0 1420 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1421 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1422 // 64-bit
duke@0 1423 if (cbuf) {
kvn@2953 1424 MacroAssembler _masm(cbuf);
kvn@2953 1425 __ movdq( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
duke@0 1426 #ifndef PRODUCT
duke@0 1427 } else if (!do_size) {
duke@0 1428 st->print("movdq %s, %s\t# spill",
duke@0 1429 Matcher::regName[dst_first],
duke@0 1430 Matcher::regName[src_first]);
duke@0 1431 #endif
duke@0 1432 }
duke@0 1433 return 5; // REX
duke@0 1434 } else {
duke@0 1435 // 32-bit
duke@0 1436 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1437 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1438 if (cbuf) {
kvn@2953 1439 MacroAssembler _masm(cbuf);
kvn@2953 1440 __ movdl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
duke@0 1441 #ifndef PRODUCT
duke@0 1442 } else if (!do_size) {
duke@0 1443 st->print("movdl %s, %s\t# spill",
duke@0 1444 Matcher::regName[dst_first],
duke@0 1445 Matcher::regName[src_first]);
duke@0 1446 #endif
duke@0 1447 }
duke@0 1448 return
kvn@2953 1449 (Matcher::_regEncode[src_first] >= 8 || Matcher::_regEncode[dst_first] >= 8)
kvn@2953 1450 ? 5
kvn@2953 1451 : (4 + ((UseAVX>0)?1:0)); // REX
duke@0 1452 }
duke@0 1453 }
duke@0 1454 } else if (src_first_rc == rc_float) {
duke@0 1455 // xmm ->
duke@0 1456 if (dst_first_rc == rc_stack) {
duke@0 1457 // xmm -> mem
duke@0 1458 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1459 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1460 // 64-bit
duke@0 1461 int offset = ra_->reg2offset(dst_first);
duke@0 1462 if (cbuf) {
kvn@2953 1463 MacroAssembler _masm(cbuf);
kvn@2953 1464 __ movdbl( Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first]));
duke@0 1465 #ifndef PRODUCT
duke@0 1466 } else if (!do_size) {
duke@0 1467 st->print("movsd [rsp + #%d], %s\t# spill",
duke@0 1468 offset,
duke@0 1469 Matcher::regName[src_first]);
duke@0 1470 #endif
duke@0 1471 }
duke@0 1472 return
duke@0 1473 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
kvn@2953 1474 ((Matcher::_regEncode[src_first] >= 8)
kvn@2953 1475 ? 6
kvn@2953 1476 : (5 + ((UseAVX>0)?1:0))); // REX
duke@0 1477 } else {
duke@0 1478 // 32-bit
duke@0 1479 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1480 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1481 int offset = ra_->reg2offset(dst_first);
duke@0 1482 if (cbuf) {
kvn@2953 1483 MacroAssembler _masm(cbuf);
kvn@2953 1484 __ movflt(Address(rsp, offset), as_XMMRegister(Matcher::_regEncode[src_first]));
duke@0 1485 #ifndef PRODUCT
duke@0 1486 } else if (!do_size) {
duke@0 1487 st->print("movss [rsp + #%d], %s\t# spill",
duke@0 1488 offset,
duke@0 1489 Matcher::regName[src_first]);
duke@0 1490 #endif
duke@0 1491 }
duke@0 1492 return
duke@0 1493 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
kvn@2953 1494 ((Matcher::_regEncode[src_first] >=8)
kvn@2953 1495 ? 6
kvn@2953 1496 : (5 + ((UseAVX>0)?1:0))); // REX
duke@0 1497 }
duke@0 1498 } else if (dst_first_rc == rc_int) {
duke@0 1499 // xmm -> gpr
duke@0 1500 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1501 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1502 // 64-bit
duke@0 1503 if (cbuf) {
kvn@2953 1504 MacroAssembler _masm(cbuf);
kvn@2953 1505 __ movdq( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
duke@0 1506 #ifndef PRODUCT
duke@0 1507 } else if (!do_size) {
duke@0 1508 st->print("movdq %s, %s\t# spill",
duke@0 1509 Matcher::regName[dst_first],
duke@0 1510 Matcher::regName[src_first]);
duke@0 1511 #endif
duke@0 1512 }
duke@0 1513 return 5; // REX
duke@0 1514 } else {
duke@0 1515 // 32-bit
duke@0 1516 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1517 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1518 if (cbuf) {
kvn@2953 1519 MacroAssembler _masm(cbuf);
kvn@2953 1520 __ movdl( as_Register(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
duke@0 1521 #ifndef PRODUCT
duke@0 1522 } else if (!do_size) {
duke@0 1523 st->print("movdl %s, %s\t# spill",
duke@0 1524 Matcher::regName[dst_first],
duke@0 1525 Matcher::regName[src_first]);
duke@0 1526 #endif
duke@0 1527 }
duke@0 1528 return
kvn@2953 1529 (Matcher::_regEncode[src_first] >= 8 || Matcher::_regEncode[dst_first] >= 8)
kvn@2953 1530 ? 5
kvn@2953 1531 : (4 + ((UseAVX>0)?1:0)); // REX
duke@0 1532 }
duke@0 1533 } else if (dst_first_rc == rc_float) {
duke@0 1534 // xmm -> xmm
duke@0 1535 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1536 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1537 // 64-bit
duke@0 1538 if (cbuf) {
kvn@2953 1539 MacroAssembler _masm(cbuf);
kvn@2953 1540 __ movdbl( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
duke@0 1541 #ifndef PRODUCT
duke@0 1542 } else if (!do_size) {
duke@0 1543 st->print("%s %s, %s\t# spill",
duke@0 1544 UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
duke@0 1545 Matcher::regName[dst_first],
duke@0 1546 Matcher::regName[src_first]);
duke@0 1547 #endif
duke@0 1548 }
duke@0 1549 return
kvn@2953 1550 (Matcher::_regEncode[src_first] >= 8 || Matcher::_regEncode[dst_first] >= 8)
kvn@2953 1551 ? 5
kvn@2953 1552 : (4 + ((UseAVX>0)?1:0)); // REX
duke@0 1553 } else {
duke@0 1554 // 32-bit
duke@0 1555 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1556 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1557 if (cbuf) {
kvn@2953 1558 MacroAssembler _masm(cbuf);
kvn@2953 1559 __ movflt( as_XMMRegister(Matcher::_regEncode[dst_first]), as_XMMRegister(Matcher::_regEncode[src_first]));
duke@0 1560 #ifndef PRODUCT
duke@0 1561 } else if (!do_size) {
duke@0 1562 st->print("%s %s, %s\t# spill",
duke@0 1563 UseXmmRegToRegMoveAll ? "movaps" : "movss ",
duke@0 1564 Matcher::regName[dst_first],
duke@0 1565 Matcher::regName[src_first]);
duke@0 1566 #endif
duke@0 1567 }
kvn@2953 1568 return ((UseAVX>0) ? 5:
kvn@2953 1569 ((Matcher::_regEncode[src_first] >= 8 || Matcher::_regEncode[dst_first] >= 8)
kvn@2953 1570 ? (UseXmmRegToRegMoveAll ? 4 : 5)
kvn@2953 1571 : (UseXmmRegToRegMoveAll ? 3 : 4))); // REX
duke@0 1572 }
duke@0 1573 }
duke@0 1574 }
duke@0 1575
duke@0 1576   assert(false, "unhandled spill-copy case");
duke@0 1577 Unimplemented();
duke@0 1578
duke@0 1579 return 0;
duke@0 1580 }
duke@0 1581
duke@0 1582 #ifndef PRODUCT
duke@0 1583 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
duke@0 1584 {
duke@0 1585 implementation(NULL, ra_, false, st);
duke@0 1586 }
duke@0 1587 #endif
duke@0 1588
duke@0 1589 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 1590 {
duke@0 1591 implementation(&cbuf, ra_, false, NULL);
duke@0 1592 }
duke@0 1593
duke@0 1594 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
duke@0 1595 {
duke@0 1596 return implementation(NULL, ra_, true, NULL);
duke@0 1597 }
duke@0 1598
duke@0 1599 //=============================================================================
duke@0 1600 #ifndef PRODUCT
duke@0 1601 void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 1602 {
duke@0 1603 st->print("nop \t# %d bytes pad for loops and calls", _count);
duke@0 1604 }
duke@0 1605 #endif
duke@0 1606
duke@0 1607 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
duke@0 1608 {
duke@0 1609 MacroAssembler _masm(&cbuf);
duke@0 1610 __ nop(_count);
duke@0 1611 }
duke@0 1612
duke@0 1613 uint MachNopNode::size(PhaseRegAlloc*) const
duke@0 1614 {
duke@0 1615 return _count;
duke@0 1616 }
duke@0 1617
duke@0 1618
duke@0 1619 //=============================================================================
duke@0 1620 #ifndef PRODUCT
duke@0 1621 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1622 {
duke@0 1623 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1624 int reg = ra_->get_reg_first(this);
duke@0 1625 st->print("leaq %s, [rsp + #%d]\t# box lock",
duke@0 1626 Matcher::regName[reg], offset);
duke@0 1627 }
duke@0 1628 #endif
duke@0 1629
duke@0 1630 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1631 {
duke@0 1632 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1633 int reg = ra_->get_encode(this);
duke@0 1634 if (offset >= 0x80) {
duke@0 1635 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1636 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1637 emit_rm(cbuf, 0x2, reg & 7, 0x04);
duke@0 1638 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1639 emit_d32(cbuf, offset);
duke@0 1640 } else {
duke@0 1641 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1642 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1643 emit_rm(cbuf, 0x1, reg & 7, 0x04);
duke@0 1644 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1645 emit_d8(cbuf, offset);
duke@0 1646 }
duke@0 1647 }
duke@0 1648
duke@0 1649 uint BoxLockNode::size(PhaseRegAlloc *ra_) const
duke@0 1650 {
duke@0 1651 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1652 return (offset < 0x80) ? 5 : 8; // REX
duke@0 1653 }
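// Editorial size sketch (not from the original source): the leaq emitted above
// is REX.W + 0x8D + ModRM + SIB + displacement, so the disp8 form is
// 1+1+1+1+1 = 5 bytes and the disp32 form is 1+1+1+1+4 = 8 bytes, which is
// where the 5 : 8 choice above comes from.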
duke@0 1654
duke@0 1655 //=============================================================================
duke@0 1656
duke@0 1657 // emit call stub, compiled java to interpreter
duke@0 1658 void emit_java_to_interp(CodeBuffer& cbuf)
duke@0 1659 {
duke@0 1660 // Stub is fixed up when the corresponding call is converted from
duke@0 1661 // calling compiled code to calling interpreted code.
duke@0 1662 // movq rbx, 0
duke@0 1663 // jmp -5 # to self
duke@0 1664
twisti@1668 1665 address mark = cbuf.insts_mark(); // get mark within main instrs section
twisti@1668 1666
twisti@1668 1667 // Note that the code buffer's insts_mark is always relative to insts.
duke@0 1668 // That's why we must use the macroassembler to generate a stub.
duke@0 1669 MacroAssembler _masm(&cbuf);
duke@0 1670
duke@0 1671 address base =
duke@0 1672 __ start_a_stub(Compile::MAX_stubs_size);
duke@0 1673 if (base == NULL) return; // CodeBuffer::expand failed
duke@0 1674 // static stub relocation stores the instruction address of the call
duke@0 1675 __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
duke@0 1676 // static stub relocation also tags the methodOop in the code-stream.
duke@0 1677 __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
never@304 1678 // This is recognized as unresolved by relocs/nativeinst/ic code
duke@0 1679 __ jump(RuntimeAddress(__ pc()));
duke@0 1680
twisti@1668 1681 // Update current stubs pointer and restore insts_end.
duke@0 1682 __ end_a_stub();
duke@0 1683 }
duke@0 1684
duke@0 1685 // size of call stub, compiled java to interpreter
duke@0 1686 uint size_java_to_interp()
duke@0 1687 {
duke@0 1688 return 15; // movq (1+1+8); jmp (1+4)
duke@0 1689 }
duke@0 1690
duke@0 1691 // relocation entries for call stub, compiled java to interpreter
duke@0 1692 uint reloc_java_to_interp()
duke@0 1693 {
duke@0 1694 return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
duke@0 1695 }
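// Editorial note (best-effort reading of emit_java_to_interp above): the three
// stub relocations are the static_stub_Relocation, the oop relocation attached
// to the movoop placeholder, and the runtime-call relocation on the self jump;
// the fourth entry is the static_call_Relocation emitted by Java_Static_Call.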
duke@0 1696
duke@0 1697 //=============================================================================
duke@0 1698 #ifndef PRODUCT
duke@0 1699 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1700 {
coleenp@113 1701 if (UseCompressedOops) {
kvn@1491 1702 st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
kvn@642 1703 if (Universe::narrow_oop_shift() != 0) {
kvn@1491 1704 st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
kvn@1491 1705 }
kvn@1491 1706 st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
coleenp@113 1707 } else {
kvn@1491 1708 st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
kvn@1491 1709 "# Inline cache check");
coleenp@113 1710 }
duke@0 1711 st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
kvn@1491 1712 st->print_cr("\tnop\t# nops to align entry point");
duke@0 1713 }
duke@0 1714 #endif
duke@0 1715
duke@0 1716 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1717 {
duke@0 1718 MacroAssembler masm(&cbuf);
twisti@1668 1719 uint insts_size = cbuf.insts_size();
coleenp@113 1720 if (UseCompressedOops) {
coleenp@113 1721 masm.load_klass(rscratch1, j_rarg0);
never@304 1722 masm.cmpptr(rax, rscratch1);
coleenp@113 1723 } else {
never@304 1724 masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
coleenp@113 1725 }
duke@0 1726
duke@0 1727 masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
duke@0 1728
duke@0 1729 /* WARNING these NOPs are critical so that verified entry point is properly
kvn@1491 1730 4 bytes aligned for patching by NativeJump::patch_verified_entry() */
twisti@1668 1731 int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
kvn@1491 1732 if (OptoBreakpoint) {
duke@0 1733 // Leave space for int3
kvn@1491 1734 nops_cnt -= 1;
duke@0 1735 }
kvn@1491 1736 nops_cnt &= 0x3; // Do not add nops if code is aligned.
kvn@1491 1737 if (nops_cnt > 0)
kvn@1491 1738 masm.nop(nops_cnt);
duke@0 1739 }
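// Worked example (editorial, hypothetical sizes): if the load_klass/cmpptr
// sequence above emitted 9 bytes, then nops_cnt = 4 - (9 & 0x3) = 3, and after
// masking with 0x3 three nops are emitted, so the verified entry point that
// follows starts 4-byte aligned for NativeJump::patch_verified_entry().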
duke@0 1740
duke@0 1741 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
duke@0 1742 {
kvn@1491 1743 return MachNode::size(ra_); // too many variables; just compute it
kvn@1491 1744 // the hard way
duke@0 1745 }
duke@0 1746
duke@0 1747
duke@0 1748 //=============================================================================
duke@0 1749 uint size_exception_handler()
duke@0 1750 {
duke@0 1751 // NativeCall instruction size is the same as NativeJump.
duke@0 1752 // Note that this value is also credited (in output.cpp) to
duke@0 1753 // the size of the code section.
duke@0 1754 return NativeJump::instruction_size;
duke@0 1755 }
duke@0 1756
duke@0 1757 // Emit exception handler code.
duke@0 1758 int emit_exception_handler(CodeBuffer& cbuf)
duke@0 1759 {
duke@0 1760
twisti@1668 1761 // Note that the code buffer's insts_mark is always relative to insts.
duke@0 1762 // That's why we must use the macroassembler to generate a handler.
duke@0 1763 MacroAssembler _masm(&cbuf);
duke@0 1764 address base =
duke@0 1765 __ start_a_stub(size_exception_handler());
duke@0 1766 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1767 int offset = __ offset();
twisti@1668 1768 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
duke@0 1769 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
duke@0 1770 __ end_a_stub();
duke@0 1771 return offset;
duke@0 1772 }
duke@0 1773
duke@0 1774 uint size_deopt_handler()
duke@0 1775 {
duke@0 1776 // three 5 byte instructions
duke@0 1777 return 15;
duke@0 1778 }
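// Editorial size sketch (assuming rel32 reach to the deopt blob): the handler
// emitted below is
//   call next              E8 + rel32                  = 5 bytes
//   subq [rsp], imm8       REX.W 83 /5 + SIB + imm8    = 5 bytes
//   jmp  unpack            E9 + rel32                  = 5 bytes
// which accounts for the 15 returned above.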
duke@0 1779
duke@0 1780 // Emit deopt handler code.
duke@0 1781 int emit_deopt_handler(CodeBuffer& cbuf)
duke@0 1782 {
duke@0 1783
twisti@1668 1784 // Note that the code buffer's insts_mark is always relative to insts.
duke@0 1785 // That's why we must use the macroassembler to generate a handler.
duke@0 1786 MacroAssembler _masm(&cbuf);
duke@0 1787 address base =
duke@0 1788 __ start_a_stub(size_deopt_handler());
duke@0 1789 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1790 int offset = __ offset();
duke@0 1791 address the_pc = (address) __ pc();
duke@0 1792 Label next;
duke@0 1793 // push "the_pc" on the stack without destroying any registers
duke@0 1794 // as they all may be live.
duke@0 1795
duke@0 1796 // push address of "next"
duke@0 1797 __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
duke@0 1798 __ bind(next);
duke@0 1799 // adjust it so it matches "the_pc"
never@304 1800 __ subptr(Address(rsp, 0), __ offset() - offset);
duke@0 1801 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
duke@0 1802 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
duke@0 1803 __ end_a_stub();
duke@0 1804 return offset;
duke@0 1805 }
duke@0 1806
duke@0 1807
twisti@775 1808 const bool Matcher::match_rule_supported(int opcode) {
twisti@775 1809 if (!has_match_rule(opcode))
twisti@775 1810 return false;
twisti@775 1811
twisti@775 1812 return true; // Per default match rules are supported.
twisti@775 1813 }
twisti@775 1814
duke@0 1815 int Matcher::regnum_to_fpu_offset(int regnum)
duke@0 1816 {
duke@0 1817 return regnum - 32; // The FP registers are in the second chunk
duke@0 1818 }
duke@0 1819
duke@0 1820 // This is UltraSparc-specific; true just means we have fast l2f conversion
duke@0 1821 const bool Matcher::convL2FSupported(void) {
duke@0 1822 return true;
duke@0 1823 }
duke@0 1824
duke@0 1825 // Vector width in bytes
duke@0 1826 const uint Matcher::vector_width_in_bytes(void) {
duke@0 1827 return 8;
duke@0 1828 }
duke@0 1829
duke@0 1830 // Vector ideal reg
duke@0 1831 const uint Matcher::vector_ideal_reg(void) {
duke@0 1832 return Op_RegD;
duke@0 1833 }
duke@0 1834
duke@0 1835 // Is this branch offset short enough that a short branch can be used?
duke@0 1836 //
duke@0 1837 // NOTE: If the platform does not provide any short branch variants, then
duke@0 1838 // this method should return false for offset 0.
kvn@2614 1839 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
kvn@2614 1840 // The passed offset is relative to the address of the branch.
kvn@2614 1841 // On x86 a branch displacement is calculated relative to the address
kvn@2614 1842 // of the next instruction.
kvn@2614 1843 offset -= br_size;
kvn@2614 1844
never@415 1845 // the short version of jmpConUCF2 contains multiple branches,
never@415 1846 // making the reach slightly less
never@415 1847 if (rule == jmpConUCF2_rule)
never@415 1848 return (-126 <= offset && offset <= 125);
never@415 1849 return (-128 <= offset && offset <= 127);
duke@0 1850 }
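// Worked example (editorial, hypothetical numbers): a 2-byte short jcc whose
// target lies 80 bytes before the branch is passed offset = -80; after
// offset -= br_size the displacement is -82, which fits in [-128, 127], so
// the short form is usable (for jmpConUCF2 the window shrinks to [-126, 125]).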
duke@0 1851
duke@0 1852 const bool Matcher::isSimpleConstant64(jlong value) {
duke@0 1853 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
duke@0 1854 //return value == (int) value; // Cf. storeImmL and immL32.
duke@0 1855
duke@0 1856 // Probably always true, even if a temp register is required.
duke@0 1857 return true;
duke@0 1858 }
duke@0 1859
duke@0 1860 // The ecx parameter to rep stosq for the ClearArray node is in words.
duke@0 1861 const bool Matcher::init_array_count_is_in_bytes = false;
duke@0 1862
duke@0 1863 // Threshold size for cleararray.
duke@0 1864 const int Matcher::init_array_short_size = 8 * BytesPerLong;
duke@0 1865
kvn@2808 1866 // No additional cost for CMOVL.
kvn@2808 1867 const int Matcher::long_cmove_cost() { return 0; }
kvn@2808 1868
kvn@2808 1869 // No CMOVF/CMOVD with SSE2
kvn@2808 1870 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
kvn@2808 1871
duke@0 1872 // Should the Matcher clone shifts on addressing modes, expecting them
duke@0 1873 // to be subsumed into complex addressing expressions or compute them
duke@0 1874 // into registers? True for Intel but false for most RISCs
duke@0 1875 const bool Matcher::clone_shift_expressions = true;
duke@0 1876
roland@2248 1877 // Do we need to mask the count passed to shift instructions or does
roland@2248 1878 // the cpu only look at the lower 5/6 bits anyway?
roland@2248 1879 const bool Matcher::need_masked_shift_count = false;
roland@2248 1880
kvn@1495 1881 bool Matcher::narrow_oop_use_complex_address() {
kvn@1495 1882 assert(UseCompressedOops, "only for compressed oops code");
kvn@1495 1883 return (LogMinObjAlignmentInBytes <= 3);
kvn@1495 1884 }
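// Editorial note: x86 addressing supports scale factors of 1, 2, 4 and 8 only,
// so a narrow-oop shift of at most 3 can be folded into a complex address of
// the form [heapbase + narrow_oop << shift + disp]; larger object alignments
// would need an explicit decode first.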
kvn@1495 1885
duke@0 1886 // Is it better to copy float constants, or load them directly from
duke@0 1887 // memory? Intel can load a float constant from a direct address,
duke@0 1888 // requiring no extra registers. Most RISCs will have to materialize
duke@0 1889 // an address into a register first, so they would do better to copy
duke@0 1890 // the constant from stack.
duke@0 1891 const bool Matcher::rematerialize_float_constants = true; // XXX
duke@0 1892
duke@0 1893 // If CPU can load and store mis-aligned doubles directly then no
duke@0 1894 // fixup is needed. Else we split the double into 2 integer pieces
duke@0 1895 // and move it piece-by-piece. Only happens when passing doubles into
duke@0 1896 // C code as the Java calling convention forces doubles to be aligned.
duke@0 1897 const bool Matcher::misaligned_doubles_ok = true;
duke@0 1898
duke@0 1899 // No-op on amd64
duke@0 1900 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
duke@0 1901
duke@0 1902 // Advertise here if the CPU requires explicit rounding operations to
duke@0 1903 // implement the UseStrictFP mode.
duke@0 1904 const bool Matcher::strict_fp_requires_explicit_rounding = true;
duke@0 1905
kvn@1274 1906 // Are floats converted to double when stored to the stack during deoptimization?
kvn@1274 1907 // On x64 they are stored without conversion so we can use normal access.
kvn@1274 1908 bool Matcher::float_in_double() { return false; }
kvn@1274 1909
duke@0 1910 // Do ints take an entire long register or just half?
duke@0 1911 const bool Matcher::int_in_long = true;
duke@0 1912
duke@0 1913 // Return whether or not this register is ever used as an argument.
duke@0 1914 // This function is used on startup to build the trampoline stubs in
duke@0 1915 // generateOptoStub. Registers not mentioned will be killed by the VM
duke@0 1916 // call in the trampoline, and arguments in those registers will not be
duke@0 1917 // available to the callee.
duke@0 1918 bool Matcher::can_be_java_arg(int reg)
duke@0 1919 {
duke@0 1920 return
duke@0 1921 reg == RDI_num || reg == RDI_H_num ||
duke@0 1922 reg == RSI_num || reg == RSI_H_num ||
duke@0 1923 reg == RDX_num || reg == RDX_H_num ||
duke@0 1924 reg == RCX_num || reg == RCX_H_num ||
duke@0 1925 reg == R8_num || reg == R8_H_num ||
duke@0 1926 reg == R9_num || reg == R9_H_num ||
coleenp@113 1927 reg == R12_num || reg == R12_H_num ||
duke@0 1928 reg == XMM0_num || reg == XMM0_H_num ||
duke@0 1929 reg == XMM1_num || reg == XMM1_H_num ||
duke@0 1930 reg == XMM2_num || reg == XMM2_H_num ||
duke@0 1931 reg == XMM3_num || reg == XMM3_H_num ||
duke@0 1932 reg == XMM4_num || reg == XMM4_H_num ||
duke@0 1933 reg == XMM5_num || reg == XMM5_H_num ||
duke@0 1934 reg == XMM6_num || reg == XMM6_H_num ||
duke@0 1935 reg == XMM7_num || reg == XMM7_H_num;
duke@0 1936 }
duke@0 1937
duke@0 1938 bool Matcher::is_spillable_arg(int reg)
duke@0 1939 {
duke@0 1940 return can_be_java_arg(reg);
duke@0 1941 }
duke@0 1942
kvn@1834 1943 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
kvn@1834 1944   // In 64-bit mode, code that uses a multiply when the
kvn@1834 1945   // divisor is constant is faster than the hardware
kvn@1834 1946   // DIV instruction (it uses MulHiL).
kvn@1834 1947 return false;
kvn@1834 1948 }
kvn@1834 1949
duke@0 1950 // Register for DIVI projection of divmodI
duke@0 1951 RegMask Matcher::divI_proj_mask() {
roland@2882 1952 return INT_RAX_REG_mask();
duke@0 1953 }
duke@0 1954
duke@0 1955 // Register for MODI projection of divmodI
duke@0 1956 RegMask Matcher::modI_proj_mask() {
roland@2882 1957 return INT_RDX_REG_mask();
duke@0 1958 }
duke@0 1959
duke@0 1960 // Register for DIVL projection of divmodL
duke@0 1961 RegMask Matcher::divL_proj_mask() {
roland@2882 1962 return LONG_RAX_REG_mask();
duke@0 1963 }
duke@0 1964
duke@0 1965 // Register for MODL projection of divmodL
duke@0 1966 RegMask Matcher::modL_proj_mask() {
roland@2882 1967 return LONG_RDX_REG_mask();
duke@0 1968 }
duke@0 1969
twisti@1137 1970 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
roland@2882 1971 return PTR_RBP_REG_mask();
twisti@1137 1972 }
twisti@1137 1973
coleenp@113 1974 static Address build_address(int b, int i, int s, int d) {
coleenp@113 1975 Register index = as_Register(i);
coleenp@113 1976 Address::ScaleFactor scale = (Address::ScaleFactor)s;
coleenp@113 1977 if (index == rsp) {
coleenp@113 1978 index = noreg;
coleenp@113 1979 scale = Address::no_scale;
coleenp@113 1980 }
coleenp@113 1981 Address addr(as_Register(b), index, scale, d);
coleenp@113 1982 return addr;
coleenp@113 1983 }
coleenp@113 1984
duke@0 1985 %}
duke@0 1986
duke@0 1987 //----------ENCODING BLOCK-----------------------------------------------------
duke@0 1988 // This block specifies the encoding classes used by the compiler to
duke@0 1989 // output byte streams. Encoding classes are parameterized macros
duke@0 1990 // used by Machine Instruction Nodes in order to generate the bit
duke@0 1991 // encoding of the instruction. Operands specify their base encoding
duke@0 1992 // interface with the interface keyword. There are currently four
duke@0 1993 // supported interfaces: REG_INTER, CONST_INTER, MEMORY_INTER, and
duke@0 1994 // COND_INTER. REG_INTER causes an operand to generate a function
duke@0 1995 // which returns its register number when queried. CONST_INTER causes
duke@0 1996 // an operand to generate a function which returns the value of the
duke@0 1997 // constant when queried. MEMORY_INTER causes an operand to generate
duke@0 1998 // four functions which return the Base Register, the Index Register,
duke@0 1999 // the Scale Value, and the Offset Value of the operand when queried.
duke@0 2000 // COND_INTER causes an operand to generate six functions which return
duke@0 2001 // the encoding code (ie - encoding bits for the instruction)
duke@0 2002 // associated with each basic boolean condition for a conditional
duke@0 2003 // instruction.
duke@0 2004 //
duke@0 2005 // Instructions specify two basic values for encoding. Again, a
duke@0 2006 // function is available to check if the constant displacement is an
duke@0 2007 // oop. They use the ins_encode keyword to specify their encoding
duke@0 2008 // classes (which must be a sequence of enc_class names, and their
duke@0 2009 // parameters, specified in the encoding block), and they use the
duke@0 2010 // opcode keyword to specify, in order, their primary, secondary, and
duke@0 2011 // tertiary opcode. Only the opcode sections which a particular
duke@0 2012 // instruction needs for encoding need to be specified.
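// Illustrative sketch (editorial, kept as a comment so it does not alter the
// generated matcher): an instruct entry elsewhere in this file wires an opcode
// and the enc_classes below together roughly like this --
//
//   instruct example_addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
//   %{
//     match(Set dst (AddI dst src));
//     effect(KILL cr);
//     format %{ "addl    $dst, $src\t# int" %}
//     opcode(0x03);                 // primary opcode, emitted by OpcP
//     ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
//     ins_pipe(ialu_reg_reg);
//   %}
//
// The instruct name and pipe class here are illustrative; REX_reg_reg, OpcP
// and reg_reg are the enc_classes defined in the block that follows.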
duke@0 2013 encode %{
duke@0 2014 // Build emit functions for each basic byte or larger field in the
duke@0 2015 // intel encoding scheme (opcode, rm, sib, immediate), and call them
duke@0 2016 // from C++ code in the enc_class source block. Emit functions will
duke@0 2017 // live in the main source block for now. In future, we can
duke@0 2018 // generalize this by adding a syntax that specifies the sizes of
duke@0 2019 // fields in an order, so that the adlc can build the emit functions
duke@0 2020 // automagically
duke@0 2021
duke@0 2022 // Emit primary opcode
duke@0 2023 enc_class OpcP
duke@0 2024 %{
duke@0 2025 emit_opcode(cbuf, $primary);
duke@0 2026 %}
duke@0 2027
duke@0 2028 // Emit secondary opcode
duke@0 2029 enc_class OpcS
duke@0 2030 %{
duke@0 2031 emit_opcode(cbuf, $secondary);
duke@0 2032 %}
duke@0 2033
duke@0 2034 // Emit tertiary opcode
duke@0 2035 enc_class OpcT
duke@0 2036 %{
duke@0 2037 emit_opcode(cbuf, $tertiary);
duke@0 2038 %}
duke@0 2039
duke@0 2040 // Emit opcode directly
duke@0 2041 enc_class Opcode(immI d8)
duke@0 2042 %{
duke@0 2043 emit_opcode(cbuf, $d8$$constant);
duke@0 2044 %}
duke@0 2045
duke@0 2046 // Emit size prefix
duke@0 2047 enc_class SizePrefix
duke@0 2048 %{
duke@0 2049 emit_opcode(cbuf, 0x66);
duke@0 2050 %}
duke@0 2051
duke@0 2052 enc_class reg(rRegI reg)
duke@0 2053 %{
duke@0 2054 emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
duke@0 2055 %}
duke@0 2056
duke@0 2057 enc_class reg_reg(rRegI dst, rRegI src)
duke@0 2058 %{
duke@0 2059 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2060 %}
duke@0 2061
duke@0 2062 enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
duke@0 2063 %{
duke@0 2064 emit_opcode(cbuf, $opcode$$constant);
duke@0 2065 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2066 %}
duke@0 2067
duke@0 2068 enc_class cdql_enc(no_rax_rdx_RegI div)
duke@0 2069 %{
duke@0 2070 // Full implementation of Java idiv and irem; checks for
duke@0 2071 // special case as described in JVM spec., p.243 & p.271.
duke@0 2072 //
duke@0 2073 // normal case special case
duke@0 2074 //
duke@0 2075 // input : rax: dividend min_int
duke@0 2076 // reg: divisor -1
duke@0 2077 //
duke@0 2078 // output: rax: quotient (= rax idiv reg) min_int
duke@0 2079 // rdx: remainder (= rax irem reg) 0
duke@0 2080 //
duke@0 2081 // Code sequence:
duke@0 2082 //
duke@0 2083 // 0: 3d 00 00 00 80 cmp $0x80000000,%eax
duke@0 2084 // 5: 75 07/08 jne e <normal>
duke@0 2085 // 7: 33 d2 xor %edx,%edx
duke@0 2086 // [div >= 8 -> offset + 1]
duke@0 2087 // [REX_B]
duke@0 2088 // 9: 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2089 // c: 74 03/04 je 11 <done>
duke@0 2090 // 000000000000000e <normal>:
duke@0 2091 // e: 99 cltd
duke@0 2092 // [div >= 8 -> offset + 1]
duke@0 2093 // [REX_B]
duke@0 2094 // f: f7 f9 idiv $div
duke@0 2095 // 0000000000000011 <done>:
duke@0 2096
duke@0 2097 // cmp $0x80000000,%eax
duke@0 2098 emit_opcode(cbuf, 0x3d);
duke@0 2099 emit_d8(cbuf, 0x00);
duke@0 2100 emit_d8(cbuf, 0x00);
duke@0 2101 emit_d8(cbuf, 0x00);
duke@0 2102 emit_d8(cbuf, 0x80);
duke@0 2103
duke@0 2104 // jne e <normal>
duke@0 2105 emit_opcode(cbuf, 0x75);
duke@0 2106 emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);
duke@0 2107
duke@0 2108 // xor %edx,%edx
duke@0 2109 emit_opcode(cbuf, 0x33);
duke@0 2110 emit_d8(cbuf, 0xD2);
duke@0 2111
duke@0 2112 // cmp $0xffffffffffffffff,$div
duke@0 2113 if ($div$$reg >= 8) {
duke@0 2114 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2115 }
duke@0 2116 emit_opcode(cbuf, 0x83);
duke@0 2117 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2118 emit_d8(cbuf, 0xFF);
duke@0 2119
duke@0 2120 // je 11 <done>
duke@0 2121 emit_opcode(cbuf, 0x74);
duke@0 2122 emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);
duke@0 2123
duke@0 2124 // <normal>
duke@0 2125 // cltd
duke@0 2126 emit_opcode(cbuf, 0x99);
duke@0 2127
duke@0 2128 // idivl (note: must be emitted by the user of this rule)
duke@0 2129 // <done>
duke@0 2130 %}
duke@0 2131
duke@0 2132 enc_class cdqq_enc(no_rax_rdx_RegL div)
duke@0 2133 %{
duke@0 2134 // Full implementation of Java ldiv and lrem; checks for
duke@0 2135 // special case as described in JVM spec., p.243 & p.271.
duke@0 2136 //
duke@0 2137 // normal case special case
duke@0 2138 //
duke@0 2139 // input : rax: dividend min_long
duke@0 2140 // reg: divisor -1
duke@0 2141 //
duke@0 2142 // output: rax: quotient (= rax idiv reg) min_long
duke@0 2143 // rdx: remainder (= rax irem reg) 0
duke@0 2144 //
duke@0 2145 // Code sequence:
duke@0 2146 //
duke@0 2147 // 0: 48 ba 00 00 00 00 00 mov $0x8000000000000000,%rdx
duke@0 2148 // 7: 00 00 80
duke@0 2149 // a: 48 39 d0 cmp %rdx,%rax
duke@0 2150 // d: 75 08 jne 17 <normal>
duke@0 2151 // f: 33 d2 xor %edx,%edx
duke@0 2152 // 11: 48 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2153 // 15: 74 05 je 1c <done>
duke@0 2154 // 0000000000000017 <normal>:
duke@0 2155 // 17: 48 99 cqto
duke@0 2156 // 19: 48 f7 f9 idiv $div
duke@0 2157 // 000000000000001c <done>:
duke@0 2158
duke@0 2159 // mov $0x8000000000000000,%rdx
duke@0 2160 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2161 emit_opcode(cbuf, 0xBA);
duke@0 2162 emit_d8(cbuf, 0x00);
duke@0 2163 emit_d8(cbuf, 0x00);
duke@0 2164 emit_d8(cbuf, 0x00);
duke@0 2165 emit_d8(cbuf, 0x00);
duke@0 2166 emit_d8(cbuf, 0x00);
duke@0 2167 emit_d8(cbuf, 0x00);
duke@0 2168 emit_d8(cbuf, 0x00);
duke@0 2169 emit_d8(cbuf, 0x80);
duke@0 2170
duke@0 2171 // cmp %rdx,%rax
duke@0 2172 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2173 emit_opcode(cbuf, 0x39);
duke@0 2174 emit_d8(cbuf, 0xD0);
duke@0 2175
duke@0 2176 // jne 17 <normal>
duke@0 2177 emit_opcode(cbuf, 0x75);
duke@0 2178 emit_d8(cbuf, 0x08);
duke@0 2179
duke@0 2180 // xor %edx,%edx
duke@0 2181 emit_opcode(cbuf, 0x33);
duke@0 2182 emit_d8(cbuf, 0xD2);
duke@0 2183
duke@0 2184 // cmp $0xffffffffffffffff,$div
duke@0 2185 emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
duke@0 2186 emit_opcode(cbuf, 0x83);
duke@0 2187 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2188 emit_d8(cbuf, 0xFF);
duke@0 2189
duke@0 2190 // je 1c <done>
duke@0 2191 emit_opcode(cbuf, 0x74);
duke@0 2192 emit_d8(cbuf, 0x05);
duke@0 2193
duke@0 2194 // <normal>
duke@0 2195 // cqto
duke@0 2196 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2197 emit_opcode(cbuf, 0x99);
duke@0 2198
duke@0 2199 // idivq (note: must be emitted by the user of this rule)
duke@0 2200 // <done>
duke@0 2201 %}
duke@0 2202
duke@0 2203 // Opcode enc_class for 8/32-bit immediate instructions with sign extension
duke@0 2204 enc_class OpcSE(immI imm)
duke@0 2205 %{
duke@0 2206 // Emit primary opcode and set sign-extend bit
duke@0 2207 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2208 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2209 emit_opcode(cbuf, $primary | 0x02);
duke@0 2210 } else {
duke@0 2211 // 32-bit immediate
duke@0 2212 emit_opcode(cbuf, $primary);
duke@0 2213 }
duke@0 2214 %}
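// Editorial example: for a group-1 ALU rule whose primary opcode is 0x81
// (imm32 form), setting the sign-extend bit turns it into 0x83, the imm8
// form, so an 8-bit constant such as 7 is emitted as 83 /r ib rather than
// 81 /r id.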
duke@0 2215
duke@0 2216 enc_class OpcSErm(rRegI dst, immI imm)
duke@0 2217 %{
duke@0 2218 // OpcSEr/m
duke@0 2219 int dstenc = $dst$$reg;
duke@0 2220 if (dstenc >= 8) {
duke@0 2221 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2222 dstenc -= 8;
duke@0 2223 }
duke@0 2224 // Emit primary opcode and set sign-extend bit
duke@0 2225 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2226 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2227 emit_opcode(cbuf, $primary | 0x02);
duke@0 2228 } else {
duke@0 2229 // 32-bit immediate
duke@0 2230 emit_opcode(cbuf, $primary);
duke@0 2231 }
duke@0 2232 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2233 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2234 %}
duke@0 2235
duke@0 2236 enc_class OpcSErm_wide(rRegL dst, immI imm)
duke@0 2237 %{
duke@0 2238 // OpcSEr/m
duke@0 2239 int dstenc = $dst$$reg;
duke@0 2240 if (dstenc < 8) {
duke@0 2241 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2242 } else {
duke@0 2243 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2244 dstenc -= 8;
duke@0 2245 }
duke@0 2246 // Emit primary opcode and set sign-extend bit
duke@0 2247 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2248 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2249 emit_opcode(cbuf, $primary | 0x02);
duke@0 2250 } else {
duke@0 2251 // 32-bit immediate
duke@0 2252 emit_opcode(cbuf, $primary);
duke@0 2253 }
duke@0 2254 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2255 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2256 %}
duke@0 2257
duke@0 2258 enc_class Con8or32(immI imm)
duke@0 2259 %{
duke@0 2260 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2261 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2262 $$$emit8$imm$$constant;
duke@0 2263 } else {
duke@0 2264 // 32-bit immediate
duke@0 2265 $$$emit32$imm$$constant;
duke@0 2266 }
duke@0 2267 %}
duke@0 2268
duke@0 2269 enc_class opc2_reg(rRegI dst)
duke@0 2270 %{
duke@0 2271 // BSWAP
duke@0 2272 emit_cc(cbuf, $secondary, $dst$$reg);
duke@0 2273 %}
duke@0 2274
duke@0 2275 enc_class opc3_reg(rRegI dst)
duke@0 2276 %{
duke@0 2277 // BSWAP
duke@0 2278 emit_cc(cbuf, $tertiary, $dst$$reg);
duke@0 2279 %}
duke@0 2280
duke@0 2281 enc_class reg_opc(rRegI div)
duke@0 2282 %{
duke@0 2283 // INC, DEC, IDIV, IMOD, JMP indirect, ...
duke@0 2284 emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
duke@0 2285 %}
duke@0 2286
duke@0 2287 enc_class enc_cmov(cmpOp cop)
duke@0 2288 %{
duke@0 2289 // CMOV
duke@0 2290 $$$emit8$primary;
duke@0 2291 emit_cc(cbuf, $secondary, $cop$$cmpcode);
duke@0 2292 %}
duke@0 2293
duke@0 2294 enc_class enc_PartialSubtypeCheck()
duke@0 2295 %{
duke@0 2296 Register Rrdi = as_Register(RDI_enc); // result register
duke@0 2297 Register Rrax = as_Register(RAX_enc); // super class
duke@0 2298 Register Rrcx = as_Register(RCX_enc); // killed
duke@0 2299 Register Rrsi = as_Register(RSI_enc); // sub class
jrose@644 2300 Label miss;
jrose@644 2301 const bool set_cond_codes = true;
duke@0 2302
duke@0 2303 MacroAssembler _masm(&cbuf);
jrose@644 2304 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
jrose@644 2305 NULL, &miss,
jrose@644 2306 /*set_cond_codes:*/ true);
duke@0 2307 if ($primary) {
never@304 2308 __ xorptr(Rrdi, Rrdi);
duke@0 2309 }
duke@0 2310 __ bind(miss);
duke@0 2311 %}
duke@0 2312
duke@0 2313 enc_class Java_To_Interpreter(method meth)
duke@0 2314 %{
duke@0 2315 // CALL Java_To_Interpreter
duke@0 2316 // This is the instruction starting address for relocation info.
twisti@1668 2317 cbuf.set_insts_mark();
duke@0 2318 $$$emit8$primary;
duke@0 2319 // CALL directly to the runtime
duke@0 2320 emit_d32_reloc(cbuf,
twisti@1668 2321 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
duke@0 2322 runtime_call_Relocation::spec(),
duke@0 2323 RELOC_DISP32);
duke@0 2324 %}
duke@0 2325
twisti@1137 2326 enc_class preserve_SP %{
twisti@1668 2327 debug_only(int off0 = cbuf.insts_size());
twisti@1137 2328 MacroAssembler _masm(&cbuf);
twisti@1137 2329 // RBP is preserved across all calls, even compiled calls.
twisti@1137 2330 // Use it to preserve RSP in places where the callee might change the SP.
twisti@1487 2331 __ movptr(rbp_mh_SP_save, rsp);
twisti@1668 2332 debug_only(int off1 = cbuf.insts_size());
twisti@1137 2333 assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
twisti@1137 2334 %}
twisti@1137 2335
twisti@1137 2336 enc_class restore_SP %{
twisti@1137 2337 MacroAssembler _masm(&cbuf);
twisti@1487 2338 __ movptr(rsp, rbp_mh_SP_save);
twisti@1137 2339 %}
twisti@1137 2340
duke@0 2341 enc_class Java_Static_Call(method meth)
duke@0 2342 %{
duke@0 2343 // JAVA STATIC CALL
duke@0 2344 // CALL to fixup routine. Fixup routine uses ScopeDesc info to
duke@0 2345 // determine who we intended to call.
twisti@1668 2346 cbuf.set_insts_mark();
duke@0 2347 $$$emit8$primary;
duke@0 2348
duke@0 2349 if (!_method) {
duke@0 2350 emit_d32_reloc(cbuf,
twisti@1668 2351 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
duke@0 2352 runtime_call_Relocation::spec(),
duke@0 2353 RELOC_DISP32);
duke@0 2354 } else if (_optimized_virtual) {
duke@0 2355 emit_d32_reloc(cbuf,
twisti@1668 2356 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
duke@0 2357 opt_virtual_call_Relocation::spec(),
duke@0 2358 RELOC_DISP32);
duke@0 2359 } else {
duke@0 2360 emit_d32_reloc(cbuf,
twisti@1668 2361 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
duke@0 2362 static_call_Relocation::spec(),
duke@0 2363 RELOC_DISP32);
duke@0 2364 }
duke@0 2365 if (_method) {
duke@0 2366 // Emit stub for static call
duke@0 2367 emit_java_to_interp(cbuf);
duke@0 2368 }
duke@0 2369 %}
duke@0 2370
duke@0 2371 enc_class Java_Dynamic_Call(method meth)
duke@0 2372 %{
duke@0 2373 // JAVA DYNAMIC CALL
duke@0 2374 // !!!!!
duke@0 2375 // Generate "movq rax, -1", placeholder instruction to load oop-info
duke@0 2376 // emit_call_dynamic_prologue( cbuf );
twisti@1668 2377 cbuf.set_insts_mark();
duke@0 2378
duke@0 2379 // movq rax, -1
duke@0 2380 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2381 emit_opcode(cbuf, 0xB8 | RAX_enc);
duke@0 2382 emit_d64_reloc(cbuf,
duke@0 2383 (int64_t) Universe::non_oop_word(),
duke@0 2384 oop_Relocation::spec_for_immediate(), RELOC_IMM64);
twisti@1668 2385 address virtual_call_oop_addr = cbuf.insts_mark();
duke@0 2386 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
duke@0 2387 // who we intended to call.
twisti@1668 2388 cbuf.set_insts_mark();
duke@0 2389 $$$emit8$primary;
duke@0 2390 emit_d32_reloc(cbuf,
twisti@1668 2391 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4),
duke@0 2392 virtual_call_Relocation::spec(virtual_call_oop_addr),
duke@0 2393 RELOC_DISP32);
duke@0 2394 %}
duke@0 2395
duke@0 2396 enc_class Java_Compiled_Call(method meth)
duke@0 2397 %{
duke@0 2398 // JAVA COMPILED CALL
duke@0 2399 int disp = in_bytes(methodOopDesc:: from_compiled_offset());
duke@0 2400
duke@0 2401 // XXX XXX offset is 128 is 1.5 NON-PRODUCT !!!
duke@0 2402 // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");
duke@0 2403
duke@0 2404 // callq *disp(%rax)
twisti@1668 2405 cbuf.set_insts_mark();
duke@0 2406 $$$emit8$primary;
duke@0 2407 if (disp < 0x80) {
duke@0 2408 emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
duke@0 2409 emit_d8(cbuf, disp); // Displacement
duke@0 2410 } else {
duke@0 2411 emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
duke@0 2412 emit_d32(cbuf, disp); // Displacement
duke@0 2413 }
duke@0 2414 %}
duke@0 2415
duke@0 2416 enc_class reg_opc_imm(rRegI dst, immI8 shift)
duke@0 2417 %{
duke@0 2418 // SAL, SAR, SHR
duke@0 2419 int dstenc = $dst$$reg;
duke@0 2420 if (dstenc >= 8) {
duke@0 2421 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2422 dstenc -= 8;
duke@0 2423 }
duke@0 2424 $$$emit8$primary;
duke@0 2425 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2426 $$$emit8$shift$$constant;
duke@0 2427 %}
duke@0 2428
duke@0 2429 enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
duke@0 2430 %{
duke@0 2431 // SAL, SAR, SHR
duke@0 2432 int dstenc = $dst$$reg;
duke@0 2433 if (dstenc < 8) {
duke@0 2434 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2435 } else {
duke@0 2436 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2437 dstenc -= 8;
duke@0 2438 }
duke@0 2439 $$$emit8$primary;
duke@0 2440 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2441 $$$emit8$shift$$constant;
duke@0 2442 %}
duke@0 2443
duke@0 2444 enc_class load_immI(rRegI dst, immI src)
duke@0 2445 %{
duke@0 2446 int dstenc = $dst$$reg;
duke@0 2447 if (dstenc >= 8) {
duke@0 2448 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2449 dstenc -= 8;
duke@0 2450 }
duke@0 2451 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2452 $$$emit32$src$$constant;
duke@0 2453 %}
duke@0 2454
duke@0 2455 enc_class load_immL(rRegL dst, immL src)
duke@0 2456 %{
duke@0 2457 int dstenc = $dst$$reg;
duke@0 2458 if (dstenc < 8) {
duke@0 2459 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2460 } else {
duke@0 2461 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2462 dstenc -= 8;
duke@0 2463 }
duke@0 2464 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2465 emit_d64(cbuf, $src$$constant);
duke@0 2466 %}
duke@0 2467
duke@0 2468 enc_class load_immUL32(rRegL dst, immUL32 src)
duke@0 2469 %{
duke@0 2470 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2471 int dstenc = $dst$$reg;
duke@0 2472 if (dstenc >= 8) {
duke@0 2473 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2474 dstenc -= 8;
duke@0 2475 }
duke@0 2476 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2477 $$$emit32$src$$constant;
duke@0 2478 %}
duke@0 2479
duke@0 2480 enc_class load_immL32(rRegL dst, immL32 src)
duke@0 2481 %{
duke@0 2482 int dstenc = $dst$$reg;
duke@0 2483 if (dstenc < 8) {
duke@0 2484 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2485 } else {
duke@0 2486 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2487 dstenc -= 8;
duke@0 2488 }
duke@0 2489 emit_opcode(cbuf, 0xC7);
duke@0 2490 emit_rm(cbuf, 0x03, 0x00, dstenc);
duke@0 2491 $$$emit32$src$$constant;
duke@0 2492 %}
duke@0 2493
duke@0 2494 enc_class load_immP31(rRegP dst, immP32 src)
duke@0 2495 %{
duke@0 2496 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2497 int dstenc = $dst$$reg;
duke@0 2498 if (dstenc >= 8) {
duke@0 2499 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2500 dstenc -= 8;
duke@0 2501 }
duke@0 2502 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2503 $$$emit32$src$$constant;
duke@0 2504 %}
duke@0 2505
duke@0 2506 enc_class load_immP(rRegP dst, immP src)
duke@0 2507 %{
duke@0 2508 int dstenc = $dst$$reg;
duke@0 2509 if (dstenc < 8) {
duke@0 2510 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2511 } else {
duke@0 2512 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2513 dstenc -= 8;
duke@0 2514 }
duke@0 2515 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2516 // This next line should be generated from ADLC
duke@0 2517 if ($src->constant_is_oop()) {
duke@0 2518 emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
duke@0 2519 } else {
duke@0 2520 emit_d64(cbuf, $src$$constant);
duke@0 2521 }
duke@0 2522 %}
duke@0 2523
duke@0 2524 enc_class Con32(immI src)
duke@0 2525 %{
duke@0 2526 // Output immediate
duke@0 2527 $$$emit32$src$$constant;
duke@0 2528 %}
duke@0 2529
duke@0 2530 enc_class Con64(immL src)
duke@0 2531 %{
duke@0 2532 // Output immediate
duke@0 2533 emit_d64($src$$constant);
duke@0 2534 %}
duke@0 2535
duke@0 2536 enc_class Con32F_as_bits(immF src)
duke@0 2537 %{
duke@0 2538 // Output Float immediate bits
duke@0 2539 jfloat jf = $src$$constant;
duke@0 2540 jint jf_as_bits = jint_cast(jf);
duke@0 2541 emit_d32(cbuf, jf_as_bits);
duke@0 2542 %}
duke@0 2543
duke@0 2544 enc_class Con16(immI src)
duke@0 2545 %{
duke@0 2546 // Output immediate
duke@0 2547 $$$emit16$src$$constant;
duke@0 2548 %}
duke@0 2549
duke@0 2550 // How is this different from Con32??? XXX
duke@0 2551 enc_class Con_d32(immI src)
duke@0 2552 %{
duke@0 2553 emit_d32(cbuf,$src$$constant);
duke@0 2554 %}
duke@0 2555
duke@0 2556 enc_class conmemref (rRegP t1) %{ // Con32(storeImmI)
duke@0 2557 // Output immediate memory reference
duke@0 2558 emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
duke@0 2559 emit_d32(cbuf, 0x00);
duke@0 2560 %}
duke@0 2561
duke@0 2562 enc_class lock_prefix()
duke@0 2563 %{
duke@0 2564 if (os::is_MP()) {
duke@0 2565 emit_opcode(cbuf, 0xF0); // lock
duke@0 2566 }
duke@0 2567 %}
duke@0 2568
duke@0 2569 enc_class REX_mem(memory mem)
duke@0 2570 %{
duke@0 2571 if ($mem$$base >= 8) {
duke@0 2572 if ($mem$$index < 8) {
duke@0 2573 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2574 } else {
duke@0 2575 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 2576 }
duke@0 2577 } else {
duke@0 2578 if ($mem$$index >= 8) {
duke@0 2579 emit_opcode(cbuf, Assembler::REX_X);
duke@0 2580 }
duke@0 2581 }
duke@0 2582 %}
duke@0 2583
duke@0 2584 enc_class REX_mem_wide(memory mem)
duke@0 2585 %{
duke@0 2586 if ($mem$$base >= 8) {
duke@0 2587 if ($mem$$index < 8) {
duke@0 2588 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2589 } else {
duke@0 2590 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 2591 }
duke@0 2592 } else {
duke@0 2593 if ($mem$$index < 8) {
duke@0 2594 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2595 } else {
duke@0 2596 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 2597 }
duke@0 2598 }
duke@0 2599 %}
duke@0 2600
duke@0 2601 // for byte regs
duke@0 2602 enc_class REX_breg(rRegI reg)
duke@0 2603 %{
duke@0 2604 if ($reg$$reg >= 4) {
duke@0 2605 emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 2606 }
duke@0 2607 %}
duke@0 2608
duke@0 2609 // for byte regs
duke@0 2610 enc_class REX_reg_breg(rRegI dst, rRegI src)
duke@0 2611 %{
duke@0 2612 if ($dst$$reg < 8) {
duke@0 2613 if ($src$$reg >= 4) {
duke@0 2614 emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 2615 }
duke@0 2616 } else {
duke@0 2617 if ($src$$reg < 8) {
duke@0 2618 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2619 } else {
duke@0 2620 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2621 }
duke@0 2622 }
duke@0 2623 %}
duke@0 2624
duke@0 2625 // for byte regs
duke@0 2626 enc_class REX_breg_mem(rRegI reg, memory mem)
duke@0 2627 %{
duke@0 2628 if ($reg$$reg < 8) {
duke@0 2629 if ($mem$$base < 8) {
duke@0 2630 if ($mem$$index >= 8) {
duke@0 2631 emit_opcode(cbuf, Assembler::REX_X);
duke@0 2632 } else if ($reg$$reg >= 4) {
duke@0 2633 emit_opcode(cbuf, Assembler::REX);
duke@0 2634 }
duke@0 2635 } else {
duke@0 2636 if ($mem$$index < 8) {
duke@0 2637 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2638 } else {
duke@0 2639 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 2640 }
duke@0 2641 }
duke@0 2642 } else {
duke@0 2643 if ($mem$$base < 8) {
duke@0 2644 if ($mem$$index < 8) {
duke@0 2645 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2646 } else {
duke@0 2647 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 2648 }
duke@0 2649 } else {
duke@0 2650 if ($mem$$index < 8) {
duke@0 2651 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2652 } else {
duke@0 2653 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 2654 }
duke@0 2655 }
duke@0 2656 }
duke@0 2657 %}
duke@0 2658
duke@0 2659 enc_class REX_reg(rRegI reg)
duke@0 2660 %{
duke@0 2661 if ($reg$$reg >= 8) {
duke@0 2662 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2663 }
duke@0 2664 %}
duke@0 2665
duke@0 2666 enc_class REX_reg_wide(rRegI reg)
duke@0 2667 %{
duke@0 2668 if ($reg$$reg < 8) {
duke@0 2669 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2670 } else {
duke@0 2671 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2672 }
duke@0 2673 %}
duke@0 2674
duke@0 2675 enc_class REX_reg_reg(rRegI dst, rRegI src)
duke@0 2676 %{
duke@0 2677 if ($dst$$reg < 8) {
duke@0 2678 if ($src$$reg >= 8) {
duke@0 2679 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2680 }
duke@0 2681 } else {
duke@0 2682 if ($src$$reg < 8) {
duke@0 2683 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2684 } else {
duke@0 2685 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2686 }
duke@0 2687 }
duke@0 2688 %}
duke@0 2689
duke@0 2690 enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
duke@0 2691 %{
duke@0 2692 if ($dst$$reg < 8) {
duke@0 2693 if ($src$$reg < 8) {
duke@0 2694 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2695 } else {
duke@0 2696 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2697 }
duke@0 2698 } else {
duke@0 2699 if ($src$$reg < 8) {
duke@0 2700 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 2701 } else {
duke@0 2702 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 2703 }
duke@0 2704 }
duke@0 2705 %}
duke@0 2706
duke@0 2707 enc_class REX_reg_mem(rRegI reg, memory mem)
duke@0 2708 %{
duke@0 2709 if ($reg$$reg < 8) {
duke@0 2710 if ($mem$$base < 8) {
duke@0 2711 if ($mem$$index >= 8) {
duke@0 2712 emit_opcode(cbuf, Assembler::REX_X);
duke@0 2713 }
duke@0 2714 } else {
duke@0 2715 if ($mem$$index < 8) {
duke@0 2716 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2717 } else {
duke@0 2718 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 2719 }
duke@0 2720 }
duke@0 2721 } else {
duke@0 2722 if ($mem$$base < 8) {
duke@0 2723 if ($mem$$index < 8) {
duke@0 2724 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2725 } else {
duke@0 2726 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 2727 }
duke@0 2728 } else {
duke@0 2729 if ($mem$$index < 8) {
duke@0 2730 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2731 } else {
duke@0 2732 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 2733 }
duke@0 2734 }
duke@0 2735 }
duke@0 2736 %}
duke@0 2737
duke@0 2738 enc_class REX_reg_mem_wide(rRegL reg, memory mem)
duke@0 2739 %{
duke@0 2740 if ($reg$$reg < 8) {
duke@0 2741 if ($mem$$base < 8) {
duke@0 2742 if ($mem$$index < 8) {
duke@0 2743 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2744 } else {
duke@0 2745 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 2746 }
duke@0 2747 } else {
duke@0 2748 if ($mem$$index < 8) {
duke@0 2749 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2750 } else {
duke@0 2751 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 2752 }
duke@0 2753 }
duke@0 2754 } else {
duke@0 2755 if ($mem$$base < 8) {
duke@0 2756 if ($mem$$index < 8) {
duke@0 2757 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 2758 } else {
duke@0 2759 emit_opcode(cbuf, Assembler::REX_WRX);
duke@0 2760 }
duke@0 2761 } else {
duke@0 2762 if ($mem$$index < 8) {
duke@0 2763 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 2764 } else {
duke@0 2765 emit_opcode(cbuf, Assembler::REX_WRXB);
duke@0 2766 }
duke@0 2767 }
duke@0 2768 }
duke@0 2769 %}
duke@0 2770
duke@0 2771 enc_class reg_mem(rRegI ereg, memory mem)
duke@0 2772 %{
duke@0 2773 // High registers are handled in encode_RegMem
duke@0 2774 int reg = $ereg$$reg;
duke@0 2775 int base = $mem$$base;
duke@0 2776 int index = $mem$$index;
duke@0 2777 int scale = $mem$$scale;
duke@0 2778 int disp = $mem$$disp;
duke@0 2779 bool disp_is_oop = $mem->disp_is_oop();
duke@0 2780
duke@0 2781 encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
duke@0 2782 %}
duke@0 2783
duke@0 2784 enc_class RM_opc_mem(immI rm_opcode, memory mem)
duke@0 2785 %{
duke@0 2786 int rm_byte_opcode = $rm_opcode$$constant;
duke@0 2787
duke@0 2788 // High registers are handled in encode_RegMem
duke@0 2789 int base = $mem$$base;
duke@0 2790 int index = $mem$$index;
duke@0 2791 int scale = $mem$$scale;
duke@0 2792 int displace = $mem$$disp;
duke@0 2793
duke@0 2794 bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
duke@0 2795 // working with static
duke@0 2796 // globals
duke@0 2797 encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
duke@0 2798 disp_is_oop);
duke@0 2799 %}
duke@0 2800
duke@0 2801 enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
duke@0 2802 %{
duke@0 2803 int reg_encoding = $dst$$reg;
duke@0 2804 int base = $src0$$reg; // 0xFFFFFFFF indicates no base
duke@0 2805 int index = 0x04; // 0x04 indicates no index
duke@0 2806 int scale = 0x00; // 0x00 indicates no scale
duke@0 2807 int displace = $src1$$constant; // 0x00 indicates no displacement
duke@0 2808 bool disp_is_oop = false;
duke@0 2809 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
duke@0 2810 disp_is_oop);
duke@0 2811 %}
duke@0 2812
duke@0 2813 enc_class neg_reg(rRegI dst)
duke@0 2814 %{
duke@0 2815 int dstenc = $dst$$reg;
duke@0 2816 if (dstenc >= 8) {
duke@0 2817 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2818 dstenc -= 8;
duke@0 2819 }
duke@0 2820 // NEG $dst
duke@0 2821 emit_opcode(cbuf, 0xF7);
duke@0 2822 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 2823 %}
duke@0 2824
duke@0 2825 enc_class neg_reg_wide(rRegI dst)
duke@0 2826 %{
duke@0 2827 int dstenc = $dst$$reg;
duke@0 2828 if (dstenc < 8) {
duke@0 2829 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2830 } else {
duke@0 2831 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2832 dstenc -= 8;
duke@0 2833 }
duke@0 2834 // NEG $dst
duke@0 2835 emit_opcode(cbuf, 0xF7);
duke@0 2836 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 2837 %}
duke@0 2838
duke@0 2839 enc_class setLT_reg(rRegI dst)
duke@0 2840 %{
duke@0 2841 int dstenc = $dst$$reg;
duke@0 2842 if (dstenc >= 8) {
duke@0 2843 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2844 dstenc -= 8;
duke@0 2845 } else if (dstenc >= 4) {
duke@0 2846 emit_opcode(cbuf, Assembler::REX);
duke@0 2847 }
duke@0 2848 // SETLT $dst
duke@0 2849 emit_opcode(cbuf, 0x0F);
duke@0 2850 emit_opcode(cbuf, 0x9C);
duke@0 2851 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 2852 %}
duke@0 2853
duke@0 2854 enc_class setNZ_reg(rRegI dst)
duke@0 2855 %{
duke@0 2856 int dstenc = $dst$$reg;
duke@0 2857 if (dstenc >= 8) {
duke@0 2858 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2859 dstenc -= 8;
duke@0 2860 } else if (dstenc >= 4) {
duke@0 2861 emit_opcode(cbuf, Assembler::REX);
duke@0 2862 }
duke@0 2863 // SETNZ $dst
duke@0 2864 emit_opcode(cbuf, 0x0F);
duke@0 2865 emit_opcode(cbuf, 0x95);
duke@0 2866 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 2867 %}
duke@0 2868
duke@0 2869
duke@0 2870 // Compare the longs and set -1, 0, or 1 into dst
duke@0 2871 enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
duke@0 2872 %{
duke@0 2873 int src1enc = $src1$$reg;
duke@0 2874 int src2enc = $src2$$reg;
duke@0 2875 int dstenc = $dst$$reg;
duke@0 2876
duke@0 2877 // cmpq $src1, $src2
duke@0 2878 if (src1enc < 8) {
duke@0 2879 if (src2enc < 8) {
duke@0 2880 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2881 } else {
duke@0 2882 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2883 }
duke@0 2884 } else {
duke@0 2885 if (src2enc < 8) {
duke@0 2886 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 2887 } else {
duke@0 2888 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 2889 }
duke@0 2890 }
duke@0 2891 emit_opcode(cbuf, 0x3B);
duke@0 2892 emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);
duke@0 2893
duke@0 2894 // movl $dst, -1
duke@0 2895 if (dstenc >= 8) {
duke@0 2896 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2897 }
duke@0 2898 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
duke@0 2899 emit_d32(cbuf, -1);
duke@0 2900
duke@0 2901 // jl,s done
duke@0 2902 emit_opcode(cbuf, 0x7C);
duke@0 2903 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
duke@0 2904
duke@0 2905 // setne $dst
duke@0 2906 if (dstenc >= 4) {
duke@0 2907 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 2908 }
duke@0 2909 emit_opcode(cbuf, 0x0F);
duke@0 2910 emit_opcode(cbuf, 0x95);
duke@0 2911 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
duke@0 2912
duke@0 2913 // movzbl $dst, $dst
duke@0 2914 if (dstenc >= 4) {
duke@0 2915 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
duke@0 2916 }
duke@0 2917 emit_opcode(cbuf, 0x0F);
duke@0 2918 emit_opcode(cbuf, 0xB6);
duke@0 2919 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
duke@0 2920 %}
duke@0 2921
duke@0 2922 enc_class Push_ResultXD(regD dst) %{
kvn@2953 2923 MacroAssembler _masm(&cbuf);
kvn@2953 2924 __ fstp_d(Address(rsp, 0));
kvn@2953 2925 __ movdbl($dst$$XMMRegister, Address(rsp, 0));
kvn@2953 2926 __ addptr(rsp, 8);
duke@0 2927 %}
duke@0 2928
duke@0 2929 enc_class Push_SrcXD(regD src) %{
duke@0 2930 MacroAssembler _masm(&cbuf);
kvn@2953 2931 __ subptr(rsp, 8);
kvn@2953 2932 __ movdbl(Address(rsp, 0), $src$$XMMRegister);
kvn@2953 2933 __ fld_d(Address(rsp, 0));
kvn@2953 2934 %}
kvn@2953 2935
duke@0 2936
duke@0 2937 // obj: object to lock
duke@0 2938 // box: box address (header location) -- killed
duke@0 2939 // tmp: rax -- killed
duke@0 2940 // scr: rbx -- killed
duke@0 2941 //
duke@0 2942 // What follows is a direct transliteration of fast_lock() and fast_unlock()
duke@0 2943 // from i486.ad. See that file for comments.
duke@0 2944 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
duke@0 2945 // use the shorter encoding. (Movl clears the high-order 32-bits).
duke@0 2946
duke@0 2947
duke@0 2948 enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
duke@0 2949 %{
duke@0 2950 Register objReg = as_Register((int)$obj$$reg);
duke@0 2951 Register boxReg = as_Register((int)$box$$reg);
duke@0 2952 Register tmpReg = as_Register($tmp$$reg);
duke@0 2953 Register scrReg = as_Register($scr$$reg);
duke@0 2954 MacroAssembler masm(&cbuf);
duke@0 2955
duke@0 2956 // Verify uniqueness of register assignments -- necessary but not sufficient
duke@0 2957 assert (objReg != boxReg && objReg != tmpReg &&
duke@0 2958 objReg != scrReg && tmpReg != scrReg, "invariant") ;
duke@0 2959
duke@0 2960 if (_counters != NULL) {
duke@0 2961 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
duke@0 2962 }
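// EmitSync is an experimentation flag: non-zero bits select the alternate
// (simplified) implementations below, while the default of 0 takes the full
// fast-lock path in the final else-branch.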
duke@0 2963 if (EmitSync & 1) {
never@304 2964 // Without cast to int32_t a movptr will destroy r10 which is typically obj
iveresov@2251 2965 masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
iveresov@2251 2966 masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
duke@0 2967 } else
duke@0 2968 if (EmitSync & 2) {
duke@0 2969 Label DONE_LABEL;
duke@0 2970 if (UseBiasedLocking) {
duke@0 2971 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
duke@0 2972 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
duke@0 2973 }
never@304 2974 // QQQ was movl...
never@304 2975 masm.movptr(tmpReg, 0x1);
never@304 2976 masm.orptr(tmpReg, Address(objReg, 0));
never@304 2977 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 2978 if (os::is_MP()) {
duke@0 2979 masm.lock();
duke@0 2980 }
never@304 2981 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 2982 masm.jcc(Assembler::equal, DONE_LABEL);
duke@0 2983
duke@0 2984 // Recursive locking
never@304 2985 masm.subptr(tmpReg, rsp);
never@304 2986 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@304 2987 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 2988
duke@0 2989 masm.bind(DONE_LABEL);
duke@0 2990 masm.nop(); // avoid branch to branch
duke@0 2991 } else {
duke@0 2992 Label DONE_LABEL, IsInflated, Egress;
duke@0 2993
iveresov@2251 2994 masm.movptr(tmpReg, Address(objReg, 0)) ;
never@304 2995 masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
iveresov@2251 2996 masm.jcc (Assembler::notZero, IsInflated) ;
iveresov@2251 2997
duke@0 2998 // it's stack-locked, biased or neutral
duke@0 2999 // TODO: optimize markword triage order to reduce the number of
duke@0 3000 // conditional branches in the most common cases.
duke@0 3001 // Beware -- there's a subtle invariant that fetch of the markword
duke@0 3002 // at [FETCH], below, will never observe a biased encoding (*101b).
duke@0 3003 // If this invariant is not held we'll suffer exclusion (safety) failure.
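// For reference (assuming the standard markOop bit layout): the low bits of
// the header are 01 for neutral/unlocked, 00 for a stack-lock, 10 for an
// inflated monitor, and 101 for a biased header; the testl(tmpReg, 0x02)
// above keys off bit 1, which is set only in the inflated case here.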
duke@0 3004
kvn@420 3005 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3006 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
never@304 3007 masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
duke@0 3008 }
duke@0 3009
never@304 3010 // was the q (64-bit) form -- will the 32-bit or destroy the high bits?
iveresov@2251 3011 masm.orl (tmpReg, 1) ;
iveresov@2251 3012 masm.movptr(Address(boxReg, 0), tmpReg) ;
iveresov@2251 3013 if (os::is_MP()) { masm.lock(); }
never@304 3014 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 3015 if (_counters != NULL) {
duke@0 3016 masm.cond_inc32(Assembler::equal,
duke@0 3017 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3018 }
duke@0 3019 masm.jcc (Assembler::equal, DONE_LABEL);
duke@0 3020
duke@0 3021 // Recursive locking
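// After a failed CAS the mark in tmpReg may be a pointer into this thread's
// own stack (an existing stack-lock). tmp = mark - rsp, masked with
// (7 - page_size): a small (sub-page), 8-byte-aligned difference leaves zero,
// i.e. a recursive enter, and that zero is stored as the displaced header;
// a non-zero result leaves ZF clear and we fall into the slow path.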
never@304 3022 masm.subptr(tmpReg, rsp);
never@304 3023 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@304 3024 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3025 if (_counters != NULL) {
duke@0 3026 masm.cond_inc32(Assembler::equal,
duke@0 3027 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3028 }
duke@0 3029 masm.jmp (DONE_LABEL) ;
duke@0 3030
duke@0 3031 masm.bind (IsInflated) ;
duke@0 3032 // It's inflated
duke@0 3033
duke@0 3034 // TODO: someday avoid the ST-before-CAS penalty by
duke@0 3035 // relocating (deferring) the following ST.
duke@0 3036 // We should also think about trying a CAS without having
duke@0 3037 // fetched _owner. If the CAS is successful we may
duke@0 3038 // avoid an RTO->RTS upgrade on the $line.
never@304 3039 // Without cast to int32_t a movptr will destroy r10 which is typically obj
iveresov@2251 3040 masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
iveresov@2251 3041
iveresov@2251 3042 masm.mov (boxReg, tmpReg) ;
iveresov@2251 3043 masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
iveresov@2251 3044 masm.testptr(tmpReg, tmpReg) ;
iveresov@2251 3045 masm.jcc (Assembler::notZero, DONE_LABEL) ;
duke@0 3046
duke@0 3047 // It's inflated and appears unlocked
iveresov@2251 3048 if (os::is_MP()) { masm.lock(); }
iveresov@2251 3049 masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@0 3050 // Intentional fall-through into DONE_LABEL ...
duke@0 3051
duke@0 3052 masm.bind (DONE_LABEL) ;
duke@0 3053 masm.nop () ; // avoid jmp to jmp
duke@0 3054 }
duke@0 3055 %}
duke@0 3056
duke@0 3057 // obj: object to unlock
duke@0 3058 // box: box address (displaced header location), killed
duke@0 3059 // RBX: killed tmp; cannot be obj nor box
duke@0 3060 enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
duke@0 3061 %{
duke@0 3062
duke@0 3063 Register objReg = as_Register($obj$$reg);
duke@0 3064 Register boxReg = as_Register($box$$reg);
duke@0 3065 Register tmpReg = as_Register($tmp$$reg);
duke@0 3066 MacroAssembler masm(&cbuf);
duke@0 3067
iveresov@2251 3068 if (EmitSync & 4) {
iveresov@2251 3069 masm.cmpptr(rsp, 0) ;
duke@0 3070 } else
duke@0 3071 if (EmitSync & 8) {
duke@0 3072 Label DONE_LABEL;
duke@0 3073 if (UseBiasedLocking) {
duke@0 3074 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3075 }
duke@0 3076
duke@0 3077 // Check whether the displaced header is 0
duke@0 3078 //(=> recursive unlock)
never@304 3079 masm.movptr(tmpReg, Address(boxReg, 0));
never@304 3080 masm.testptr(tmpReg, tmpReg);
duke@0 3081 masm.jcc(Assembler::zero, DONE_LABEL);
duke@0 3082
duke@0 3083 // If not recursive lock, reset the header to displaced header
duke@0 3084 if (os::is_MP()) {
duke@0 3085 masm.lock();
duke@0 3086 }
never@304 3087 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3088 masm.bind(DONE_LABEL);
duke@0 3089 masm.nop(); // avoid branch to branch
duke@0 3090 } else {
duke@0 3091 Label DONE_LABEL, Stacked, CheckSucc ;
duke@0 3092
kvn@420 3093 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3094 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3095 }
iveresov@2251 3096
iveresov@2251 3097 masm.movptr(tmpReg, Address(objReg, 0)) ;
iveresov@2251 3098 masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
iveresov@2251 3099 masm.jcc (Assembler::zero, DONE_LABEL) ;
iveresov@2251 3100 masm.testl (tmpReg, 0x02) ;
iveresov@2251 3101 masm.jcc (Assembler::zero, Stacked) ;
iveresov@2251 3102
duke@0 3103 // It's inflated
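// Fast exit of an inflated monitor: the unlock succeeds only if the current
// thread is the owner with no recursions; if cxq or EntryList is non-empty we
// first look for a successor at CheckSucc, otherwise _owner is simply cleared
// and ZF=1 reports success.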
iveresov@2251 3104 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
iveresov@2251 3105 masm.xorptr(boxReg, r15_thread) ;
iveresov@2251 3106 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
iveresov@2251 3107 masm.jcc (Assembler::notZero, DONE_LABEL) ;
iveresov@2251 3108 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
iveresov@2251 3109 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
iveresov@2251 3110 masm.jcc (Assembler::notZero, CheckSucc) ;
iveresov@2251 3111 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
iveresov@2251 3112 masm.jmp (DONE_LABEL) ;
iveresov@2251 3113
iveresov@2251 3114 if ((EmitSync & 65536) == 0) {
duke@0 3115 Label LSuccess, LGoSlowPath ;
duke@0 3116 masm.bind (CheckSucc) ;
never@304 3117 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3118 masm.jcc (Assembler::zero, LGoSlowPath) ;
duke@0 3119
duke@0 3120 // I'd much rather use lock:andl m->_owner, 0 as it's faster than
duke@0 3121 // the explicit ST;MEMBAR combination, but masm doesn't currently support
duke@0 3122 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
duke@0 3123 // are all faster when the write buffer is populated.
never@304 3124 masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3125 if (os::is_MP()) {
never@304 3126 masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
duke@0 3127 }
never@304 3128 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3129 masm.jcc (Assembler::notZero, LSuccess) ;
duke@0 3130
never@304 3131 masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really RAX
duke@0 3132 if (os::is_MP()) { masm.lock(); }
never@304 3133 masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
duke@0 3134 masm.jcc (Assembler::notEqual, LSuccess) ;
duke@0 3135 // Intentional fall-through into slow-path
duke@0 3136
duke@0 3137 masm.bind (LGoSlowPath) ;
duke@0 3138 masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
duke@0 3139 masm.jmp (DONE_LABEL) ;
duke@0 3140
duke@0 3141 masm.bind (LSuccess) ;
duke@0 3142 masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success
duke@0 3143 masm.jmp (DONE_LABEL) ;
duke@0 3144 }
duke@0 3145
iveresov@2251 3146 masm.bind (Stacked) ;
never@304 3147 masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
iveresov@2251 3148 if (os::is_MP()) { masm.lock(); }
never@304 3149 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3150
duke@0 3151 if (EmitSync & 65536) {
duke@0 3152 masm.bind (CheckSucc) ;
duke@0 3153 }
duke@0 3154 masm.bind(DONE_LABEL);
duke@0 3155 if (EmitSync & 32768) {
duke@0 3156 masm.nop(); // avoid branch to branch
duke@0 3157 }
duke@0 3158 }
duke@0 3159 %}
duke@0 3160
rasbold@169 3161
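// Rethrow is a tail-jump into OptoRuntime::rethrow_stub(): a 5-byte jmp
// (0xE9 + rel32) whose displacement is taken relative to the end of the
// instruction and registered with a runtime-call relocation.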
duke@0 3162 enc_class enc_rethrow()
duke@0 3163 %{
twisti@1668 3164 cbuf.set_insts_mark();
duke@0 3165 emit_opcode(cbuf, 0xE9); // jmp entry
duke@0 3166 emit_d32_reloc(cbuf,
twisti@1668 3167 (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4),
duke@0 3168 runtime_call_Relocation::spec(),
duke@0 3169 RELOC_DISP32);
duke@0 3170 %}
duke@0 3171
duke@0 3172 %}
duke@0 3173
duke@0 3174
coleenp@113 3175
duke@0 3176 //----------FRAME--------------------------------------------------------------
duke@0 3177 // Definition of frame structure and management information.
duke@0 3178 //
duke@0 3179 // S T A C K L A Y O U T Allocators stack-slot number
duke@0 3180 // | (to get allocators register number
duke@0 3181 // G Owned by | | v add OptoReg::stack0())
duke@0 3182 // r CALLER | |
duke@0 3183 // o | +--------+ pad to even-align allocators stack-slot
duke@0 3184 // w V | pad0 | numbers; owned by CALLER
duke@0 3185 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
duke@0 3186 // h ^ | in | 5
duke@0 3187 // | | args | 4 Holes in incoming args owned by SELF
duke@0 3188 // | | | | 3
duke@0 3189 // | | +--------+
duke@0 3190 // V | | old out| Empty on Intel, window on Sparc
duke@0 3191 // | old |preserve| Must be even aligned.
duke@0 3192 // | SP-+--------+----> Matcher::_old_SP, even aligned
duke@0 3193 // | | in | 3 area for Intel ret address
duke@0 3194 // Owned by |preserve| Empty on Sparc.
duke@0 3195 // SELF +--------+
duke@0 3196 // | | pad2 | 2 pad to align old SP
duke@0 3197 // | +--------+ 1
duke@0 3198 // | | locks | 0
duke@0 3199 // | +--------+----> OptoReg::stack0(), even aligned
duke@0 3200 // | | pad1 | 11 pad to align new SP
duke@0 3201 // | +--------+
duke@0 3202 // | | | 10
duke@0 3203 // | | spills | 9 spills
duke@0 3204 // V | | 8 (pad0 slot for callee)
duke@0 3205 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
duke@0 3206 // ^ | out | 7
duke@0 3207 // | | args | 6 Holes in outgoing args owned by CALLEE
duke@0 3208 // Owned by +--------+
duke@0 3209 // CALLEE | new out| 6 Empty on Intel, window on Sparc
duke@0 3210 // | new |preserve| Must be even-aligned.
duke@0 3211 // | SP-+--------+----> Matcher::_new_SP, even aligned
duke@0 3212 // | | |
duke@0 3213 //
duke@0 3214 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
duke@0 3215 // known from SELF's arguments and the Java calling convention.
duke@0 3216 // Region 6-7 is determined per call site.
duke@0 3217 // Note 2: If the calling convention leaves holes in the incoming argument
duke@0 3218 // area, those holes are owned by SELF. Holes in the outgoing area
duke@0 3219 // are owned by the CALLEE. Holes should not be necessary in the
duke@0 3220 // incoming area, as the Java calling convention is completely under
duke@0 3221 // the control of the AD file. Doubles can be sorted and packed to
duke@0 3222 // avoid holes. Holes in the outgoing arguments may be necessary for
duke@0 3223 // varargs C calling conventions.
duke@0 3224 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
duke@0 3225 // even aligned with pad0 as needed.
duke@0 3226 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
duke@0 3227 // region 6-11 is even aligned; it may be padded out more so that
duke@0 3228 // the region from SP to FP meets the minimum stack alignment.
duke@0 3229 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
duke@0 3230 // alignment. Region 11, pad1, may be dynamically extended so that
duke@0 3231 // SP meets the minimum alignment.
duke@0 3232
duke@0 3233 frame
duke@0 3234 %{
duke@0 3235 // What direction does stack grow in (assumed to be same for C & Java)
duke@0 3236 stack_direction(TOWARDS_LOW);
duke@0 3237
duke@0 3238 // These three registers define part of the calling convention
duke@0 3239 // between compiled code and the interpreter.
duke@0 3240 inline_cache_reg(RAX); // Inline Cache Register
duke@0 3241 interpreter_method_oop_reg(RBX); // Method Oop Register when
duke@0 3242 // calling interpreter
duke@0 3243
duke@0 3244 // Optional: name the operand used by cisc-spilling to access
duke@0 3245 // [stack_pointer + offset]
duke@0 3246 cisc_spilling_operand_name(indOffset32);
duke@0 3247
duke@0 3248 // Number of stack slots consumed by locking an object
duke@0 3249 sync_stack_slots(2);
duke@0 3250
duke@0 3251 // Compiled code's Frame Pointer
duke@0 3252 frame_pointer(RSP);
duke@0 3253
duke@0 3254 // The interpreter stores its frame pointer in a register which is
duke@0 3255 // stored to the stack by I2C adapters.
duke@0 3256 // I2C adapters convert from interpreted Java to compiled Java.
duke@0 3257 interpreter_frame_pointer(RBP);
duke@0 3258
duke@0 3259 // Stack alignment requirement
duke@0 3260 stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
duke@0 3261
duke@0 3262 // Number of stack slots between incoming argument block and the start of
duke@0 3263 // a new frame. The PROLOG must add this many slots to the stack. The
duke@0 3264 // EPILOG must remove this many slots. amd64 needs two slots for
duke@0 3265 // return address.
duke@0 3266 in_preserve_stack_slots(4 + 2 * VerifyStackAtCalls);
duke@0 3267
duke@0 3268 // Number of outgoing stack slots killed above the out_preserve_stack_slots
duke@0 3269 // for calls to C. Supports the var-args backing area for register parms.
duke@0 3270 varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
duke@0 3271
duke@0 3272 // The after-PROLOG location of the return address. Location of
duke@0 3273 // return address specifies a type (REG or STACK) and a number
duke@0 3274 // representing the register number (i.e. - use a register name) or
duke@0 3275 // stack slot.
duke@0 3276 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
duke@0 3277 // Otherwise, it is above the locks and verification slot and alignment word
duke@0 3278 return_addr(STACK - 2 +
duke@0 3279 round_to(2 + 2 * VerifyStackAtCalls +
duke@0 3280 Compile::current()->fixed_slots(),
duke@0 3281 WordsPerLong * 2));
duke@0 3282
duke@0 3283 // Body of function which returns an integer array locating
duke@0 3284 // arguments either in registers or in stack slots. Passed an array
duke@0 3285 // of ideal registers called "sig" and a "length" count. Stack-slot
duke@0 3286 // offsets are based on outgoing arguments, i.e. a CALLER setting up
duke@0 3287 // arguments for a CALLEE. Incoming stack arguments are
duke@0 3288 // automatically biased by the preserve_stack_slots field above.
duke@0 3289
duke@0 3290 calling_convention
duke@0 3291 %{
duke@0 3292 // No difference between incoming/outgoing; just pass false
duke@0 3293 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
duke@0 3294 %}
duke@0 3295
duke@0 3296 c_calling_convention
duke@0 3297 %{
duke@0 3298 // This is obviously always outgoing
duke@0 3299 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
duke@0 3300 %}
duke@0 3301
duke@0 3302 // Location of compiled Java return values. Same as C for now.
duke@0 3303 return_value
duke@0 3304 %{
duke@0 3305 assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
duke@0 3306 "only return normal values");
duke@0 3307
duke@0 3308 static const int lo[Op_RegL + 1] = {
duke@0 3309 0,
duke@0 3310 0,
coleenp@113 3311 RAX_num, // Op_RegN
duke@0 3312 RAX_num, // Op_RegI
duke@0 3313 RAX_num, // Op_RegP
duke@0 3314 XMM0_num, // Op_RegF
duke@0 3315 XMM0_num, // Op_RegD
duke@0 3316 RAX_num // Op_RegL
duke@0 3317 };
duke@0 3318 static const int hi[Op_RegL + 1] = {
duke@0 3319 0,
duke@0 3320 0,
coleenp@113 3321 OptoReg::Bad, // Op_RegN
duke@0 3322 OptoReg::Bad, // Op_RegI
duke@0 3323 RAX_H_num, // Op_RegP
duke@0 3324 OptoReg::Bad, // Op_RegF
duke@0 3325 XMM0_H_num, // Op_RegD
duke@0 3326 RAX_H_num // Op_RegL
duke@0 3327 };
coleenp@113 3328 assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type");
duke@0 3329 return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
duke@0 3330 %}
duke@0 3331 %}
duke@0 3332
duke@0 3333 //----------ATTRIBUTES---------------------------------------------------------
duke@0 3334 //----------Operand Attributes-------------------------------------------------
duke@0 3335 op_attrib op_cost(0); // Required cost attribute
duke@0 3336
duke@0 3337 //----------Instruction Attributes---------------------------------------------
duke@0 3338 ins_attrib ins_cost(100); // Required cost attribute
duke@0 3339 ins_attrib ins_size(8); // Required size attribute (in bits)
duke@0 3340 ins_attrib ins_short_branch(0); // Required flag: is this instruction
duke@0 3341 // a non-matching short branch variant
duke@0 3342 // of some long branch?
duke@0 3343 ins_attrib ins_alignment(1); // Required alignment attribute (must
duke@0 3344 // be a power of 2) specifies the
duke@0 3345 // alignment that some part of the
duke@0 3346 // instruction (not necessarily the
duke@0 3347 // start) requires. If > 1, a
duke@0 3348 // compute_padding() function must be
duke@0 3349 // provided for the instruction
duke@0 3350
duke@0 3351 //----------OPERANDS-----------------------------------------------------------
duke@0 3352 // Operand definitions must precede instruction definitions for correct parsing
duke@0 3353 // in the ADLC because operands constitute user-defined types which are used in
duke@0 3354 // instruction definitions.
duke@0 3355
duke@0 3356 //----------Simple Operands----------------------------------------------------
duke@0 3357 // Immediate Operands
duke@0 3358 // Integer Immediate
duke@0 3359 operand immI()
duke@0 3360 %{
duke@0 3361 match(ConI);
duke@0 3362
duke@0 3363 op_cost(10);
duke@0 3364 format %{ %}
duke@0 3365 interface(CONST_INTER);
duke@0 3366 %}
duke@0 3367
duke@0 3368 // Constant for test vs zero
duke@0 3369 operand immI0()
duke@0 3370 %{
duke@0 3371 predicate(n->get_int() == 0);
duke@0 3372 match(ConI);
duke@0 3373
duke@0 3374 op_cost(0);
duke@0 3375 format %{ %}
duke@0 3376 interface(CONST_INTER);
duke@0 3377 %}
duke@0 3378
duke@0 3379 // Constant for increment
duke@0 3380 operand immI1()
duke@0 3381 %{
duke@0 3382 predicate(n->get_int() == 1);
duke@0 3383 match(ConI);
duke@0 3384
duke@0 3385 op_cost(0);
duke@0 3386 format %{ %}
duke@0 3387 interface(CONST_INTER);
duke@0 3388 %}
duke@0 3389
duke@0 3390 // Constant for decrement
duke@0 3391 operand immI_M1()
duke@0 3392 %{
duke@0 3393 predicate(n->get_int() == -1);
duke@0 3394 match(ConI);
duke@0 3395
duke@0 3396 op_cost(0);
duke@0 3397 format %{ %}
duke@0 3398 interface(CONST_INTER);
duke@0 3399 %}
duke@0 3400
duke@0 3401 // Valid scale values for addressing modes
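// (0..3 is the two-bit SIB scale field, i.e. index*1, *2, *4 or *8)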
duke@0 3402 operand immI2()
duke@0 3403 %{
duke@0 3404 predicate(0 <= n->get_int() && (n->get_int() <= 3));
duke@0 3405 match(ConI);
duke@0 3406
duke@0 3407 format %{ %}
duke@0 3408 interface(CONST_INTER);
duke@0 3409 %}
duke@0 3410
duke@0 3411 operand immI8()
duke@0 3412 %{
duke@0 3413 predicate((-0x80 <= n->get_int()) && (n->get_int() < 0x80));
duke@0 3414 match(ConI);
duke@0 3415
duke@0 3416 op_cost(5);
duke@0 3417 format %{ %}
duke@0 3418 interface(CONST_INTER);
duke@0 3419 %}
duke@0 3420
duke@0 3421 operand immI16()
duke@0 3422 %{
duke@0 3423 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
duke@0 3424 match(ConI);
duke@0 3425
duke@0 3426 op_cost(10);
duke@0 3427 format %{ %}
duke@0 3428 interface(CONST_INTER);
duke@0 3429 %}
duke@0 3430
duke@0 3431 // Constant for long shifts
duke@0 3432 operand immI_32()
duke@0 3433 %{
duke@0 3434 predicate(n->get_int() == 32);
duke@0 3435 match(ConI);
duke@0 3436
duke@0 3437 op_cost(0);
duke@0 3438 format %{ %}
duke@0 3439 interface(CONST_INTER);
duke@0 3440 %}
duke@0 3441
duke@0 3442 // Constant for long shifts
duke@0 3443 operand immI_64()
duke@0 3444 %{
duke@0 3445 predicate(n->get_int() == 64);
duke@0 3446 match(ConI);
duke@0 3447
duke@0 3448 op_cost(0);
duke@0 3449 format %{ %}
duke@0 3450 interface(CONST_INTER);
duke@0 3451 %}
duke@0 3452
duke@0 3453 // Pointer Immediate
duke@0 3454 operand immP()
duke@0 3455 %{
duke@0 3456 match(ConP);
duke@0 3457
duke@0 3458 op_cost(10);
duke@0 3459 format %{ %}
duke@0 3460 interface(CONST_INTER);
duke@0 3461 %}
duke@0 3462
duke@0 3463 // NULL Pointer Immediate
duke@0 3464 operand immP0()
duke@0 3465 %{
duke@0 3466 predicate(n->get_ptr() == 0);
duke@0 3467 match(ConP);
duke@0 3468
duke@0 3469 op_cost(5);
duke@0 3470 format %{ %}
duke@0 3471 interface(CONST_INTER);
duke@0 3472 %}
duke@0 3473
iveresov@2251 3474 operand immP_poll() %{
iveresov@2251 3475 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
iveresov@2251 3476 match(ConP);
iveresov@2251 3477
iveresov@2251 3478 // formats are generated automatically for constants and base registers
iveresov@2251 3479 format %{ %}
iveresov@2251 3480 interface(CONST_INTER);
iveresov@2251 3481 %}
iveresov@2251 3482
coleenp@113 3483 // Narrow Pointer Immediate (compressed oop)
coleenp@113 3484 operand immN() %{
coleenp@113 3485 match(ConN);
coleenp@113 3486
coleenp@113 3487 op_cost(10);
coleenp@113 3488 format %{ %}
coleenp@113 3489 interface(CONST_INTER);
coleenp@113 3490 %}
coleenp@113 3491
coleenp@113 3492 // Narrow NULL Pointer Immediate
coleenp@113 3493 operand immN0() %{
coleenp@113 3494 predicate(n->get_narrowcon() == 0);
coleenp@113 3495 match(ConN);
coleenp@113 3496
coleenp@113 3497 op_cost(5);
coleenp@113 3498 format %{ %}
coleenp@113 3499 interface(CONST_INTER);
coleenp@113 3500 %}
coleenp@113 3501
duke@0 3502 operand immP31()
duke@0 3503 %{
duke@0 3504 predicate(!n->as_Type()->type()->isa_oopptr()
duke@0 3505 && (n->get_ptr() >> 31) == 0);
duke@0 3506 match(ConP);
duke@0 3507
duke@0 3508 op_cost(5);
duke@0 3509 format %{ %}
duke@0 3510 interface(CONST_INTER);
duke@0 3511 %}
duke@0 3512
coleenp@113 3513
duke@0 3514 // Long Immediate
duke@0 3515 operand immL()
duke@0 3516 %{
duke@0 3517 match(ConL);
duke@0 3518
duke@0 3519 op_cost(20);
duke@0 3520 format %{ %}
duke@0 3521 interface(CONST_INTER);
duke@0 3522 %}
duke@0 3523
duke@0 3524 // Long Immediate 8-bit
duke@0 3525 operand immL8()
duke@0 3526 %{
duke@0 3527 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
duke@0 3528 match(ConL);
duke@0 3529
duke@0 3530 op_cost(5);
duke@0 3531 format %{ %}
duke@0 3532 interface(CONST_INTER);
duke@0 3533 %}
duke@0 3534
duke@0 3535 // Long Immediate 32-bit unsigned
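// (the constant equals its own low 32 bits zero-extended, e.g. 0xFFFFFFFF)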
duke@0 3536 operand immUL32()
duke@0 3537 %{
duke@0 3538 predicate(n->get_long() == (unsigned int) (n->get_long()));
duke@0 3539 match(ConL);
duke@0 3540
duke@0 3541 op_cost(10);
duke@0 3542 format %{ %}
duke@0 3543 interface(CONST_INTER);
duke@0 3544 %}
duke@0 3545
duke@0 3546 // Long Immediate 32-bit signed
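// (the constant equals its own low 32 bits sign-extended, e.g. -1)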
duke@0 3547 operand immL32()
duke@0 3548 %{
duke@0 3549 predicate(n->get_long() == (int) (n->get_long()));
duke@0 3550 match(ConL);
duke@0 3551
duke@0 3552 op_cost(15);
duke@0 3553 format %{ %}
duke@0 3554 interface(CONST_INTER);
duke@0 3555 %}
duke@0 3556
duke@0 3557 // Long Immediate zero
duke@0 3558 operand immL0()
duke@0 3559 %{
duke@0 3560 predicate(n->get_long() == 0L);
duke@0 3561 match(ConL);
duke@0 3562
duke@0 3563 op_cost(10);
duke@0 3564 format %{ %}
duke@0 3565 interface(CONST_INTER);
duke@0 3566 %}
duke@0 3567
duke@0 3568 // Constant for increment
duke@0 3569 operand immL1()
duke@0 3570 %{
duke@0 3571 predicate(n->get_long() == 1);
duke@0 3572 match(ConL);
duke@0 3573
duke@0 3574 format %{ %}
duke@0 3575 interface(CONST_INTER);
duke@0 3576 %}
duke@0 3577
duke@0 3578 // Constant for decrement
duke@0 3579 operand immL_M1()
duke@0 3580 %{
duke@0 3581 predicate(n->get_long() == -1);
duke@0 3582 match(ConL);
duke@0 3583
duke@0 3584 format %{ %}
duke@0 3585 interface(CONST_INTER);
duke@0 3586 %}
duke@0 3587
duke@0 3588 // Long Immediate: the value 10
duke@0 3589 operand immL10()
duke@0 3590 %{
duke@0 3591 predicate(n->get_long() == 10);
duke@0 3592 match(ConL);
duke@0 3593
duke@0 3594 format %{ %}
duke@0 3595 interface(CONST_INTER);
duke@0 3596 %}
duke@0 3597
duke@0 3598 // Long immediate from 0 to 127.
duke@0 3599 // Used for a shorter form of long mul by 10.
duke@0 3600 operand immL_127()
duke@0 3601 %{
duke@0 3602 predicate(0 <= n->get_long() && n->get_long() < 0x80);
duke@0 3603 match(ConL);
duke@0 3604
duke@0 3605 op_cost(10);
duke@0 3606 format %{ %}
duke@0 3607 interface(CONST_INTER);
duke@0 3608 %}
duke@0 3609
duke@0 3610 // Long Immediate: low 32-bit mask
duke@0 3611 operand immL_32bits()
duke@0 3612 %{
duke@0 3613 predicate(n->get_long() == 0xFFFFFFFFL);
duke@0 3614 match(ConL);
duke@0 3615 op_cost(20);
duke@0 3616
duke@0 3617 format %{ %}
duke@0 3618 interface(CONST_INTER);
duke@0 3619 %}
duke@0 3620
duke@0 3621 // Float Immediate zero
duke@0 3622 operand immF0()
duke@0 3623 %{
duke@0 3624 predicate(jint_cast(n->getf()) == 0);
duke@0 3625 match(ConF);
duke@0 3626
duke@0 3627 op_cost(5);
duke@0 3628 format %{ %}
duke@0 3629 interface(CONST_INTER);
duke@0 3630 %}
duke@0 3631
duke@0 3632 // Float Immediate
duke@0 3633 operand immF()
duke@0 3634 %{
duke@0 3635 match(ConF);
duke@0 3636
duke@0 3637 op_cost(15);
duke@0 3638 format %{ %}
duke@0 3639 interface(CONST_INTER);
duke@0 3640 %}
duke@0 3641
duke@0 3642 // Double Immediate zero
duke@0 3643 operand immD0()
duke@0 3644 %{
duke@0 3645 predicate(jlong_cast(n->getd()) == 0);
duke@0 3646 match(ConD);
duke@0 3647
duke@0 3648 op_cost(5);
duke@0 3649 format %{ %}
duke@0 3650 interface(CONST_INTER);
duke@0 3651 %}
duke@0 3652
duke@0 3653 // Double Immediate
duke@0 3654 operand immD()
duke@0 3655 %{
duke@0 3656 match(ConD);
duke@0 3657
duke@0 3658 op_cost(15);
duke@0 3659 format %{ %}
duke@0 3660 interface(CONST_INTER);
duke@0 3661 %}
duke@0 3662
duke@0 3663 // Immediates for special shifts (sign extend)
duke@0 3664
duke@0 3665 // Constants for increment
duke@0 3666 operand immI_16()
duke@0 3667 %{
duke@0 3668 predicate(n->get_int() == 16);
duke@0 3669 match(ConI);
duke@0 3670
duke@0 3671 format %{ %}
duke@0 3672 interface(CONST_INTER);
duke@0 3673 %}
duke@0 3674
duke@0 3675 operand immI_24()
duke@0 3676 %{
duke@0 3677 predicate(n->get_int() == 24);
duke@0 3678 match(ConI);
duke@0 3679
duke@0 3680 format %{ %}
duke@0 3681 interface(CONST_INTER);
duke@0 3682 %}
duke@0 3683
duke@0 3684 // Constant for byte-wide masking
duke@0 3685 operand immI_255()
duke@0 3686 %{
duke@0 3687 predicate(n->get_int() == 255);
duke@0 3688 match(ConI);
duke@0 3689
duke@0 3690 format %{ %}
duke@0 3691 interface(CONST_INTER);
duke@0 3692 %}
duke@0 3693
duke@0 3694 // Constant for short-wide masking
duke@0 3695 operand immI_65535()
duke@0 3696 %{
duke@0 3697 predicate(n->get_int() == 65535);
duke@0 3698 match(ConI);
duke@0 3699
duke@0 3700 format %{ %}
duke@0 3701 interface(CONST_INTER);
duke@0 3702 %}
duke@0 3703
duke@0 3704 // Constant for byte-wide masking
duke@0 3705 operand immL_255()
duke@0 3706 %{
duke@0 3707 predicate(n->get_long() == 255);
duke@0 3708 match(ConL);
duke@0 3709
duke@0 3710 format %{ %}
duke@0 3711 interface(CONST_INTER);
duke@0 3712 %}
duke@0 3713
duke@0 3714 // Constant for short-wide masking
duke@0 3715 operand immL_65535()
duke@0 3716 %{
duke@0 3717 predicate(n->get_long() == 65535);
duke@0 3718 match(ConL);
duke@0 3719
duke@0 3720 format %{ %}
duke@0 3721 interface(CONST_INTER);
duke@0 3722 %}
duke@0 3723
duke@0 3724 // Register Operands
duke@0 3725 // Integer Register
duke@0 3726 operand rRegI()
duke@0 3727 %{
duke@0 3728 constraint(ALLOC_IN_RC(int_reg));
duke@0 3729 match(RegI);
duke@0 3730
duke@0 3731 match(rax_RegI);
duke@0 3732 match(rbx_RegI);
duke@0 3733 match(rcx_RegI);
duke@0 3734 match(rdx_RegI);
duke@0 3735 match(rdi_RegI);
duke@0 3736
duke@0 3737 format %{ %}
duke@0 3738 interface(REG_INTER);
duke@0 3739 %}
duke@0 3740
duke@0 3741 // Special Registers
duke@0 3742 operand rax_RegI()
duke@0 3743 %{
duke@0 3744 constraint(ALLOC_IN_RC(int_rax_reg));
duke@0 3745 match(RegI);
duke@0 3746 match(rRegI);
duke@0 3747
duke@0 3748 format %{ "RAX" %}
duke@0 3749 interface(REG_INTER);
duke@0 3750 %}
duke@0 3751
duke@0 3752 // Special Registers
duke@0 3753 operand rbx_RegI()
duke@0 3754 %{
duke@0 3755 constraint(ALLOC_IN_RC(int_rbx_reg));
duke@0 3756 match(RegI);
duke@0 3757 match(rRegI);
duke@0 3758
duke@0 3759 format %{ "RBX" %}
duke@0 3760 interface(REG_INTER);
duke@0 3761 %}
duke@0 3762
duke@0 3763 operand rcx_RegI()
duke@0 3764 %{
duke@0 3765 constraint(ALLOC_IN_RC(int_rcx_reg));
duke@0 3766 match(RegI);
duke@0 3767 match(rRegI);
duke@0 3768
duke@0 3769 format %{ "RCX" %}
duke@0 3770 interface(REG_INTER);
duke@0 3771 %}
duke@0 3772
duke@0 3773 operand rdx_RegI()
duke@0 3774 %{
duke@0 3775 constraint(ALLOC_IN_RC(int_rdx_reg));
duke@0 3776 match(RegI);
duke@0 3777 match(rRegI);
duke@0 3778
duke@0 3779 format %{ "RDX" %}
duke@0 3780 interface(REG_INTER);
duke@0 3781 %}
duke@0 3782
duke@0 3783 operand rdi_RegI()
duke@0 3784 %{
duke@0 3785 constraint(ALLOC_IN_RC(int_rdi_reg));
duke@0 3786 match(RegI);
duke@0 3787 match(rRegI);
duke@0 3788
duke@0 3789 format %{ "RDI" %}
duke@0 3790 interface(REG_INTER);
duke@0 3791 %}
duke@0 3792
duke@0 3793 operand no_rcx_RegI()
duke@0 3794 %{
duke@0 3795 constraint(ALLOC_IN_RC(int_no_rcx_reg));
duke@0 3796 match(RegI);
duke@0 3797 match(rax_RegI);
duke@0 3798 match(rbx_RegI);
duke@0 3799 match(rdx_RegI);
duke@0 3800 match(rdi_RegI);
duke@0 3801
duke@0 3802 format %{ %}
duke@0 3803 interface(REG_INTER);
duke@0 3804 %}
duke@0 3805
duke@0 3806 operand no_rax_rdx_RegI()
duke@0 3807 %{
duke@0 3808 constraint(ALLOC_IN_RC(int_no_rax_rdx_reg));
duke@0 3809 match(RegI);
duke@0 3810 match(rbx_RegI);
duke@0 3811 match(rcx_RegI);
duke@0 3812 match(rdi_RegI);
duke@0 3813
duke@0 3814 format %{ %}
duke@0 3815 interface(REG_INTER);
duke@0 3816 %}
duke@0 3817
duke@0 3818 // Pointer Register
duke@0 3819 operand any_RegP()
duke@0 3820 %{
duke@0 3821 constraint(ALLOC_IN_RC(any_reg));
duke@0 3822 match(RegP);
duke@0 3823 match(rax_RegP);
duke@0 3824 match(rbx_RegP);
duke@0 3825 match(rdi_RegP);
duke@0 3826 match(rsi_RegP);
duke@0 3827 match(rbp_RegP);
duke@0 3828 match(r15_RegP);
duke@0 3829 match(rRegP);
duke@0 3830
duke@0 3831 format %{ %}
duke@0 3832 interface(REG_INTER);
duke@0 3833 %}
duke@0 3834
duke@0 3835 operand rRegP()
duke@0 3836 %{
duke@0 3837 constraint(ALLOC_IN_RC(ptr_reg));
duke@0 3838 match(RegP);
duke@0 3839 match(rax_RegP);
duke@0 3840 match(rbx_RegP);
duke@0 3841 match(rdi_RegP);
duke@0 3842 match(rsi_RegP);
duke@0 3843 match(rbp_RegP);
duke@0 3844 match(r15_RegP); // See Q&A below about r15_RegP.
duke@0 3845
duke@0 3846 format %{ %}
duke@0 3847 interface(REG_INTER);
duke@0 3848 %}
duke@0 3849
coleenp@113 3850 operand rRegN() %{
coleenp@113 3851 constraint(ALLOC_IN_RC(int_reg));
coleenp@113 3852 match(RegN);
coleenp@113 3853
coleenp@113 3854 format %{ %}
coleenp@113 3855 interface(REG_INTER);
coleenp@113 3856 %}
coleenp@113 3857
duke@0 3858 // Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
duke@0 3859 // Answer: Operand match rules govern the DFA as it processes instruction inputs.
duke@0 3860 // It's fine for an instruction input which expects rRegP to match a r15_RegP.
duke@0 3861 // The output of an instruction is controlled by the allocator, which respects
duke@0 3862 // register class masks, not match rules. Unless an instruction mentions
duke@0 3863 // r15_RegP or any_RegP explicitly as its output, r15 will not be considered
duke@0 3864 // by the allocator as an input.
duke@0 3865
duke@0 3866 operand no_rax_RegP()
duke@0 3867 %{
duke@0 3868 constraint(ALLOC_IN_RC(ptr_no_rax_reg));
duke@0 3869 match(RegP);
duke@0 3870 match(rbx_RegP);
duke@0 3871 match(rsi_RegP);
duke@0 3872 match(rdi_RegP);
duke@0 3873
duke@0 3874 format %{ %}
duke@0 3875 interface(REG_INTER);
duke@0 3876 %}
duke@0 3877
duke@0 3878 operand no_rbp_RegP()
duke@0 3879 %{
duke@0 3880 constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
duke@0