annotate src/cpu/x86/vm/x86_64.ad @ 747:93c14e5562c4

6823354: Add intrinsics for {Integer,Long}.{numberOfLeadingZeros,numberOfTrailingZeros}() Summary: These methods can be intrinsified by using bit scan, bit test, and population count instructions. Reviewed-by: kvn, never
author twisti
date Wed, 06 May 2009 00:27:52 -0700
parents fbde8ec322d0
children 2056494941db
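note (illustrative sketch, not part of the changeset itself): on x86_64 the
intrinsics named in the summary reduce to single instructions for non-zero
inputs, roughly

    numberOfTrailingZeros(x) == bsf(x)
    numberOfLeadingZeros(x)  == 31 - bsr(x)    (63 - bsr(x) for long)

with a separate zero check, since BSF/BSR leave their destination undefined
when the source operand is zero.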
rev   line source
duke@0 1 //
twisti@603 2 // Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 //
duke@0 5 // This code is free software; you can redistribute it and/or modify it
duke@0 6 // under the terms of the GNU General Public License version 2 only, as
duke@0 7 // published by the Free Software Foundation.
duke@0 8 //
duke@0 9 // This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 // version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 // accompanied this code).
duke@0 14 //
duke@0 15 // You should have received a copy of the GNU General Public License version
duke@0 16 // 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 //
duke@0 19 // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 // CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 // have any questions.
duke@0 22 //
duke@0 23 //
duke@0 24
duke@0 25 // AMD64 Architecture Description File
duke@0 26
duke@0 27 //----------REGISTER DEFINITION BLOCK------------------------------------------
duke@0 28 // This information is used by the matcher and the register allocator to
duke@0 29 // describe individual registers and classes of registers within the target
duke@0 30 // architecture.
duke@0 31
duke@0 32 register %{
duke@0 33 //----------Architecture Description Register Definitions----------------------
duke@0 34 // General Registers
duke@0 35 // "reg_def" name ( register save type, C convention save type,
duke@0 36 // ideal register type, encoding, concrete VMReg );
duke@0 37 // Register Save Types:
duke@0 38 //
duke@0 39 // NS = No-Save: The register allocator assumes that these registers
duke@0 40 // can be used without saving upon entry to the method, &
duke@0 41 // that they do not need to be saved at call sites.
duke@0 42 //
duke@0 43 // SOC = Save-On-Call: The register allocator assumes that these registers
duke@0 44 // can be used without saving upon entry to the method,
duke@0 45 // but that they must be saved at call sites.
duke@0 46 //
duke@0 47 // SOE = Save-On-Entry: The register allocator assumes that these registers
duke@0 48 // must be saved before using them upon entry to the
duke@0 49 // method, but they do not need to be saved at call
duke@0 50 // sites.
duke@0 51 //
duke@0 52 // AS = Always-Save: The register allocator assumes that these registers
duke@0 53 // must be saved before using them upon entry to the
duke@0 54 // method, & that they must be saved at call sites.
duke@0 55 //
duke@0 56 // Ideal Register Type is used to determine how to save & restore a
duke@0 57 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
duke@0 58 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
duke@0 59 //
duke@0 60 // The encoding number is the actual bit-pattern placed into the opcodes.
duke@0 61
duke@0 62 // General Registers
duke@0 63 // R8-R15 must be encoded with REX. (RSP, RBP, RSI, RDI need REX when
duke@0 64 // used as byte registers)
duke@0 65
duke@0 66 // RBX, RSI, and RDI were previously set as save-on-entry for Java code.
duke@0 67 // SOE was then turned off in Java code due to frequent use of uncommon traps.
duke@0 68 // Now that the allocator is better, RSI and RDI are turned back on as SOE registers.
duke@0 69
duke@0 70 reg_def RAX (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
duke@0 71 reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());
duke@0 72
duke@0 73 reg_def RCX (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
duke@0 74 reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());
duke@0 75
duke@0 76 reg_def RDX (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
duke@0 77 reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());
duke@0 78
duke@0 79 reg_def RBX (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
duke@0 80 reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());
duke@0 81
duke@0 82 reg_def RSP (NS, NS, Op_RegI, 4, rsp->as_VMReg());
duke@0 83 reg_def RSP_H(NS, NS, Op_RegI, 4, rsp->as_VMReg()->next());
duke@0 84
duke@0 85 // Now that adapter frames are gone, RBP is always saved and restored by the prolog/epilog code.
duke@0 86 reg_def RBP (NS, SOE, Op_RegI, 5, rbp->as_VMReg());
duke@0 87 reg_def RBP_H(NS, SOE, Op_RegI, 5, rbp->as_VMReg()->next());
duke@0 88
duke@0 89 #ifdef _WIN64
duke@0 90
duke@0 91 reg_def RSI (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
duke@0 92 reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 93
duke@0 94 reg_def RDI (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
duke@0 95 reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 96
duke@0 97 #else
duke@0 98
duke@0 99 reg_def RSI (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
duke@0 100 reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());
duke@0 101
duke@0 102 reg_def RDI (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
duke@0 103 reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());
duke@0 104
duke@0 105 #endif
duke@0 106
duke@0 107 reg_def R8 (SOC, SOC, Op_RegI, 8, r8->as_VMReg());
duke@0 108 reg_def R8_H (SOC, SOC, Op_RegI, 8, r8->as_VMReg()->next());
duke@0 109
duke@0 110 reg_def R9 (SOC, SOC, Op_RegI, 9, r9->as_VMReg());
duke@0 111 reg_def R9_H (SOC, SOC, Op_RegI, 9, r9->as_VMReg()->next());
duke@0 112
duke@0 113 reg_def R10 (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
duke@0 114 reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
duke@0 115
duke@0 116 reg_def R11 (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
duke@0 117 reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
duke@0 118
duke@0 119 reg_def R12 (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
duke@0 120 reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());
duke@0 121
duke@0 122 reg_def R13 (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
duke@0 123 reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());
duke@0 124
duke@0 125 reg_def R14 (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
duke@0 126 reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());
duke@0 127
duke@0 128 reg_def R15 (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
duke@0 129 reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());
duke@0 130
duke@0 131
duke@0 132 // Floating Point Registers
duke@0 133
duke@0 134 // XMM registers. 128-bit registers, i.e. 4 words each, labeled a-d.
duke@0 135 // Word a in each register holds a Float, words a-b hold a Double. We
duke@0 136 // currently do not use the SIMD capabilities, so words c-d are
duke@0 137 // unused at the moment.
duke@0 138 // XMM8-XMM15 must be encoded with REX.
duke@0 139 // Linux ABI: No register preserved across function calls
duke@0 140 // XMM0-XMM7 might hold parameters
duke@0 141 // Windows ABI: XMM6-XMM15 preserved across function calls
duke@0 142 // XMM0-XMM3 might hold parameters
duke@0 143
duke@0 144 reg_def XMM0 (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
duke@0 145 reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());
duke@0 146
duke@0 147 reg_def XMM1 (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
duke@0 148 reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());
duke@0 149
duke@0 150 reg_def XMM2 (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
duke@0 151 reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());
duke@0 152
duke@0 153 reg_def XMM3 (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
duke@0 154 reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());
duke@0 155
duke@0 156 reg_def XMM4 (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
duke@0 157 reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());
duke@0 158
duke@0 159 reg_def XMM5 (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
duke@0 160 reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());
duke@0 161
duke@0 162 #ifdef _WIN64
duke@0 163
duke@0 164 reg_def XMM6 (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
duke@0 165 reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 166
duke@0 167 reg_def XMM7 (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
duke@0 168 reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 169
duke@0 170 reg_def XMM8 (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
duke@0 171 reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 172
duke@0 173 reg_def XMM9 (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
duke@0 174 reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 175
duke@0 176 reg_def XMM10 (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
duke@0 177 reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 178
duke@0 179 reg_def XMM11 (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
duke@0 180 reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 181
duke@0 182 reg_def XMM12 (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
duke@0 183 reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 184
duke@0 185 reg_def XMM13 (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
duke@0 186 reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 187
duke@0 188 reg_def XMM14 (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
duke@0 189 reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 190
duke@0 191 reg_def XMM15 (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
duke@0 192 reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 193
duke@0 194 #else
duke@0 195
duke@0 196 reg_def XMM6 (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
duke@0 197 reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());
duke@0 198
duke@0 199 reg_def XMM7 (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
duke@0 200 reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());
duke@0 201
duke@0 202 reg_def XMM8 (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
duke@0 203 reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());
duke@0 204
duke@0 205 reg_def XMM9 (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
duke@0 206 reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());
duke@0 207
duke@0 208 reg_def XMM10 (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
duke@0 209 reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());
duke@0 210
duke@0 211 reg_def XMM11 (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
duke@0 212 reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());
duke@0 213
duke@0 214 reg_def XMM12 (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
duke@0 215 reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());
duke@0 216
duke@0 217 reg_def XMM13 (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
duke@0 218 reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());
duke@0 219
duke@0 220 reg_def XMM14 (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
duke@0 221 reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());
duke@0 222
duke@0 223 reg_def XMM15 (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
duke@0 224 reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());
duke@0 225
duke@0 226 #endif // _WIN64
duke@0 227
duke@0 228 reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
duke@0 229
duke@0 230 // Specify priority of register selection within phases of register
duke@0 231 // allocation. Highest priority is first. A useful heuristic is to
duke@0 232 // give registers a low priority when they are required by machine
duke@0 233 // instructions, like EAX and EDX on I486, and choose no-save registers
duke@0 234 // before save-on-call, & save-on-call before save-on-entry. Registers
duke@0 235 // which participate in fixed calling sequences should come last.
duke@0 236 // Registers which are used as pairs must fall on an even boundary.
duke@0 237
duke@0 238 alloc_class chunk0(R10, R10_H,
duke@0 239 R11, R11_H,
duke@0 240 R8, R8_H,
duke@0 241 R9, R9_H,
duke@0 242 R12, R12_H,
duke@0 243 RCX, RCX_H,
duke@0 244 RBX, RBX_H,
duke@0 245 RDI, RDI_H,
duke@0 246 RDX, RDX_H,
duke@0 247 RSI, RSI_H,
duke@0 248 RAX, RAX_H,
duke@0 249 RBP, RBP_H,
duke@0 250 R13, R13_H,
duke@0 251 R14, R14_H,
duke@0 252 R15, R15_H,
duke@0 253 RSP, RSP_H);
duke@0 254
duke@0 255 // XXX probably use 8-15 first on Linux
duke@0 256 alloc_class chunk1(XMM0, XMM0_H,
duke@0 257 XMM1, XMM1_H,
duke@0 258 XMM2, XMM2_H,
duke@0 259 XMM3, XMM3_H,
duke@0 260 XMM4, XMM4_H,
duke@0 261 XMM5, XMM5_H,
duke@0 262 XMM6, XMM6_H,
duke@0 263 XMM7, XMM7_H,
duke@0 264 XMM8, XMM8_H,
duke@0 265 XMM9, XMM9_H,
duke@0 266 XMM10, XMM10_H,
duke@0 267 XMM11, XMM11_H,
duke@0 268 XMM12, XMM12_H,
duke@0 269 XMM13, XMM13_H,
duke@0 270 XMM14, XMM14_H,
duke@0 271 XMM15, XMM15_H);
duke@0 272
duke@0 273 alloc_class chunk2(RFLAGS);
duke@0 274
duke@0 275
duke@0 276 //----------Architecture Description Register Classes--------------------------
duke@0 277 // Several register classes are automatically defined based upon information in
duke@0 278 // this architecture description.
duke@0 279 // 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
duke@0 280 // 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
duke@0 281 // 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
duke@0 282 // 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
duke@0 283 //
duke@0 284
duke@0 285 // Class for all pointer registers (including RSP)
duke@0 286 reg_class any_reg(RAX, RAX_H,
duke@0 287 RDX, RDX_H,
duke@0 288 RBP, RBP_H,
duke@0 289 RDI, RDI_H,
duke@0 290 RSI, RSI_H,
duke@0 291 RCX, RCX_H,
duke@0 292 RBX, RBX_H,
duke@0 293 RSP, RSP_H,
duke@0 294 R8, R8_H,
duke@0 295 R9, R9_H,
duke@0 296 R10, R10_H,
duke@0 297 R11, R11_H,
duke@0 298 R12, R12_H,
duke@0 299 R13, R13_H,
duke@0 300 R14, R14_H,
duke@0 301 R15, R15_H);
duke@0 302
duke@0 303 // Class for all pointer registers except RSP
duke@0 304 reg_class ptr_reg(RAX, RAX_H,
duke@0 305 RDX, RDX_H,
duke@0 306 RBP, RBP_H,
duke@0 307 RDI, RDI_H,
duke@0 308 RSI, RSI_H,
duke@0 309 RCX, RCX_H,
duke@0 310 RBX, RBX_H,
duke@0 311 R8, R8_H,
duke@0 312 R9, R9_H,
duke@0 313 R10, R10_H,
duke@0 314 R11, R11_H,
duke@0 315 R13, R13_H,
duke@0 316 R14, R14_H);
duke@0 317
duke@0 318 // Class for all pointer registers except RAX and RSP
duke@0 319 reg_class ptr_no_rax_reg(RDX, RDX_H,
duke@0 320 RBP, RBP_H,
duke@0 321 RDI, RDI_H,
duke@0 322 RSI, RSI_H,
duke@0 323 RCX, RCX_H,
duke@0 324 RBX, RBX_H,
duke@0 325 R8, R8_H,
duke@0 326 R9, R9_H,
duke@0 327 R10, R10_H,
duke@0 328 R11, R11_H,
duke@0 329 R13, R13_H,
duke@0 330 R14, R14_H);
duke@0 331
duke@0 332 reg_class ptr_no_rbp_reg(RDX, RDX_H,
duke@0 333 RAX, RAX_H,
duke@0 334 RDI, RDI_H,
duke@0 335 RSI, RSI_H,
duke@0 336 RCX, RCX_H,
duke@0 337 RBX, RBX_H,
duke@0 338 R8, R8_H,
duke@0 339 R9, R9_H,
duke@0 340 R10, R10_H,
duke@0 341 R11, R11_H,
duke@0 342 R13, R13_H,
duke@0 343 R14, R14_H);
duke@0 344
duke@0 345 // Class for all pointer registers except RAX, RBX and RSP
duke@0 346 reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
duke@0 347 RBP, RBP_H,
duke@0 348 RDI, RDI_H,
duke@0 349 RSI, RSI_H,
duke@0 350 RCX, RCX_H,
duke@0 351 R8, R8_H,
duke@0 352 R9, R9_H,
duke@0 353 R10, R10_H,
duke@0 354 R11, R11_H,
duke@0 355 R13, R13_H,
duke@0 356 R14, R14_H);
duke@0 357
duke@0 358 // Singleton class for RAX pointer register
duke@0 359 reg_class ptr_rax_reg(RAX, RAX_H);
duke@0 360
duke@0 361 // Singleton class for RBX pointer register
duke@0 362 reg_class ptr_rbx_reg(RBX, RBX_H);
duke@0 363
duke@0 364 // Singleton class for RSI pointer register
duke@0 365 reg_class ptr_rsi_reg(RSI, RSI_H);
duke@0 366
duke@0 367 // Singleton class for RDI pointer register
duke@0 368 reg_class ptr_rdi_reg(RDI, RDI_H);
duke@0 369
duke@0 370 // Singleton class for RBP pointer register
duke@0 371 reg_class ptr_rbp_reg(RBP, RBP_H);
duke@0 372
duke@0 373 // Singleton class for stack pointer
duke@0 374 reg_class ptr_rsp_reg(RSP, RSP_H);
duke@0 375
duke@0 376 // Singleton class for TLS pointer
duke@0 377 reg_class ptr_r15_reg(R15, R15_H);
duke@0 378
duke@0 379 // Class for all long registers (except RSP)
duke@0 380 reg_class long_reg(RAX, RAX_H,
duke@0 381 RDX, RDX_H,
duke@0 382 RBP, RBP_H,
duke@0 383 RDI, RDI_H,
duke@0 384 RSI, RSI_H,
duke@0 385 RCX, RCX_H,
duke@0 386 RBX, RBX_H,
duke@0 387 R8, R8_H,
duke@0 388 R9, R9_H,
duke@0 389 R10, R10_H,
duke@0 390 R11, R11_H,
duke@0 391 R13, R13_H,
duke@0 392 R14, R14_H);
duke@0 393
duke@0 394 // Class for all long registers except RAX, RDX (and RSP)
duke@0 395 reg_class long_no_rax_rdx_reg(RBP, RBP_H,
duke@0 396 RDI, RDI_H,
duke@0 397 RSI, RSI_H,
duke@0 398 RCX, RCX_H,
duke@0 399 RBX, RBX_H,
duke@0 400 R8, R8_H,
duke@0 401 R9, R9_H,
duke@0 402 R10, R10_H,
duke@0 403 R11, R11_H,
duke@0 404 R13, R13_H,
duke@0 405 R14, R14_H);
duke@0 406
duke@0 407 // Class for all long registers except RCX (and RSP)
duke@0 408 reg_class long_no_rcx_reg(RBP, RBP_H,
duke@0 409 RDI, RDI_H,
duke@0 410 RSI, RSI_H,
duke@0 411 RAX, RAX_H,
duke@0 412 RDX, RDX_H,
duke@0 413 RBX, RBX_H,
duke@0 414 R8, R8_H,
duke@0 415 R9, R9_H,
duke@0 416 R10, R10_H,
duke@0 417 R11, R11_H,
duke@0 418 R13, R13_H,
duke@0 419 R14, R14_H);
duke@0 420
duke@0 421 // Class for all long registers except RAX (and RSP)
duke@0 422 reg_class long_no_rax_reg(RBP, RBP_H,
duke@0 423 RDX, RDX_H,
duke@0 424 RDI, RDI_H,
duke@0 425 RSI, RSI_H,
duke@0 426 RCX, RCX_H,
duke@0 427 RBX, RBX_H,
duke@0 428 R8, R8_H,
duke@0 429 R9, R9_H,
duke@0 430 R10, R10_H,
duke@0 431 R11, R11_H,
duke@0 432 R13, R13_H,
duke@0 433 R14, R14_H);
duke@0 434
duke@0 435 // Singleton class for RAX long register
duke@0 436 reg_class long_rax_reg(RAX, RAX_H);
duke@0 437
duke@0 438 // Singleton class for RCX long register
duke@0 439 reg_class long_rcx_reg(RCX, RCX_H);
duke@0 440
duke@0 441 // Singleton class for RDX long register
duke@0 442 reg_class long_rdx_reg(RDX, RDX_H);
duke@0 443
duke@0 444 // Class for all int registers (except RSP)
duke@0 445 reg_class int_reg(RAX,
duke@0 446 RDX,
duke@0 447 RBP,
duke@0 448 RDI,
duke@0 449 RSI,
duke@0 450 RCX,
duke@0 451 RBX,
duke@0 452 R8,
duke@0 453 R9,
duke@0 454 R10,
duke@0 455 R11,
duke@0 456 R13,
duke@0 457 R14);
duke@0 458
duke@0 459 // Class for all int registers except RCX (and RSP)
duke@0 460 reg_class int_no_rcx_reg(RAX,
duke@0 461 RDX,
duke@0 462 RBP,
duke@0 463 RDI,
duke@0 464 RSI,
duke@0 465 RBX,
duke@0 466 R8,
duke@0 467 R9,
duke@0 468 R10,
duke@0 469 R11,
duke@0 470 R13,
duke@0 471 R14);
duke@0 472
duke@0 473 // Class for all int registers except RAX, RDX (and RSP)
duke@0 474 reg_class int_no_rax_rdx_reg(RBP,
never@297 475 RDI,
duke@0 476 RSI,
duke@0 477 RCX,
duke@0 478 RBX,
duke@0 479 R8,
duke@0 480 R9,
duke@0 481 R10,
duke@0 482 R11,
duke@0 483 R13,
duke@0 484 R14);
duke@0 485
duke@0 486 // Singleton class for RAX int register
duke@0 487 reg_class int_rax_reg(RAX);
duke@0 488
duke@0 489 // Singleton class for RBX int register
duke@0 490 reg_class int_rbx_reg(RBX);
duke@0 491
duke@0 492 // Singleton class for RCX int register
duke@0 493 reg_class int_rcx_reg(RCX);
duke@0 494
duke@0 495 // Singleton class for RDX int register
duke@0 496 reg_class int_rdx_reg(RDX);
duke@0 497
duke@0 498 // Singleton class for RDI int register
duke@0 499 reg_class int_rdi_reg(RDI);
duke@0 500
duke@0 501 // Singleton class for instruction pointer
duke@0 502 // reg_class ip_reg(RIP);
duke@0 503
duke@0 504 // Singleton class for condition codes
duke@0 505 reg_class int_flags(RFLAGS);
duke@0 506
duke@0 507 // Class for all float registers
duke@0 508 reg_class float_reg(XMM0,
duke@0 509 XMM1,
duke@0 510 XMM2,
duke@0 511 XMM3,
duke@0 512 XMM4,
duke@0 513 XMM5,
duke@0 514 XMM6,
duke@0 515 XMM7,
duke@0 516 XMM8,
duke@0 517 XMM9,
duke@0 518 XMM10,
duke@0 519 XMM11,
duke@0 520 XMM12,
duke@0 521 XMM13,
duke@0 522 XMM14,
duke@0 523 XMM15);
duke@0 524
duke@0 525 // Class for all double registers
duke@0 526 reg_class double_reg(XMM0, XMM0_H,
duke@0 527 XMM1, XMM1_H,
duke@0 528 XMM2, XMM2_H,
duke@0 529 XMM3, XMM3_H,
duke@0 530 XMM4, XMM4_H,
duke@0 531 XMM5, XMM5_H,
duke@0 532 XMM6, XMM6_H,
duke@0 533 XMM7, XMM7_H,
duke@0 534 XMM8, XMM8_H,
duke@0 535 XMM9, XMM9_H,
duke@0 536 XMM10, XMM10_H,
duke@0 537 XMM11, XMM11_H,
duke@0 538 XMM12, XMM12_H,
duke@0 539 XMM13, XMM13_H,
duke@0 540 XMM14, XMM14_H,
duke@0 541 XMM15, XMM15_H);
duke@0 542 %}
duke@0 543
duke@0 544
duke@0 545 //----------SOURCE BLOCK-------------------------------------------------------
duke@0 546 // This is a block of C++ code which provides values, functions, and
duke@0 547 // definitions necessary in the rest of the architecture description
duke@0 548 source %{
never@297 549 #define RELOC_IMM64 Assembler::imm_operand
duke@0 550 #define RELOC_DISP32 Assembler::disp32_operand
duke@0 551
duke@0 552 #define __ _masm.
duke@0 553
duke@0 554 // !!!!! Special hack to get all types of calls to specify the byte offset
duke@0 555 // from the start of the call to the point where the return address
duke@0 556 // will point.
duke@0 557 int MachCallStaticJavaNode::ret_addr_offset()
duke@0 558 {
duke@0 559 return 5; // 5 bytes from start of call to where return address points
duke@0 560 }
duke@0 561
duke@0 562 int MachCallDynamicJavaNode::ret_addr_offset()
duke@0 563 {
duke@0 564 return 15; // 15 bytes from start of call to where return address points
duke@0 565 }
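// A sketch (for illustration only, derived from the encodings emitted later in
// this file) of where these byte counts come from:
//   static call:  call rel32 = 1 opcode byte + 4 displacement bytes      =  5
//   dynamic call: 10-byte movq of the inline-cache value (REX.W B8+r imm64)
//                 followed by the 5-byte call rel32                      = 15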
duke@0 566
duke@0 567 // In os_cpu .ad file
duke@0 568 // int MachCallRuntimeNode::ret_addr_offset()
duke@0 569
duke@0 570 // Indicate if the safepoint node needs the polling page as an input.
duke@0 571 // Since amd64 does not have absolute addressing but RIP-relative
duke@0 572 // addressing and the polling page is within 2G, it doesn't.
duke@0 573 bool SafePointNode::needs_polling_address_input()
duke@0 574 {
duke@0 575 return false;
duke@0 576 }
duke@0 577
duke@0 578 //
duke@0 579 // Compute padding required for nodes which need alignment
duke@0 580 //
duke@0 581
duke@0 582 // The address of the call instruction needs to be 4-byte aligned to
duke@0 583 // ensure that it does not span a cache line so that it can be patched.
duke@0 584 int CallStaticJavaDirectNode::compute_padding(int current_offset) const
duke@0 585 {
duke@0 586 current_offset += 1; // skip call opcode byte
duke@0 587 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 588 }
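// Worked example (illustrative, assuming alignment_required() == 4 as stated
// above): with current_offset == 6 the displacement would begin at offset 7,
// so round_to(7, 4) - 7 == 1 byte of padding is returned; the call opcode then
// lands at offset 7 and its 32-bit displacement at offset 8, 4-byte aligned.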
duke@0 589
duke@0 590 // The address of the call instruction needs to be 4-byte aligned to
duke@0 591 // ensure that it does not span a cache line so that it can be patched.
duke@0 592 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
duke@0 593 {
duke@0 594 current_offset += 11; // skip movq instruction + call opcode byte
duke@0 595 return round_to(current_offset, alignment_required()) - current_offset;
duke@0 596 }
duke@0 597
duke@0 598 #ifndef PRODUCT
duke@0 599 void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 600 {
duke@0 601 st->print("INT3");
duke@0 602 }
duke@0 603 #endif
duke@0 604
duke@0 605 // EMIT_RM()
duke@0 606 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3)
duke@0 607 {
duke@0 608 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
duke@0 609 *(cbuf.code_end()) = c;
duke@0 610 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 611 }
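// For example, emit_rm(cbuf, 0x3, 0x0, 0x1) packs mod=11, reg=000, rm=001 into
// the single byte 0xC1, the ModRM byte for a register-direct operand pair.
// The same bit layout is reused below to build SIB bytes (scale, index, base).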
duke@0 612
duke@0 613 // EMIT_CC()
duke@0 614 void emit_cc(CodeBuffer &cbuf, int f1, int f2)
duke@0 615 {
duke@0 616 unsigned char c = (unsigned char) (f1 | f2);
duke@0 617 *(cbuf.code_end()) = c;
duke@0 618 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 619 }
duke@0 620
duke@0 621 // EMIT_OPCODE()
duke@0 622 void emit_opcode(CodeBuffer &cbuf, int code)
duke@0 623 {
duke@0 624 *(cbuf.code_end()) = (unsigned char) code;
duke@0 625 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 626 }
duke@0 627
duke@0 628 // EMIT_OPCODE() w/ relocation information
duke@0 629 void emit_opcode(CodeBuffer &cbuf,
duke@0 630 int code, relocInfo::relocType reloc, int offset, int format)
duke@0 631 {
duke@0 632 cbuf.relocate(cbuf.inst_mark() + offset, reloc, format);
duke@0 633 emit_opcode(cbuf, code);
duke@0 634 }
duke@0 635
duke@0 636 // EMIT_D8()
duke@0 637 void emit_d8(CodeBuffer &cbuf, int d8)
duke@0 638 {
duke@0 639 *(cbuf.code_end()) = (unsigned char) d8;
duke@0 640 cbuf.set_code_end(cbuf.code_end() + 1);
duke@0 641 }
duke@0 642
duke@0 643 // EMIT_D16()
duke@0 644 void emit_d16(CodeBuffer &cbuf, int d16)
duke@0 645 {
duke@0 646 *((short *)(cbuf.code_end())) = d16;
duke@0 647 cbuf.set_code_end(cbuf.code_end() + 2);
duke@0 648 }
duke@0 649
duke@0 650 // EMIT_D32()
duke@0 651 void emit_d32(CodeBuffer &cbuf, int d32)
duke@0 652 {
duke@0 653 *((int *)(cbuf.code_end())) = d32;
duke@0 654 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 655 }
duke@0 656
duke@0 657 // EMIT_D64()
duke@0 658 void emit_d64(CodeBuffer &cbuf, int64_t d64)
duke@0 659 {
duke@0 660 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 661 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 662 }
duke@0 663
duke@0 664 // emit 32 bit value and construct relocation entry from relocInfo::relocType
duke@0 665 void emit_d32_reloc(CodeBuffer& cbuf,
duke@0 666 int d32,
duke@0 667 relocInfo::relocType reloc,
duke@0 668 int format)
duke@0 669 {
duke@0 670 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
duke@0 671 cbuf.relocate(cbuf.inst_mark(), reloc, format);
duke@0 672
duke@0 673 *((int*) (cbuf.code_end())) = d32;
duke@0 674 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 675 }
duke@0 676
duke@0 677 // emit 32 bit value and construct relocation entry from RelocationHolder
duke@0 678 void emit_d32_reloc(CodeBuffer& cbuf,
duke@0 679 int d32,
duke@0 680 RelocationHolder const& rspec,
duke@0 681 int format)
duke@0 682 {
duke@0 683 #ifdef ASSERT
duke@0 684 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 685 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
duke@0 686 assert(oop((intptr_t)d32)->is_oop() && oop((intptr_t)d32)->is_perm(), "cannot embed non-perm oops in code");
duke@0 687 }
duke@0 688 #endif
duke@0 689 cbuf.relocate(cbuf.inst_mark(), rspec, format);
duke@0 690
duke@0 691 *((int* )(cbuf.code_end())) = d32;
duke@0 692 cbuf.set_code_end(cbuf.code_end() + 4);
duke@0 693 }
duke@0 694
duke@0 695 void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
duke@0 696 address next_ip = cbuf.code_end() + 4;
duke@0 697 emit_d32_reloc(cbuf, (int) (addr - next_ip),
duke@0 698 external_word_Relocation::spec(addr),
duke@0 699 RELOC_DISP32);
duke@0 700 }
duke@0 701
duke@0 702
duke@0 703 // emit 64 bit value and construct relocation entry from relocInfo::relocType
duke@0 704 void emit_d64_reloc(CodeBuffer& cbuf,
duke@0 705 int64_t d64,
duke@0 706 relocInfo::relocType reloc,
duke@0 707 int format)
duke@0 708 {
duke@0 709 cbuf.relocate(cbuf.inst_mark(), reloc, format);
duke@0 710
duke@0 711 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 712 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 713 }
duke@0 714
duke@0 715 // emit 64 bit value and construct relocation entry from RelocationHolder
duke@0 716 void emit_d64_reloc(CodeBuffer& cbuf,
duke@0 717 int64_t d64,
duke@0 718 RelocationHolder const& rspec,
duke@0 719 int format)
duke@0 720 {
duke@0 721 #ifdef ASSERT
duke@0 722 if (rspec.reloc()->type() == relocInfo::oop_type &&
duke@0 723 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
duke@0 724 assert(oop(d64)->is_oop() && oop(d64)->is_perm(),
duke@0 725 "cannot embed non-perm oops in code");
duke@0 726 }
duke@0 727 #endif
duke@0 728 cbuf.relocate(cbuf.inst_mark(), rspec, format);
duke@0 729
duke@0 730 *((int64_t*) (cbuf.code_end())) = d64;
duke@0 731 cbuf.set_code_end(cbuf.code_end() + 8);
duke@0 732 }
duke@0 733
duke@0 734 // Access stack slot for load or store
duke@0 735 void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
duke@0 736 {
duke@0 737 emit_opcode(cbuf, opcode); // (e.g., FILD [RSP+src])
duke@0 738 if (-0x80 <= disp && disp < 0x80) {
duke@0 739 emit_rm(cbuf, 0x01, rm_field, RSP_enc); // R/M byte
duke@0 740 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 741 emit_d8(cbuf, disp); // Displacement // R/M byte
duke@0 742 } else {
duke@0 743 emit_rm(cbuf, 0x02, rm_field, RSP_enc); // R/M byte
duke@0 744 emit_rm(cbuf, 0x00, RSP_enc, RSP_enc); // SIB byte
duke@0 745 emit_d32(cbuf, disp); // Displacement // R/M byte
duke@0 746 }
duke@0 747 }
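// Illustration: for a small displacement such as disp == 8 this emits the
// mod=01 form (R/M byte, SIB byte for [rsp], one disp8 byte), while a larger
// displacement such as disp == 0x100 falls back to the mod=10 form with a
// full 32-bit displacement.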
duke@0 748
duke@0 749 // Encode a register-memory operand (rRegI ereg, memory mem) -- emit_reg_mem
duke@0 750 void encode_RegMem(CodeBuffer &cbuf,
duke@0 751 int reg,
duke@0 752 int base, int index, int scale, int disp, bool disp_is_oop)
duke@0 753 {
duke@0 754 assert(!disp_is_oop, "cannot have disp");
duke@0 755 int regenc = reg & 7;
duke@0 756 int baseenc = base & 7;
duke@0 757 int indexenc = index & 7;
duke@0 758
duke@0 759 // There is no index & no scale, use form without SIB byte
duke@0 760 if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
duke@0 761 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 762 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 763 emit_rm(cbuf, 0x0, regenc, baseenc); // *
duke@0 764 } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 765 // If 8-bit displacement, mode 0x1
duke@0 766 emit_rm(cbuf, 0x1, regenc, baseenc); // *
duke@0 767 emit_d8(cbuf, disp);
duke@0 768 } else {
duke@0 769 // If 32-bit displacement
duke@0 770 if (base == -1) { // Special flag for absolute address
duke@0 771 emit_rm(cbuf, 0x0, regenc, 0x5); // *
duke@0 772 if (disp_is_oop) {
duke@0 773 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 774 } else {
duke@0 775 emit_d32(cbuf, disp);
duke@0 776 }
duke@0 777 } else {
duke@0 778 // Normal base + offset
duke@0 779 emit_rm(cbuf, 0x2, regenc, baseenc); // *
duke@0 780 if (disp_is_oop) {
duke@0 781 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 782 } else {
duke@0 783 emit_d32(cbuf, disp);
duke@0 784 }
duke@0 785 }
duke@0 786 }
duke@0 787 } else {
duke@0 788 // Else, encode with the SIB byte
duke@0 789 // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
duke@0 790 if (disp == 0 && base != RBP_enc && base != R13_enc) {
duke@0 791 // If no displacement
duke@0 792 emit_rm(cbuf, 0x0, regenc, 0x4); // *
duke@0 793 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 794 } else {
duke@0 795 if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
duke@0 796 // If 8-bit displacement, mode 0x1
duke@0 797 emit_rm(cbuf, 0x1, regenc, 0x4); // *
duke@0 798 emit_rm(cbuf, scale, indexenc, baseenc);
duke@0 799 emit_d8(cbuf, disp);
duke@0 800 } else {
duke@0 801 // If 32-bit displacement
duke@0 802 if (base == 0x04 ) {
duke@0 803 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 804 emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
duke@0 805 } else {
duke@0 806 emit_rm(cbuf, 0x2, regenc, 0x4);
duke@0 807 emit_rm(cbuf, scale, indexenc, baseenc); // *
duke@0 808 }
duke@0 809 if (disp_is_oop) {
duke@0 810 emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
duke@0 811 } else {
duke@0 812 emit_d32(cbuf, disp);
duke@0 813 }
duke@0 814 }
duke@0 815 }
duke@0 816 }
duke@0 817 }
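// Worked example (illustrative): encode_RegMem(cbuf, RAX_enc, RSP_enc, 0x4, 0, 16, false)
// takes the SIB path because the base is RSP and emits 0x44 0x24 0x10, i.e.
// ModRM (mod=01, reg=rax, rm=100 "SIB follows"), SIB (scale=0, no index, base=rsp),
// and disp8=16 -- the operand [rsp + 16] with rax in the register field.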
duke@0 818
duke@0 819 void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
duke@0 820 {
duke@0 821 if (dstenc != srcenc) {
duke@0 822 if (dstenc < 8) {
duke@0 823 if (srcenc >= 8) {
duke@0 824 emit_opcode(cbuf, Assembler::REX_B);
duke@0 825 srcenc -= 8;
duke@0 826 }
duke@0 827 } else {
duke@0 828 if (srcenc < 8) {
duke@0 829 emit_opcode(cbuf, Assembler::REX_R);
duke@0 830 } else {
duke@0 831 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 832 srcenc -= 8;
duke@0 833 }
duke@0 834 dstenc -= 8;
duke@0 835 }
duke@0 836
duke@0 837 emit_opcode(cbuf, 0x8B);
duke@0 838 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 839 }
duke@0 840 }
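// Example (illustrative): encode_copy(cbuf, 9, 0), i.e. r9d <- eax, needs REX_R
// because the destination encoding is >= 8, so the bytes emitted are
// 0x44 0x8B 0xC8 (REX.R, then MOV r32,r/m32, then ModRM mod=11 reg=001 rm=000).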
duke@0 841
duke@0 842 void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
duke@0 843 if( dst_encoding == src_encoding ) {
duke@0 844 // reg-reg copy, use an empty encoding
duke@0 845 } else {
duke@0 846 MacroAssembler _masm(&cbuf);
duke@0 847
duke@0 848 __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
duke@0 849 }
duke@0 850 }
duke@0 851
duke@0 852
duke@0 853 //=============================================================================
duke@0 854 #ifndef PRODUCT
duke@0 855 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 856 {
duke@0 857 Compile* C = ra_->C;
duke@0 858
duke@0 859 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 860 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 861 // Remove wordSize for return adr already pushed
duke@0 862 // and another for the RBP we are going to save
duke@0 863 framesize -= 2*wordSize;
duke@0 864 bool need_nop = true;
duke@0 865
duke@0 866 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 867 // We require that their callers must bang for them. But be
duke@0 868 // careful, because some VM calls (such as call site linkage) can
duke@0 869 // use several kilobytes of stack. But the stack safety zone should
duke@0 870 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 871 if (C->need_stack_bang(framesize)) {
duke@0 872 st->print_cr("# stack bang"); st->print("\t");
duke@0 873 need_nop = false;
duke@0 874 }
duke@0 875 st->print_cr("pushq rbp"); st->print("\t");
duke@0 876
duke@0 877 if (VerifyStackAtCalls) {
duke@0 878 // Majik cookie to verify stack depth
duke@0 879 st->print_cr("pushq 0xffffffffbadb100d"
duke@0 880 "\t# Majik cookie for stack depth check");
duke@0 881 st->print("\t");
duke@0 882 framesize -= wordSize; // Remove 2 slots (one word) for the cookie
duke@0 883 need_nop = false;
duke@0 884 }
duke@0 885
duke@0 886 if (framesize) {
duke@0 887 st->print("subq rsp, #%d\t# Create frame", framesize);
duke@0 888 if (framesize < 0x80 && need_nop) {
duke@0 889 st->print("\n\tnop\t# nop for patch_verified_entry");
duke@0 890 }
duke@0 891 }
duke@0 892 }
duke@0 893 #endif
duke@0 894
duke@0 895 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 896 {
duke@0 897 Compile* C = ra_->C;
duke@0 898
duke@0 899 // WARNING: Initial instruction MUST be 5 bytes or longer so that
duke@0 900 // NativeJump::patch_verified_entry will be able to patch out the entry
duke@0 901 // code safely. The fldcw is ok at 6 bytes, the push to verify stack
duke@0 902 // depth is ok at 5 bytes, the frame allocation can be either 3 or
duke@0 903 // 6 bytes. So if we don't do the fldcw or the push then we must
duke@0 904 // use the 6 byte frame allocation even if we have no frame. :-(
duke@0 905 // If method sets FPU control word do it now
duke@0 906
duke@0 907 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 908 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 909 // Remove wordSize for return adr already pushed
duke@0 910 // and another for the RBP we are going to save
duke@0 911 framesize -= 2*wordSize;
duke@0 912 bool need_nop = true;
duke@0 913
duke@0 914 // Calls to C2R adapters often do not accept exceptional returns.
duke@0 915 // We require that their callers must bang for them. But be
duke@0 916 // careful, because some VM calls (such as call site linkage) can
duke@0 917 // use several kilobytes of stack. But the stack safety zone should
duke@0 918 // account for that. See bugs 4446381, 4468289, 4497237.
duke@0 919 if (C->need_stack_bang(framesize)) {
duke@0 920 MacroAssembler masm(&cbuf);
duke@0 921 masm.generate_stack_overflow_check(framesize);
duke@0 922 need_nop = false;
duke@0 923 }
duke@0 924
duke@0 925 // We always push rbp so that on return to interpreter rbp will be
duke@0 926 // restored correctly and we can correct the stack.
duke@0 927 emit_opcode(cbuf, 0x50 | RBP_enc);
duke@0 928
duke@0 929 if (VerifyStackAtCalls) {
duke@0 930 // Majik cookie to verify stack depth
duke@0 931 emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
duke@0 932 emit_d32(cbuf, 0xbadb100d);
duke@0 933 framesize -= wordSize; // Remove 2 slots (one word) for the cookie
duke@0 934 need_nop = false;
duke@0 935 }
duke@0 936
duke@0 937 if (framesize) {
duke@0 938 emit_opcode(cbuf, Assembler::REX_W);
duke@0 939 if (framesize < 0x80) {
duke@0 940 emit_opcode(cbuf, 0x83); // sub SP,#framesize
duke@0 941 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 942 emit_d8(cbuf, framesize);
duke@0 943 if (need_nop) {
duke@0 944 emit_opcode(cbuf, 0x90); // nop
duke@0 945 }
duke@0 946 } else {
duke@0 947 emit_opcode(cbuf, 0x81); // sub SP,#framesize
duke@0 948 emit_rm(cbuf, 0x3, 0x05, RSP_enc);
duke@0 949 emit_d32(cbuf, framesize);
duke@0 950 }
duke@0 951 }
duke@0 952
duke@0 953 C->set_frame_complete(cbuf.code_end() - cbuf.code_begin());
duke@0 954
duke@0 955 #ifdef ASSERT
duke@0 956 if (VerifyStackAtCalls) {
duke@0 957 Label L;
duke@0 958 MacroAssembler masm(&cbuf);
never@297 959 masm.push(rax);
never@297 960 masm.mov(rax, rsp);
never@297 961 masm.andptr(rax, StackAlignmentInBytes-1);
never@297 962 masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
never@297 963 masm.pop(rax);
duke@0 964 masm.jcc(Assembler::equal, L);
duke@0 965 masm.stop("Stack is not properly aligned!");
duke@0 966 masm.bind(L);
duke@0 967 }
duke@0 968 #endif
duke@0 969 }
duke@0 970
duke@0 971 uint MachPrologNode::size(PhaseRegAlloc* ra_) const
duke@0 972 {
duke@0 973 return MachNode::size(ra_); // too many variables; just compute it
duke@0 974 // the hard way
duke@0 975 }
duke@0 976
duke@0 977 int MachPrologNode::reloc() const
duke@0 978 {
duke@0 979 return 0; // a large enough number
duke@0 980 }
duke@0 981
duke@0 982 //=============================================================================
duke@0 983 #ifndef PRODUCT
duke@0 984 void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 985 {
duke@0 986 Compile* C = ra_->C;
duke@0 987 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 988 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 989 // Remove word for return adr already pushed
duke@0 990 // and RBP
duke@0 991 framesize -= 2*wordSize;
duke@0 992
duke@0 993 if (framesize) {
duke@0 994 st->print_cr("addq\trsp, %d\t# Destroy frame", framesize);
duke@0 995 st->print("\t");
duke@0 996 }
duke@0 997
duke@0 998 st->print_cr("popq\trbp");
duke@0 999 if (do_polling() && C->is_method_compilation()) {
duke@0 1000 st->print_cr("\ttestl\trax, [rip + #offset_to_poll_page]\t"
duke@0 1001 "# Safepoint: poll for GC");
duke@0 1002 st->print("\t");
duke@0 1003 }
duke@0 1004 }
duke@0 1005 #endif
duke@0 1006
duke@0 1007 void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1008 {
duke@0 1009 Compile* C = ra_->C;
duke@0 1010 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1011 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1012 // Remove word for return adr already pushed
duke@0 1013 // and RBP
duke@0 1014 framesize -= 2*wordSize;
duke@0 1015
duke@0 1016 // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
duke@0 1017
duke@0 1018 if (framesize) {
duke@0 1019 emit_opcode(cbuf, Assembler::REX_W);
duke@0 1020 if (framesize < 0x80) {
duke@0 1021 emit_opcode(cbuf, 0x83); // addq rsp, #framesize
duke@0 1022 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1023 emit_d8(cbuf, framesize);
duke@0 1024 } else {
duke@0 1025 emit_opcode(cbuf, 0x81); // addq rsp, #framesize
duke@0 1026 emit_rm(cbuf, 0x3, 0x00, RSP_enc);
duke@0 1027 emit_d32(cbuf, framesize);
duke@0 1028 }
duke@0 1029 }
duke@0 1030
duke@0 1031 // popq rbp
duke@0 1032 emit_opcode(cbuf, 0x58 | RBP_enc);
duke@0 1033
duke@0 1034 if (do_polling() && C->is_method_compilation()) {
duke@0 1035 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes
duke@0 1036 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 1037 cbuf.set_inst_mark();
duke@0 1038 cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX
duke@0 1039 emit_opcode(cbuf, 0x85); // testl
duke@0 1040 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5
duke@0 1041 // cbuf.inst_mark() is beginning of instruction
duke@0 1042 emit_d32_reloc(cbuf, os::get_polling_page());
duke@0 1043 // relocInfo::poll_return_type,
duke@0 1044 }
duke@0 1045 }
duke@0 1046
duke@0 1047 uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
duke@0 1048 {
duke@0 1049 Compile* C = ra_->C;
duke@0 1050 int framesize = C->frame_slots() << LogBytesPerInt;
duke@0 1051 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
duke@0 1052 // Remove word for return adr already pushed
duke@0 1053 // and RBP
duke@0 1054 framesize -= 2*wordSize;
duke@0 1055
duke@0 1056 uint size = 0;
duke@0 1057
duke@0 1058 if (do_polling() && C->is_method_compilation()) {
duke@0 1059 size += 6;
duke@0 1060 }
duke@0 1061
duke@0 1062 // count popq rbp
duke@0 1063 size++;
duke@0 1064
duke@0 1065 if (framesize) {
duke@0 1066 if (framesize < 0x80) {
duke@0 1067 size += 4;
duke@0 1068 } else {
duke@0 1069 size += 7;
duke@0 1070 }
duke@0 1071 }
duke@0 1072
duke@0 1073 return size;
duke@0 1074 }
duke@0 1075
duke@0 1076 int MachEpilogNode::reloc() const
duke@0 1077 {
duke@0 1078 return 2; // a large enough number
duke@0 1079 }
duke@0 1080
duke@0 1081 const Pipeline* MachEpilogNode::pipeline() const
duke@0 1082 {
duke@0 1083 return MachNode::pipeline_class();
duke@0 1084 }
duke@0 1085
duke@0 1086 int MachEpilogNode::safepoint_offset() const
duke@0 1087 {
duke@0 1088 return 0;
duke@0 1089 }
duke@0 1090
duke@0 1091 //=============================================================================
duke@0 1092
duke@0 1093 enum RC {
duke@0 1094 rc_bad,
duke@0 1095 rc_int,
duke@0 1096 rc_float,
duke@0 1097 rc_stack
duke@0 1098 };
duke@0 1099
duke@0 1100 static enum RC rc_class(OptoReg::Name reg)
duke@0 1101 {
duke@0 1102 if( !OptoReg::is_valid(reg) ) return rc_bad;
duke@0 1103
duke@0 1104 if (OptoReg::is_stack(reg)) return rc_stack;
duke@0 1105
duke@0 1106 VMReg r = OptoReg::as_VMReg(reg);
duke@0 1107
duke@0 1108 if (r->is_Register()) return rc_int;
duke@0 1109
duke@0 1110 assert(r->is_XMMRegister(), "must be");
duke@0 1111 return rc_float;
duke@0 1112 }
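// For illustration: any stack slot maps to rc_stack, any general-purpose
// register (e.g. the OptoReg for RAX) maps to rc_int, any XMM register
// (e.g. XMM0) maps to rc_float, and rc_bad is returned only for an invalid reg.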
duke@0 1113
duke@0 1114 uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
duke@0 1115 PhaseRegAlloc* ra_,
duke@0 1116 bool do_size,
duke@0 1117 outputStream* st) const
duke@0 1118 {
duke@0 1119
duke@0 1120 // Get registers to move
duke@0 1121 OptoReg::Name src_second = ra_->get_reg_second(in(1));
duke@0 1122 OptoReg::Name src_first = ra_->get_reg_first(in(1));
duke@0 1123 OptoReg::Name dst_second = ra_->get_reg_second(this);
duke@0 1124 OptoReg::Name dst_first = ra_->get_reg_first(this);
duke@0 1125
duke@0 1126 enum RC src_second_rc = rc_class(src_second);
duke@0 1127 enum RC src_first_rc = rc_class(src_first);
duke@0 1128 enum RC dst_second_rc = rc_class(dst_second);
duke@0 1129 enum RC dst_first_rc = rc_class(dst_first);
duke@0 1130
duke@0 1131 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
duke@0 1132 "must move at least 1 register" );
duke@0 1133
duke@0 1134 if (src_first == dst_first && src_second == dst_second) {
duke@0 1135 // Self copy, no move
duke@0 1136 return 0;
duke@0 1137 } else if (src_first_rc == rc_stack) {
duke@0 1138 // mem ->
duke@0 1139 if (dst_first_rc == rc_stack) {
duke@0 1140 // mem -> mem
duke@0 1141 assert(src_second != dst_first, "overlap");
duke@0 1142 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1143 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1144 // 64-bit
duke@0 1145 int src_offset = ra_->reg2offset(src_first);
duke@0 1146 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1147 if (cbuf) {
duke@0 1148 emit_opcode(*cbuf, 0xFF);
duke@0 1149 encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);
duke@0 1150
duke@0 1151 emit_opcode(*cbuf, 0x8F);
duke@0 1152 encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);
duke@0 1153
duke@0 1154 #ifndef PRODUCT
duke@0 1155 } else if (!do_size) {
duke@0 1156 st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
duke@0 1157 "popq [rsp + #%d]",
duke@0 1158 src_offset,
duke@0 1159 dst_offset);
duke@0 1160 #endif
duke@0 1161 }
duke@0 1162 return
duke@0 1163 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
duke@0 1164 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
duke@0 1165 } else {
duke@0 1166 // 32-bit
duke@0 1167 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1168 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1169 // No pushl/popl, so:
duke@0 1170 int src_offset = ra_->reg2offset(src_first);
duke@0 1171 int dst_offset = ra_->reg2offset(dst_first);
duke@0 1172 if (cbuf) {
duke@0 1173 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1174 emit_opcode(*cbuf, 0x89);
duke@0 1175 emit_opcode(*cbuf, 0x44);
duke@0 1176 emit_opcode(*cbuf, 0x24);
duke@0 1177 emit_opcode(*cbuf, 0xF8);
duke@0 1178
duke@0 1179 emit_opcode(*cbuf, 0x8B);
duke@0 1180 encode_RegMem(*cbuf,
duke@0 1181 RAX_enc,
duke@0 1182 RSP_enc, 0x4, 0, src_offset,
duke@0 1183 false);
duke@0 1184
duke@0 1185 emit_opcode(*cbuf, 0x89);
duke@0 1186 encode_RegMem(*cbuf,
duke@0 1187 RAX_enc,
duke@0 1188 RSP_enc, 0x4, 0, dst_offset,
duke@0 1189 false);
duke@0 1190
duke@0 1191 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1192 emit_opcode(*cbuf, 0x8B);
duke@0 1193 emit_opcode(*cbuf, 0x44);
duke@0 1194 emit_opcode(*cbuf, 0x24);
duke@0 1195 emit_opcode(*cbuf, 0xF8);
duke@0 1196
duke@0 1197 #ifndef PRODUCT
duke@0 1198 } else if (!do_size) {
duke@0 1199 st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
duke@0 1200 "movl rax, [rsp + #%d]\n\t"
duke@0 1201 "movl [rsp + #%d], rax\n\t"
duke@0 1202 "movq rax, [rsp - #8]",
duke@0 1203 src_offset,
duke@0 1204 dst_offset);
duke@0 1205 #endif
duke@0 1206 }
duke@0 1207 return
duke@0 1208 5 + // movq
duke@0 1209 3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1210 3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
duke@0 1211 5; // movq
duke@0 1212 }
duke@0 1213 } else if (dst_first_rc == rc_int) {
duke@0 1214 // mem -> gpr
duke@0 1215 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1216 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1217 // 64-bit
duke@0 1218 int offset = ra_->reg2offset(src_first);
duke@0 1219 if (cbuf) {
duke@0 1220 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1221 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1222 } else {
duke@0 1223 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1224 }
duke@0 1225 emit_opcode(*cbuf, 0x8B);
duke@0 1226 encode_RegMem(*cbuf,
duke@0 1227 Matcher::_regEncode[dst_first],
duke@0 1228 RSP_enc, 0x4, 0, offset,
duke@0 1229 false);
duke@0 1230 #ifndef PRODUCT
duke@0 1231 } else if (!do_size) {
duke@0 1232 st->print("movq %s, [rsp + #%d]\t# spill",
duke@0 1233 Matcher::regName[dst_first],
duke@0 1234 offset);
duke@0 1235 #endif
duke@0 1236 }
duke@0 1237 return
duke@0 1238 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1239 } else {
duke@0 1240 // 32-bit
duke@0 1241 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1242 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1243 int offset = ra_->reg2offset(src_first);
duke@0 1244 if (cbuf) {
duke@0 1245 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1246 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1247 }
duke@0 1248 emit_opcode(*cbuf, 0x8B);
duke@0 1249 encode_RegMem(*cbuf,
duke@0 1250 Matcher::_regEncode[dst_first],
duke@0 1251 RSP_enc, 0x4, 0, offset,
duke@0 1252 false);
duke@0 1253 #ifndef PRODUCT
duke@0 1254 } else if (!do_size) {
duke@0 1255 st->print("movl %s, [rsp + #%d]\t# spill",
duke@0 1256 Matcher::regName[dst_first],
duke@0 1257 offset);
duke@0 1258 #endif
duke@0 1259 }
duke@0 1260 return
duke@0 1261 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1262 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1263 ? 3
duke@0 1264 : 4); // REX
duke@0 1265 }
duke@0 1266 } else if (dst_first_rc == rc_float) {
duke@0 1267 // mem-> xmm
duke@0 1268 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1269 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1270 // 64-bit
duke@0 1271 int offset = ra_->reg2offset(src_first);
duke@0 1272 if (cbuf) {
duke@0 1273 emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 1274 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1275 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1276 }
duke@0 1277 emit_opcode(*cbuf, 0x0F);
duke@0 1278 emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
duke@0 1279 encode_RegMem(*cbuf,
duke@0 1280 Matcher::_regEncode[dst_first],
duke@0 1281 RSP_enc, 0x4, 0, offset,
duke@0 1282 false);
duke@0 1283 #ifndef PRODUCT
duke@0 1284 } else if (!do_size) {
duke@0 1285 st->print("%s %s, [rsp + #%d]\t# spill",
duke@0 1286 UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
duke@0 1287 Matcher::regName[dst_first],
duke@0 1288 offset);
duke@0 1289 #endif
duke@0 1290 }
duke@0 1291 return
duke@0 1292 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1293 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1294 ? 5
duke@0 1295 : 6); // REX
duke@0 1296 } else {
duke@0 1297 // 32-bit
duke@0 1298 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1299 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1300 int offset = ra_->reg2offset(src_first);
duke@0 1301 if (cbuf) {
duke@0 1302 emit_opcode(*cbuf, 0xF3);
duke@0 1303 if (Matcher::_regEncode[dst_first] >= 8) {
duke@0 1304 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1305 }
duke@0 1306 emit_opcode(*cbuf, 0x0F);
duke@0 1307 emit_opcode(*cbuf, 0x10);
duke@0 1308 encode_RegMem(*cbuf,
duke@0 1309 Matcher::_regEncode[dst_first],
duke@0 1310 RSP_enc, 0x4, 0, offset,
duke@0 1311 false);
duke@0 1312 #ifndef PRODUCT
duke@0 1313 } else if (!do_size) {
duke@0 1314 st->print("movss %s, [rsp + #%d]\t# spill",
duke@0 1315 Matcher::regName[dst_first],
duke@0 1316 offset);
duke@0 1317 #endif
duke@0 1318 }
duke@0 1319 return
duke@0 1320 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1321 ((Matcher::_regEncode[dst_first] < 8)
duke@0 1322 ? 5
duke@0 1323 : 6); // REX
duke@0 1324 }
duke@0 1325 }
duke@0 1326 } else if (src_first_rc == rc_int) {
duke@0 1327 // gpr ->
duke@0 1328 if (dst_first_rc == rc_stack) {
duke@0 1329 // gpr -> mem
duke@0 1330 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1331 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1332 // 64-bit
duke@0 1333 int offset = ra_->reg2offset(dst_first);
duke@0 1334 if (cbuf) {
duke@0 1335 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1336 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1337 } else {
duke@0 1338 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1339 }
duke@0 1340 emit_opcode(*cbuf, 0x89);
duke@0 1341 encode_RegMem(*cbuf,
duke@0 1342 Matcher::_regEncode[src_first],
duke@0 1343 RSP_enc, 0x4, 0, offset,
duke@0 1344 false);
duke@0 1345 #ifndef PRODUCT
duke@0 1346 } else if (!do_size) {
duke@0 1347 st->print("movq [rsp + #%d], %s\t# spill",
duke@0 1348 offset,
duke@0 1349 Matcher::regName[src_first]);
duke@0 1350 #endif
duke@0 1351 }
duke@0 1352 return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
duke@0 1353 } else {
duke@0 1354 // 32-bit
duke@0 1355 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1356 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1357 int offset = ra_->reg2offset(dst_first);
duke@0 1358 if (cbuf) {
duke@0 1359 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1360 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1361 }
duke@0 1362 emit_opcode(*cbuf, 0x89);
duke@0 1363 encode_RegMem(*cbuf,
duke@0 1364 Matcher::_regEncode[src_first],
duke@0 1365 RSP_enc, 0x4, 0, offset,
duke@0 1366 false);
duke@0 1367 #ifndef PRODUCT
duke@0 1368 } else if (!do_size) {
duke@0 1369 st->print("movl [rsp + #%d], %s\t# spill",
duke@0 1370 offset,
duke@0 1371 Matcher::regName[src_first]);
duke@0 1372 #endif
duke@0 1373 }
duke@0 1374 return
duke@0 1375 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1376 ((Matcher::_regEncode[src_first] < 8)
duke@0 1377 ? 3
duke@0 1378 : 4); // REX
duke@0 1379 }
duke@0 1380 } else if (dst_first_rc == rc_int) {
duke@0 1381 // gpr -> gpr
duke@0 1382 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1383 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1384 // 64-bit
duke@0 1385 if (cbuf) {
duke@0 1386 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1387 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1388 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1389 } else {
duke@0 1390 emit_opcode(*cbuf, Assembler::REX_WB);
duke@0 1391 }
duke@0 1392 } else {
duke@0 1393 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1394 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1395 } else {
duke@0 1396 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1397 }
duke@0 1398 }
duke@0 1399 emit_opcode(*cbuf, 0x8B);
duke@0 1400 emit_rm(*cbuf, 0x3,
duke@0 1401 Matcher::_regEncode[dst_first] & 7,
duke@0 1402 Matcher::_regEncode[src_first] & 7);
duke@0 1403 #ifndef PRODUCT
duke@0 1404 } else if (!do_size) {
duke@0 1405 st->print("movq %s, %s\t# spill",
duke@0 1406 Matcher::regName[dst_first],
duke@0 1407 Matcher::regName[src_first]);
duke@0 1408 #endif
duke@0 1409 }
duke@0 1410 return 3; // REX
duke@0 1411 } else {
duke@0 1412 // 32-bit
duke@0 1413 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1414 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1415 if (cbuf) {
duke@0 1416 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1417 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1418 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1419 }
duke@0 1420 } else {
duke@0 1421 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1422 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1423 } else {
duke@0 1424 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1425 }
duke@0 1426 }
duke@0 1427 emit_opcode(*cbuf, 0x8B);
duke@0 1428 emit_rm(*cbuf, 0x3,
duke@0 1429 Matcher::_regEncode[dst_first] & 7,
duke@0 1430 Matcher::_regEncode[src_first] & 7);
duke@0 1431 #ifndef PRODUCT
duke@0 1432 } else if (!do_size) {
duke@0 1433 st->print("movl %s, %s\t# spill",
duke@0 1434 Matcher::regName[dst_first],
duke@0 1435 Matcher::regName[src_first]);
duke@0 1436 #endif
duke@0 1437 }
duke@0 1438 return
duke@0 1439 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1440 ? 2
duke@0 1441 : 3; // REX
duke@0 1442 }
duke@0 1443 } else if (dst_first_rc == rc_float) {
duke@0 1444 // gpr -> xmm
duke@0 1445 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1446 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1447 // 64-bit
duke@0 1448 if (cbuf) {
duke@0 1449 emit_opcode(*cbuf, 0x66);
duke@0 1450 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1451 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1452 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1453 } else {
duke@0 1454 emit_opcode(*cbuf, Assembler::REX_WB);
duke@0 1455 }
duke@0 1456 } else {
duke@0 1457 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1458 emit_opcode(*cbuf, Assembler::REX_WR);
duke@0 1459 } else {
duke@0 1460 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1461 }
duke@0 1462 }
duke@0 1463 emit_opcode(*cbuf, 0x0F);
duke@0 1464 emit_opcode(*cbuf, 0x6E);
duke@0 1465 emit_rm(*cbuf, 0x3,
duke@0 1466 Matcher::_regEncode[dst_first] & 7,
duke@0 1467 Matcher::_regEncode[src_first] & 7);
duke@0 1468 #ifndef PRODUCT
duke@0 1469 } else if (!do_size) {
duke@0 1470 st->print("movdq %s, %s\t# spill",
duke@0 1471 Matcher::regName[dst_first],
duke@0 1472 Matcher::regName[src_first]);
duke@0 1473 #endif
duke@0 1474 }
duke@0 1475 return 5; // REX
duke@0 1476 } else {
duke@0 1477 // 32-bit
duke@0 1478 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1479 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1480 if (cbuf) {
duke@0 1481 emit_opcode(*cbuf, 0x66);
duke@0 1482 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1483 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1484 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1485 }
duke@0 1486 } else {
duke@0 1487 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1488 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1489 } else {
duke@0 1490 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1491 }
duke@0 1492 }
duke@0 1493 emit_opcode(*cbuf, 0x0F);
duke@0 1494 emit_opcode(*cbuf, 0x6E);
duke@0 1495 emit_rm(*cbuf, 0x3,
duke@0 1496 Matcher::_regEncode[dst_first] & 7,
duke@0 1497 Matcher::_regEncode[src_first] & 7);
duke@0 1498 #ifndef PRODUCT
duke@0 1499 } else if (!do_size) {
duke@0 1500 st->print("movdl %s, %s\t# spill",
duke@0 1501 Matcher::regName[dst_first],
duke@0 1502 Matcher::regName[src_first]);
duke@0 1503 #endif
duke@0 1504 }
duke@0 1505 return
duke@0 1506 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1507 ? 4
duke@0 1508 : 5; // REX
duke@0 1509 }
duke@0 1510 }
duke@0 1511 } else if (src_first_rc == rc_float) {
duke@0 1512 // xmm ->
duke@0 1513 if (dst_first_rc == rc_stack) {
duke@0 1514 // xmm -> mem
duke@0 1515 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1516 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1517 // 64-bit
duke@0 1518 int offset = ra_->reg2offset(dst_first);
duke@0 1519 if (cbuf) {
duke@0 1520 emit_opcode(*cbuf, 0xF2);
duke@0 1521 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1522 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1523 }
duke@0 1524 emit_opcode(*cbuf, 0x0F);
duke@0 1525 emit_opcode(*cbuf, 0x11);
duke@0 1526 encode_RegMem(*cbuf,
duke@0 1527 Matcher::_regEncode[src_first],
duke@0 1528 RSP_enc, 0x4, 0, offset,
duke@0 1529 false);
duke@0 1530 #ifndef PRODUCT
duke@0 1531 } else if (!do_size) {
duke@0 1532 st->print("movsd [rsp + #%d], %s\t# spill",
duke@0 1533 offset,
duke@0 1534 Matcher::regName[src_first]);
duke@0 1535 #endif
duke@0 1536 }
duke@0 1537 return
duke@0 1538 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1539 ((Matcher::_regEncode[src_first] < 8)
duke@0 1540 ? 5
duke@0 1541 : 6); // REX
duke@0 1542 } else {
duke@0 1543 // 32-bit
duke@0 1544 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1545 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1546 int offset = ra_->reg2offset(dst_first);
duke@0 1547 if (cbuf) {
duke@0 1548 emit_opcode(*cbuf, 0xF3);
duke@0 1549 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1550 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1551 }
duke@0 1552 emit_opcode(*cbuf, 0x0F);
duke@0 1553 emit_opcode(*cbuf, 0x11);
duke@0 1554 encode_RegMem(*cbuf,
duke@0 1555 Matcher::_regEncode[src_first],
duke@0 1556 RSP_enc, 0x4, 0, offset,
duke@0 1557 false);
duke@0 1558 #ifndef PRODUCT
duke@0 1559 } else if (!do_size) {
duke@0 1560 st->print("movss [rsp + #%d], %s\t# spill",
duke@0 1561 offset,
duke@0 1562 Matcher::regName[src_first]);
duke@0 1563 #endif
duke@0 1564 }
duke@0 1565 return
duke@0 1566 ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
duke@0 1567 ((Matcher::_regEncode[src_first] < 8)
duke@0 1568 ? 5
duke@0 1569 : 6); // REX
duke@0 1570 }
duke@0 1571 } else if (dst_first_rc == rc_int) {
duke@0 1572 // xmm -> gpr
duke@0 1573 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1574 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1575 // 64-bit
duke@0 1576 if (cbuf) {
duke@0 1577 emit_opcode(*cbuf, 0x66);
duke@0 1578 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1579 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1580 emit_opcode(*cbuf, Assembler::REX_W);
duke@0 1581 } else {
duke@0 1582 emit_opcode(*cbuf, Assembler::REX_WR); // attention: REX.R/REX.B chosen opposite to the gpr -> xmm (0x6E) case above
duke@0 1583 }
duke@0 1584 } else {
duke@0 1585 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1586 emit_opcode(*cbuf, Assembler::REX_WB); // attention: REX.R/REX.B chosen opposite to the gpr -> xmm (0x6E) case above
duke@0 1587 } else {
duke@0 1588 emit_opcode(*cbuf, Assembler::REX_WRB);
duke@0 1589 }
duke@0 1590 }
duke@0 1591 emit_opcode(*cbuf, 0x0F);
duke@0 1592 emit_opcode(*cbuf, 0x7E);
duke@0 1593 emit_rm(*cbuf, 0x3,
duke@0 1594 Matcher::_regEncode[dst_first] & 7,
duke@0 1595 Matcher::_regEncode[src_first] & 7);
duke@0 1596 #ifndef PRODUCT
duke@0 1597 } else if (!do_size) {
duke@0 1598 st->print("movdq %s, %s\t# spill",
duke@0 1599 Matcher::regName[dst_first],
duke@0 1600 Matcher::regName[src_first]);
duke@0 1601 #endif
duke@0 1602 }
duke@0 1603 return 5; // REX
duke@0 1604 } else {
duke@0 1605 // 32-bit
duke@0 1606 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1607 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1608 if (cbuf) {
duke@0 1609 emit_opcode(*cbuf, 0x66);
duke@0 1610 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1611 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1612 emit_opcode(*cbuf, Assembler::REX_R); // attention: REX.R/REX.B chosen opposite to the gpr -> xmm (0x6E) case above
duke@0 1613 }
duke@0 1614 } else {
duke@0 1615 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1616 emit_opcode(*cbuf, Assembler::REX_B); // attention: REX.R/REX.B chosen opposite to the gpr -> xmm (0x6E) case above
duke@0 1617 } else {
duke@0 1618 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1619 }
duke@0 1620 }
duke@0 1621 emit_opcode(*cbuf, 0x0F);
duke@0 1622 emit_opcode(*cbuf, 0x7E);
duke@0 1623 emit_rm(*cbuf, 0x3,
duke@0 1624 Matcher::_regEncode[dst_first] & 7,
duke@0 1625 Matcher::_regEncode[src_first] & 7);
duke@0 1626 #ifndef PRODUCT
duke@0 1627 } else if (!do_size) {
duke@0 1628 st->print("movdl %s, %s\t# spill",
duke@0 1629 Matcher::regName[dst_first],
duke@0 1630 Matcher::regName[src_first]);
duke@0 1631 #endif
duke@0 1632 }
duke@0 1633 return
duke@0 1634 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1635 ? 4
duke@0 1636 : 5; // REX
duke@0 1637 }
duke@0 1638 } else if (dst_first_rc == rc_float) {
duke@0 1639 // xmm -> xmm
duke@0 1640 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
duke@0 1641 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
duke@0 1642 // 64-bit
duke@0 1643 if (cbuf) {
duke@0 1644 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
duke@0 1645 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1646 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1647 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1648 }
duke@0 1649 } else {
duke@0 1650 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1651 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1652 } else {
duke@0 1653 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1654 }
duke@0 1655 }
duke@0 1656 emit_opcode(*cbuf, 0x0F);
duke@0 1657 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 1658 emit_rm(*cbuf, 0x3,
duke@0 1659 Matcher::_regEncode[dst_first] & 7,
duke@0 1660 Matcher::_regEncode[src_first] & 7);
duke@0 1661 #ifndef PRODUCT
duke@0 1662 } else if (!do_size) {
duke@0 1663 st->print("%s %s, %s\t# spill",
duke@0 1664 UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
duke@0 1665 Matcher::regName[dst_first],
duke@0 1666 Matcher::regName[src_first]);
duke@0 1667 #endif
duke@0 1668 }
duke@0 1669 return
duke@0 1670 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1671 ? 4
duke@0 1672 : 5; // REX
duke@0 1673 } else {
duke@0 1674 // 32-bit
duke@0 1675 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
duke@0 1676 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
duke@0 1677 if (cbuf) {
duke@0 1678 if (!UseXmmRegToRegMoveAll)
duke@0 1679 emit_opcode(*cbuf, 0xF3);
duke@0 1680 if (Matcher::_regEncode[dst_first] < 8) {
duke@0 1681 if (Matcher::_regEncode[src_first] >= 8) {
duke@0 1682 emit_opcode(*cbuf, Assembler::REX_B);
duke@0 1683 }
duke@0 1684 } else {
duke@0 1685 if (Matcher::_regEncode[src_first] < 8) {
duke@0 1686 emit_opcode(*cbuf, Assembler::REX_R);
duke@0 1687 } else {
duke@0 1688 emit_opcode(*cbuf, Assembler::REX_RB);
duke@0 1689 }
duke@0 1690 }
duke@0 1691 emit_opcode(*cbuf, 0x0F);
duke@0 1692 emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 1693 emit_rm(*cbuf, 0x3,
duke@0 1694 Matcher::_regEncode[dst_first] & 7,
duke@0 1695 Matcher::_regEncode[src_first] & 7);
duke@0 1696 #ifndef PRODUCT
duke@0 1697 } else if (!do_size) {
duke@0 1698 st->print("%s %s, %s\t# spill",
duke@0 1699 UseXmmRegToRegMoveAll ? "movaps" : "movss ",
duke@0 1700 Matcher::regName[dst_first],
duke@0 1701 Matcher::regName[src_first]);
duke@0 1702 #endif
duke@0 1703 }
duke@0 1704 return
duke@0 1705 (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
duke@0 1706 ? (UseXmmRegToRegMoveAll ? 3 : 4)
duke@0 1707 : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
duke@0 1708 }
duke@0 1709 }
duke@0 1710 }
duke@0 1711
duke@0 1712 assert(0, "unexpected register class combination in spill copy");
duke@0 1713 Unimplemented();
duke@0 1714
duke@0 1715 return 0;
duke@0 1716 }
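// Worked example (editor's illustration, not taken from this file): a 64-bit
// gpr -> gpr spill copy of RAX (enc 0) into R10 (enc 10) needs REX_WR (0x4C),
// the 0x8B opcode and ModRM 0xD0 (mod = 3, reg = dst & 7 = 2, rm = src & 7 = 0),
// i.e. the 3-byte sequence 4C 8B D0 ("movq r10, rax"), which is exactly what
// the "return 3; // REX" accounting above covers.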
duke@0 1717
duke@0 1718 #ifndef PRODUCT
duke@0 1719 void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
duke@0 1720 {
duke@0 1721 implementation(NULL, ra_, false, st);
duke@0 1722 }
duke@0 1723 #endif
duke@0 1724
duke@0 1725 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
duke@0 1726 {
duke@0 1727 implementation(&cbuf, ra_, false, NULL);
duke@0 1728 }
duke@0 1729
duke@0 1730 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
duke@0 1731 {
duke@0 1732 return implementation(NULL, ra_, true, NULL);
duke@0 1733 }
duke@0 1734
duke@0 1735 //=============================================================================
duke@0 1736 #ifndef PRODUCT
duke@0 1737 void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
duke@0 1738 {
duke@0 1739 st->print("nop \t# %d bytes pad for loops and calls", _count);
duke@0 1740 }
duke@0 1741 #endif
duke@0 1742
duke@0 1743 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
duke@0 1744 {
duke@0 1745 MacroAssembler _masm(&cbuf);
duke@0 1746 __ nop(_count);
duke@0 1747 }
duke@0 1748
duke@0 1749 uint MachNopNode::size(PhaseRegAlloc*) const
duke@0 1750 {
duke@0 1751 return _count;
duke@0 1752 }
duke@0 1753
duke@0 1754
duke@0 1755 //=============================================================================
duke@0 1756 #ifndef PRODUCT
duke@0 1757 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1758 {
duke@0 1759 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1760 int reg = ra_->get_reg_first(this);
duke@0 1761 st->print("leaq %s, [rsp + #%d]\t# box lock",
duke@0 1762 Matcher::regName[reg], offset);
duke@0 1763 }
duke@0 1764 #endif
duke@0 1765
duke@0 1766 void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1767 {
duke@0 1768 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1769 int reg = ra_->get_encode(this);
duke@0 1770 if (offset >= 0x80) {
duke@0 1771 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1772 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1773 emit_rm(cbuf, 0x2, reg & 7, 0x04);
duke@0 1774 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1775 emit_d32(cbuf, offset);
duke@0 1776 } else {
duke@0 1777 emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
duke@0 1778 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
duke@0 1779 emit_rm(cbuf, 0x1, reg & 7, 0x04);
duke@0 1780 emit_rm(cbuf, 0x0, 0x04, RSP_enc);
duke@0 1781 emit_d8(cbuf, offset);
duke@0 1782 }
duke@0 1783 }
duke@0 1784
duke@0 1785 uint BoxLockNode::size(PhaseRegAlloc *ra_) const
duke@0 1786 {
duke@0 1787 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
duke@0 1788 return (offset < 0x80) ? 5 : 8; // REX
duke@0 1789 }
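// Size accounting for BoxLockNode::emit above: REX.W prefix (1) + 0x8D (1) +
// ModRM (1) + SIB (1) + disp8 (1) = 5 bytes while the offset fits in a signed
// byte; otherwise the 32-bit displacement makes it 4 + 4 = 8 bytes.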
duke@0 1790
duke@0 1791 //=============================================================================
duke@0 1792
duke@0 1793 // emit call stub, compiled java to interpreter
duke@0 1794 void emit_java_to_interp(CodeBuffer& cbuf)
duke@0 1795 {
duke@0 1796 // Stub is fixed up when the corresponding call is converted from
duke@0 1797 // calling compiled code to calling interpreted code.
duke@0 1798 // movq rbx, 0
duke@0 1799 // jmp -5 # to self
duke@0 1800
duke@0 1801 address mark = cbuf.inst_mark(); // get mark within main instrs section
duke@0 1802
duke@0 1803 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1804 // That's why we must use the macroassembler to generate a stub.
duke@0 1805 MacroAssembler _masm(&cbuf);
duke@0 1806
duke@0 1807 address base =
duke@0 1808 __ start_a_stub(Compile::MAX_stubs_size);
duke@0 1809 if (base == NULL) return; // CodeBuffer::expand failed
duke@0 1810 // static stub relocation stores the instruction address of the call
duke@0 1811 __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
duke@0 1812 // static stub relocation also tags the methodOop in the code-stream.
duke@0 1813 __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
never@297 1814 // This is recognized as unresolved by relocs/nativeinst/ic code
duke@0 1815 __ jump(RuntimeAddress(__ pc()));
duke@0 1816
duke@0 1817 // Update current stubs pointer and restore code_end.
duke@0 1818 __ end_a_stub();
duke@0 1819 }
duke@0 1820
duke@0 1821 // size of call stub, compiled java to interpreter
duke@0 1822 uint size_java_to_interp()
duke@0 1823 {
duke@0 1824 return 15; // movq (1+1+8); jmp (1+4)
duke@0 1825 }
duke@0 1826
duke@0 1827 // relocation entries for call stub, compiled java to interpreter
duke@0 1828 uint reloc_java_to_interp()
duke@0 1829 {
duke@0 1830 return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
duke@0 1831 }
duke@0 1832
duke@0 1833 //=============================================================================
duke@0 1834 #ifndef PRODUCT
duke@0 1835 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
duke@0 1836 {
coleenp@108 1837 if (UseCompressedOops) {
coleenp@108 1838 st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t", oopDesc::klass_offset_in_bytes());
kvn@619 1839 if (Universe::narrow_oop_shift() != 0) {
kvn@619 1840 st->print_cr("leaq rscratch1, [r12_heapbase, rscratch1, Address::times_8, 0]");
kvn@619 1841 }
coleenp@108 1842 st->print_cr("cmpq rax, rscratch1\t # Inline cache check");
coleenp@108 1843 } else {
coleenp@108 1844 st->print_cr("cmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes() #%d]\t"
coleenp@108 1845 "# Inline cache check", oopDesc::klass_offset_in_bytes());
coleenp@108 1846 }
duke@0 1847 st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
duke@0 1848 st->print_cr("\tnop");
duke@0 1849 if (!OptoBreakpoint) {
duke@0 1850 st->print_cr("\tnop");
duke@0 1851 }
duke@0 1852 }
duke@0 1853 #endif
duke@0 1854
duke@0 1855 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
duke@0 1856 {
duke@0 1857 MacroAssembler masm(&cbuf);
duke@0 1858 #ifdef ASSERT
duke@0 1859 uint code_size = cbuf.code_size();
duke@0 1860 #endif
coleenp@108 1861 if (UseCompressedOops) {
coleenp@108 1862 masm.load_klass(rscratch1, j_rarg0);
never@297 1863 masm.cmpptr(rax, rscratch1);
coleenp@108 1864 } else {
never@297 1865 masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
coleenp@108 1866 }
duke@0 1867
duke@0 1868 masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
duke@0 1869
duke@0 1870 /* WARNING these NOPs are critical so that the verified entry point is properly
duke@0 1871 aligned for patching by NativeJump::patch_verified_entry() */
duke@0 1872 int nops_cnt = 1;
duke@0 1873 if (!OptoBreakpoint) {
duke@0 1874 // Leave space for int3
duke@0 1875 nops_cnt += 1;
duke@0 1876 }
coleenp@108 1877 if (UseCompressedOops) {
coleenp@108 1878 // extra nop keeps the UEP size a multiple of 4, so the verified entry point stays aligned
coleenp@108 1879 nops_cnt += 1;
coleenp@108 1880 }
duke@0 1881 masm.nop(nops_cnt);
duke@0 1882
duke@0 1883 assert(cbuf.code_size() - code_size == size(ra_),
duke@0 1884 "checking code size of inline cache node");
duke@0 1885 }
duke@0 1886
duke@0 1887 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
duke@0 1888 {
coleenp@108 1889 if (UseCompressedOops) {
kvn@619 1890 if (Universe::narrow_oop_shift() == 0) {
kvn@619 1891 return OptoBreakpoint ? 15 : 16;
kvn@619 1892 } else {
kvn@619 1893 return OptoBreakpoint ? 19 : 20;
kvn@619 1894 }
coleenp@108 1895 } else {
coleenp@108 1896 return OptoBreakpoint ? 11 : 12;
coleenp@108 1897 }
duke@0 1898 }
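// Rough size accounting for the simple (non-compressed) case, assuming
// klass_offset_in_bytes() fits in a disp8: "cmpq rax, [j_rarg0 + #off]" is
// 4 bytes (REX.W, 0x3B, ModRM, disp8), the jne to the ic_miss stub is 6 bytes
// (0x0F 0x85, rel32), plus one or two nops gives 11 or 12.  The compressed
// oops cases add the bytes emitted by load_klass and one extra nop.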
duke@0 1899
duke@0 1900
duke@0 1901 //=============================================================================
duke@0 1902 uint size_exception_handler()
duke@0 1903 {
duke@0 1904 // NativeCall instruction size is the same as NativeJump.
duke@0 1905 // Note that this value is also credited (in output.cpp) to
duke@0 1906 // the size of the code section.
duke@0 1907 return NativeJump::instruction_size;
duke@0 1908 }
duke@0 1909
duke@0 1910 // Emit exception handler code.
duke@0 1911 int emit_exception_handler(CodeBuffer& cbuf)
duke@0 1912 {
duke@0 1913
duke@0 1914 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1915 // That's why we must use the macroassembler to generate a handler.
duke@0 1916 MacroAssembler _masm(&cbuf);
duke@0 1917 address base =
duke@0 1918 __ start_a_stub(size_exception_handler());
duke@0 1919 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1920 int offset = __ offset();
duke@0 1921 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin()));
duke@0 1922 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
duke@0 1923 __ end_a_stub();
duke@0 1924 return offset;
duke@0 1925 }
duke@0 1926
duke@0 1927 uint size_deopt_handler()
duke@0 1928 {
duke@0 1929 // three 5-byte instructions: the call, the subptr and the jump in emit_deopt_handler below
duke@0 1930 return 15;
duke@0 1931 }
duke@0 1932
duke@0 1933 // Emit deopt handler code.
duke@0 1934 int emit_deopt_handler(CodeBuffer& cbuf)
duke@0 1935 {
duke@0 1936
duke@0 1937 // Note that the code buffer's inst_mark is always relative to insts.
duke@0 1938 // That's why we must use the macroassembler to generate a handler.
duke@0 1939 MacroAssembler _masm(&cbuf);
duke@0 1940 address base =
duke@0 1941 __ start_a_stub(size_deopt_handler());
duke@0 1942 if (base == NULL) return 0; // CodeBuffer::expand failed
duke@0 1943 int offset = __ offset();
duke@0 1944 address the_pc = (address) __ pc();
duke@0 1945 Label next;
duke@0 1946 // push a "the_pc" on the stack without destroying any registers
duke@0 1947 // as they all may be live.
duke@0 1948
duke@0 1949 // push address of "next"
duke@0 1950 __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
duke@0 1951 __ bind(next);
duke@0 1952 // adjust it so it matches "the_pc"
never@297 1953 __ subptr(Address(rsp, 0), __ offset() - offset);
duke@0 1954 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
duke@0 1955 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
duke@0 1956 __ end_a_stub();
duke@0 1957 return offset;
duke@0 1958 }
duke@0 1959
duke@0 1960 static void emit_double_constant(CodeBuffer& cbuf, double x) {
duke@0 1961 int mark = cbuf.insts()->mark_off();
duke@0 1962 MacroAssembler _masm(&cbuf);
duke@0 1963 address double_address = __ double_constant(x);
duke@0 1964 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
duke@0 1965 emit_d32_reloc(cbuf,
duke@0 1966 (int) (double_address - cbuf.code_end() - 4),
duke@0 1967 internal_word_Relocation::spec(double_address),
duke@0 1968 RELOC_DISP32);
duke@0 1969 }
duke@0 1970
duke@0 1971 static void emit_float_constant(CodeBuffer& cbuf, float x) {
duke@0 1972 int mark = cbuf.insts()->mark_off();
duke@0 1973 MacroAssembler _masm(&cbuf);
duke@0 1974 address float_address = __ float_constant(x);
duke@0 1975 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
duke@0 1976 emit_d32_reloc(cbuf,
duke@0 1977 (int) (float_address - cbuf.code_end() - 4),
duke@0 1978 internal_word_Relocation::spec(float_address),
duke@0 1979 RELOC_DISP32);
duke@0 1980 }
duke@0 1981
duke@0 1982
twisti@747 1983 const bool Matcher::match_rule_supported(int opcode) {
twisti@747 1984 if (!has_match_rule(opcode))
twisti@747 1985 return false;
twisti@747 1986
twisti@747 1987 return true; // Per default match rules are supported.
twisti@747 1988 }
twisti@747 1989
duke@0 1990 int Matcher::regnum_to_fpu_offset(int regnum)
duke@0 1991 {
duke@0 1992 return regnum - 32; // The FP registers are in the second chunk
duke@0 1993 }
duke@0 1994
duke@0 1995 // This hook is UltraSPARC-specific; here true just means we have fast l2f conversion
duke@0 1996 const bool Matcher::convL2FSupported(void) {
duke@0 1997 return true;
duke@0 1998 }
duke@0 1999
duke@0 2000 // Vector width in bytes
duke@0 2001 const uint Matcher::vector_width_in_bytes(void) {
duke@0 2002 return 8;
duke@0 2003 }
duke@0 2004
duke@0 2005 // Vector ideal reg
duke@0 2006 const uint Matcher::vector_ideal_reg(void) {
duke@0 2007 return Op_RegD;
duke@0 2008 }
duke@0 2009
duke@0 2010 // Is this branch offset short enough that a short branch can be used?
duke@0 2011 //
duke@0 2012 // NOTE: If the platform does not provide any short branch variants, then
duke@0 2013 // this method should return false for offset 0.
never@406 2014 bool Matcher::is_short_branch_offset(int rule, int offset) {
never@406 2015 // the short version of jmpConUCF2 contains multiple branches,
never@406 2016 // making the reach slightly less
never@406 2017 if (rule == jmpConUCF2_rule)
never@406 2018 return (-126 <= offset && offset <= 125);
never@406 2019 return (-128 <= offset && offset <= 127);
duke@0 2020 }
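// Editor's note (an assumption based on the comment above): the short form of
// jmpConUCF2 expands into two short jumps, so the displacement that actually
// gets encoded can be up to two bytes larger in magnitude than the offset
// tested here; narrowing the window from [-128, 127] to [-126, 125] keeps
// both rel8 displacements in range.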
duke@0 2021
duke@0 2022 const bool Matcher::isSimpleConstant64(jlong value) {
duke@0 2023 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
duke@0 2024 //return value == (int) value; // Cf. storeImmL and immL32.
duke@0 2025
duke@0 2026 // Probably always true, even if a temp register is required.
duke@0 2027 return true;
duke@0 2028 }
duke@0 2029
duke@0 2030 // The rcx parameter to rep stosq for the ClearArray node is in words.
duke@0 2031 const bool Matcher::init_array_count_is_in_bytes = false;
duke@0 2032
duke@0 2033 // Threshold size for cleararray.
duke@0 2034 const int Matcher::init_array_short_size = 8 * BytesPerLong;
duke@0 2035
duke@0 2036 // Should the Matcher clone shifts on addressing modes, expecting them
duke@0 2037 // to be subsumed into complex addressing expressions or compute them
duke@0 2038 // into registers? True for Intel but false for most RISCs
duke@0 2039 const bool Matcher::clone_shift_expressions = true;
duke@0 2040
duke@0 2041 // Is it better to copy float constants, or load them directly from
duke@0 2042 // memory? Intel can load a float constant from a direct address,
duke@0 2043 // requiring no extra registers. Most RISCs will have to materialize
duke@0 2044 // an address into a register first, so they would do better to copy
duke@0 2045 // the constant from stack.
duke@0 2046 const bool Matcher::rematerialize_float_constants = true; // XXX
duke@0 2047
duke@0 2048 // If CPU can load and store mis-aligned doubles directly then no
duke@0 2049 // fixup is needed. Else we split the double into 2 integer pieces
duke@0 2050 // and move it piece-by-piece. Only happens when passing doubles into
duke@0 2051 // C code as the Java calling convention forces doubles to be aligned.
duke@0 2052 const bool Matcher::misaligned_doubles_ok = true;
duke@0 2053
duke@0 2054 // No-op on amd64
duke@0 2055 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}
duke@0 2056
duke@0 2057 // Advertise here if the CPU requires explicit rounding operations to
duke@0 2058 // implement the UseStrictFP mode.
duke@0 2059 const bool Matcher::strict_fp_requires_explicit_rounding = true;
duke@0 2060
duke@0 2061 // Do floats take an entire double register or just half?
duke@0 2062 const bool Matcher::float_in_double = true;
duke@0 2063 // Do ints take an entire long register or just half?
duke@0 2064 const bool Matcher::int_in_long = true;
duke@0 2065
duke@0 2066 // Return whether or not this register is ever used as an argument.
duke@0 2067 // This function is used on startup to build the trampoline stubs in
duke@0 2068 // generateOptoStub. Registers not mentioned will be killed by the VM
duke@0 2069 // call in the trampoline, and arguments in those registers will not be
duke@0 2070 // available to the callee.
duke@0 2071 bool Matcher::can_be_java_arg(int reg)
duke@0 2072 {
duke@0 2073 return
duke@0 2074 reg == RDI_num || reg == RDI_H_num ||
duke@0 2075 reg == RSI_num || reg == RSI_H_num ||
duke@0 2076 reg == RDX_num || reg == RDX_H_num ||
duke@0 2077 reg == RCX_num || reg == RCX_H_num ||
duke@0 2078 reg == R8_num || reg == R8_H_num ||
duke@0 2079 reg == R9_num || reg == R9_H_num ||
coleenp@108 2080 reg == R12_num || reg == R12_H_num ||
duke@0 2081 reg == XMM0_num || reg == XMM0_H_num ||
duke@0 2082 reg == XMM1_num || reg == XMM1_H_num ||
duke@0 2083 reg == XMM2_num || reg == XMM2_H_num ||
duke@0 2084 reg == XMM3_num || reg == XMM3_H_num ||
duke@0 2085 reg == XMM4_num || reg == XMM4_H_num ||
duke@0 2086 reg == XMM5_num || reg == XMM5_H_num ||
duke@0 2087 reg == XMM6_num || reg == XMM6_H_num ||
duke@0 2088 reg == XMM7_num || reg == XMM7_H_num;
duke@0 2089 }
duke@0 2090
duke@0 2091 bool Matcher::is_spillable_arg(int reg)
duke@0 2092 {
duke@0 2093 return can_be_java_arg(reg);
duke@0 2094 }
duke@0 2095
duke@0 2096 // Register for DIVI projection of divmodI
duke@0 2097 RegMask Matcher::divI_proj_mask() {
duke@0 2098 return INT_RAX_REG_mask;
duke@0 2099 }
duke@0 2100
duke@0 2101 // Register for MODI projection of divmodI
duke@0 2102 RegMask Matcher::modI_proj_mask() {
duke@0 2103 return INT_RDX_REG_mask;
duke@0 2104 }
duke@0 2105
duke@0 2106 // Register for DIVL projection of divmodL
duke@0 2107 RegMask Matcher::divL_proj_mask() {
duke@0 2108 return LONG_RAX_REG_mask;
duke@0 2109 }
duke@0 2110
duke@0 2111 // Register for MODL projection of divmodL
duke@0 2112 RegMask Matcher::modL_proj_mask() {
duke@0 2113 return LONG_RDX_REG_mask;
duke@0 2114 }
duke@0 2115
coleenp@108 2116 static Address build_address(int b, int i, int s, int d) {
coleenp@108 2117 Register index = as_Register(i);
coleenp@108 2118 Address::ScaleFactor scale = (Address::ScaleFactor)s;
coleenp@108 2119 if (index == rsp) {
coleenp@108 2120 index = noreg;
coleenp@108 2121 scale = Address::no_scale;
coleenp@108 2122 }
coleenp@108 2123 Address addr(as_Register(b), index, scale, d);
coleenp@108 2124 return addr;
coleenp@108 2125 }
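// Note: the x86 SIB byte cannot encode rsp as an index register, which is why
// an rsp "index" is rewritten to noreg/no_scale above.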
coleenp@108 2126
duke@0 2127 %}
duke@0 2128
duke@0 2129 //----------ENCODING BLOCK-----------------------------------------------------
duke@0 2130 // This block specifies the encoding classes used by the compiler to
duke@0 2131 // output byte streams. Encoding classes are parameterized macros
duke@0 2132 // used by Machine Instruction Nodes in order to generate the bit
duke@0 2133 // encoding of the instruction. Operands specify their base encoding
duke@0 2134 // interface with the interface keyword. Four interfaces are
duke@0 2135 // currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
duke@0 2136 // COND_INTER. REG_INTER causes an operand to generate a function
duke@0 2137 // which returns its register number when queried. CONST_INTER causes
duke@0 2138 // an operand to generate a function which returns the value of the
duke@0 2139 // constant when queried. MEMORY_INTER causes an operand to generate
duke@0 2140 // four functions which return the Base Register, the Index Register,
duke@0 2141 // the Scale Value, and the Offset Value of the operand when queried.
duke@0 2142 // COND_INTER causes an operand to generate six functions which return
duke@0 2143 // the encoding code (ie - encoding bits for the instruction)
duke@0 2144 // associated with each basic boolean condition for a conditional
duke@0 2145 // instruction.
duke@0 2146 //
duke@0 2147 // Instructions specify two basic values for encoding. Again, a
duke@0 2148 // function is available to check if the constant displacement is an
duke@0 2149 // oop. They use the ins_encode keyword to specify their encoding
duke@0 2150 // classes (which must be a sequence of enc_class names, and their
duke@0 2151 // parameters, specified in the encoding block), and they use the
duke@0 2152 // opcode keyword to specify, in order, their primary, secondary, and
duke@0 2153 // tertiary opcode. Only the opcode sections which a particular
duke@0 2154 // instruction needs for encoding need to be specified.
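// As an illustration only (a hypothetical sketch modeled on the integer-add
// pattern; the real instruct definitions appear later in this file), an
// instruction ties its opcode and enc_classes together roughly like this:
//
//   instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
//   %{
//     match(Set dst (AddI dst src));
//     effect(KILL cr);
//
//     format %{ "addl    $dst, $src\t# int" %}
//     opcode(0x03);                                     // ADD r32, r/m32
//     ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
//     ins_pipe(ialu_reg_reg);
//   %}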
duke@0 2155 encode %{
duke@0 2156 // Build emit functions for each basic byte or larger field in the
duke@0 2157 // intel encoding scheme (opcode, rm, sib, immediate), and call them
duke@0 2158 // from C++ code in the enc_class source block. Emit functions will
duke@0 2159 // live in the main source block for now. In future, we can
duke@0 2160 // generalize this by adding a syntax that specifies the sizes of
duke@0 2161 // fields in an order, so that the adlc can build the emit functions
duke@0 2162 // automagically
duke@0 2163
duke@0 2164 // Emit primary opcode
duke@0 2165 enc_class OpcP
duke@0 2166 %{
duke@0 2167 emit_opcode(cbuf, $primary);
duke@0 2168 %}
duke@0 2169
duke@0 2170 // Emit secondary opcode
duke@0 2171 enc_class OpcS
duke@0 2172 %{
duke@0 2173 emit_opcode(cbuf, $secondary);
duke@0 2174 %}
duke@0 2175
duke@0 2176 // Emit tertiary opcode
duke@0 2177 enc_class OpcT
duke@0 2178 %{
duke@0 2179 emit_opcode(cbuf, $tertiary);
duke@0 2180 %}
duke@0 2181
duke@0 2182 // Emit opcode directly
duke@0 2183 enc_class Opcode(immI d8)
duke@0 2184 %{
duke@0 2185 emit_opcode(cbuf, $d8$$constant);
duke@0 2186 %}
duke@0 2187
duke@0 2188 // Emit size prefix
duke@0 2189 enc_class SizePrefix
duke@0 2190 %{
duke@0 2191 emit_opcode(cbuf, 0x66);
duke@0 2192 %}
duke@0 2193
duke@0 2194 enc_class reg(rRegI reg)
duke@0 2195 %{
duke@0 2196 emit_rm(cbuf, 0x3, 0, $reg$$reg & 7);
duke@0 2197 %}
duke@0 2198
duke@0 2199 enc_class reg_reg(rRegI dst, rRegI src)
duke@0 2200 %{
duke@0 2201 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2202 %}
duke@0 2203
duke@0 2204 enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src)
duke@0 2205 %{
duke@0 2206 emit_opcode(cbuf, $opcode$$constant);
duke@0 2207 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2208 %}
duke@0 2209
duke@0 2210 enc_class cmpfp_fixup()
duke@0 2211 %{
duke@0 2212 // jnp,s exit
duke@0 2213 emit_opcode(cbuf, 0x7B);
duke@0 2214 emit_d8(cbuf, 0x0A);
duke@0 2215
duke@0 2216 // pushfq
duke@0 2217 emit_opcode(cbuf, 0x9C);
duke@0 2218
duke@0 2219 // andq $0xffffff2b, (%rsp)
duke@0 2220 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2221 emit_opcode(cbuf, 0x81);
duke@0 2222 emit_opcode(cbuf, 0x24);
duke@0 2223 emit_opcode(cbuf, 0x24);
duke@0 2224 emit_d32(cbuf, 0xffffff2b);
duke@0 2225
duke@0 2226 // popfq
duke@0 2227 emit_opcode(cbuf, 0x9D);
duke@0 2228
duke@0 2229 // nop (target for branch to avoid branch to branch)
duke@0 2230 emit_opcode(cbuf, 0x90);
duke@0 2231 %}
duke@0 2232
duke@0 2233 enc_class cmpfp3(rRegI dst)
duke@0 2234 %{
duke@0 2235 int dstenc = $dst$$reg;
duke@0 2236
duke@0 2237 // movl $dst, -1
duke@0 2238 if (dstenc >= 8) {
duke@0 2239 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2240 }
duke@0 2241 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
duke@0 2242 emit_d32(cbuf, -1);
duke@0 2243
duke@0 2244 // jp,s done
duke@0 2245 emit_opcode(cbuf, 0x7A);
duke@0 2246 emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A);
duke@0 2247
duke@0 2248 // jb,s done
duke@0 2249 emit_opcode(cbuf, 0x72);
duke@0 2250 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
duke@0 2251
duke@0 2252 // setne $dst
duke@0 2253 if (dstenc >= 4) {
duke@0 2254 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 2255 }
duke@0 2256 emit_opcode(cbuf, 0x0F);
duke@0 2257 emit_opcode(cbuf, 0x95);
duke@0 2258 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
duke@0 2259
duke@0 2260 // movzbl $dst, $dst
duke@0 2261 if (dstenc >= 4) {
duke@0 2262 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
duke@0 2263 }
duke@0 2264 emit_opcode(cbuf, 0x0F);
duke@0 2265 emit_opcode(cbuf, 0xB6);
duke@0 2266 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
duke@0 2267 %}
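  // Summary of cmpfp3 above: it materializes the three-way result of the
  // preceding float compare.  $dst is preset to -1 and kept there when the
  // compare was unordered (jp) or below (jb); otherwise setne/movzbl leave
  // 0 for equal and 1 for greater.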
duke@0 2268
duke@0 2269 enc_class cdql_enc(no_rax_rdx_RegI div)
duke@0 2270 %{
duke@0 2271 // Full implementation of Java idiv and irem; checks for
duke@0 2272 // special case as described in JVM spec., p.243 & p.271.
duke@0 2273 //
duke@0 2274 // normal case special case
duke@0 2275 //
duke@0 2276 // input : rax: dividend min_int
duke@0 2277 // reg: divisor -1
duke@0 2278 //
duke@0 2279 // output: rax: quotient (= rax idiv reg) min_int
duke@0 2280 // rdx: remainder (= rax irem reg) 0
duke@0 2281 //
duke@0 2282 // Code sequence:
duke@0 2283 //
duke@0 2284 // 0: 3d 00 00 00 80 cmp $0x80000000,%eax
duke@0 2285 // 5: 75 07/08 jne e <normal>
duke@0 2286 // 7: 33 d2 xor %edx,%edx
duke@0 2287 // [div >= 8 -> offset + 1]
duke@0 2288 // [REX_B]
duke@0 2289 // 9: 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2290 // c: 74 03/04 je 11 <done>
duke@0 2291 // 000000000000000e <normal>:
duke@0 2292 // e: 99 cltd
duke@0 2293 // [div >= 8 -> offset + 1]
duke@0 2294 // [REX_B]
duke@0 2295 // f: f7 f9 idiv $div
duke@0 2296 // 0000000000000011 <done>:
duke@0 2297
duke@0 2298 // cmp $0x80000000,%eax
duke@0 2299 emit_opcode(cbuf, 0x3d);
duke@0 2300 emit_d8(cbuf, 0x00);
duke@0 2301 emit_d8(cbuf, 0x00);
duke@0 2302 emit_d8(cbuf, 0x00);
duke@0 2303 emit_d8(cbuf, 0x80);
duke@0 2304
duke@0 2305 // jne e <normal>
duke@0 2306 emit_opcode(cbuf, 0x75);
duke@0 2307 emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08);
duke@0 2308
duke@0 2309 // xor %edx,%edx
duke@0 2310 emit_opcode(cbuf, 0x33);
duke@0 2311 emit_d8(cbuf, 0xD2);
duke@0 2312
duke@0 2313 // cmp $0xffffffffffffffff,%ecx
duke@0 2314 if ($div$$reg >= 8) {
duke@0 2315 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2316 }
duke@0 2317 emit_opcode(cbuf, 0x83);
duke@0 2318 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2319 emit_d8(cbuf, 0xFF);
duke@0 2320
duke@0 2321 // je 11 <done>
duke@0 2322 emit_opcode(cbuf, 0x74);
duke@0 2323 emit_d8(cbuf, $div$$reg < 8 ? 0x03 : 0x04);
duke@0 2324
duke@0 2325 // <normal>
duke@0 2326 // cltd
duke@0 2327 emit_opcode(cbuf, 0x99);
duke@0 2328
duke@0 2329 // idivl (note: must be emitted by the user of this rule)
duke@0 2330 // <done>
duke@0 2331 %}
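  // Background for cdql_enc above and cdqq_enc below: idiv raises a divide
  // error (#DE) when the quotient overflows, which is precisely the
  // min_int / -1 (resp. min_long / -1) case, so the special-case path skips
  // the idiv and produces the JVM-specified result (quotient = dividend,
  // remainder = 0) directly.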
duke@0 2332
duke@0 2333 enc_class cdqq_enc(no_rax_rdx_RegL div)
duke@0 2334 %{
duke@0 2335 // Full implementation of Java ldiv and lrem; checks for
duke@0 2336 // special case as described in JVM spec., p.243 & p.271.
duke@0 2337 //
duke@0 2338 // normal case special case
duke@0 2339 //
duke@0 2340 // input : rax: dividend min_long
duke@0 2341 // reg: divisor -1
duke@0 2342 //
duke@0 2343 // output: rax: quotient (= rax idiv reg) min_long
duke@0 2344 // rdx: remainder (= rax irem reg) 0
duke@0 2345 //
duke@0 2346 // Code sequence:
duke@0 2347 //
duke@0 2348 // 0: 48 ba 00 00 00 00 00 mov $0x8000000000000000,%rdx
duke@0 2349 // 7: 00 00 80
duke@0 2350 // a: 48 39 d0 cmp %rdx,%rax
duke@0 2351 // d: 75 08 jne 17 <normal>
duke@0 2352 // f: 33 d2 xor %edx,%edx
duke@0 2353 // 11: 48 83 f9 ff cmp $0xffffffffffffffff,$div
duke@0 2354 // 15: 74 05 je 1c <done>
duke@0 2355 // 0000000000000017 <normal>:
duke@0 2356 // 17: 48 99 cqto
duke@0 2357 // 19: 48 f7 f9 idiv $div
duke@0 2358 // 000000000000001c <done>:
duke@0 2359
duke@0 2360 // mov $0x8000000000000000,%rdx
duke@0 2361 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2362 emit_opcode(cbuf, 0xBA);
duke@0 2363 emit_d8(cbuf, 0x00);
duke@0 2364 emit_d8(cbuf, 0x00);
duke@0 2365 emit_d8(cbuf, 0x00);
duke@0 2366 emit_d8(cbuf, 0x00);
duke@0 2367 emit_d8(cbuf, 0x00);
duke@0 2368 emit_d8(cbuf, 0x00);
duke@0 2369 emit_d8(cbuf, 0x00);
duke@0 2370 emit_d8(cbuf, 0x80);
duke@0 2371
duke@0 2372 // cmp %rdx,%rax
duke@0 2373 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2374 emit_opcode(cbuf, 0x39);
duke@0 2375 emit_d8(cbuf, 0xD0);
duke@0 2376
duke@0 2377 // jne 17 <normal>
duke@0 2378 emit_opcode(cbuf, 0x75);
duke@0 2379 emit_d8(cbuf, 0x08);
duke@0 2380
duke@0 2381 // xor %edx,%edx
duke@0 2382 emit_opcode(cbuf, 0x33);
duke@0 2383 emit_d8(cbuf, 0xD2);
duke@0 2384
duke@0 2385 // cmp $0xffffffffffffffff,$div
duke@0 2386 emit_opcode(cbuf, $div$$reg < 8 ? Assembler::REX_W : Assembler::REX_WB);
duke@0 2387 emit_opcode(cbuf, 0x83);
duke@0 2388 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7);
duke@0 2389 emit_d8(cbuf, 0xFF);
duke@0 2390
duke@0 2391 // je 1c <done>
duke@0 2392 emit_opcode(cbuf, 0x74);
duke@0 2393 emit_d8(cbuf, 0x05);
duke@0 2394
duke@0 2395 // <normal>
duke@0 2396 // cqto
duke@0 2397 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2398 emit_opcode(cbuf, 0x99);
duke@0 2399
duke@0 2400 // idivq (note: must be emitted by the user of this rule)
duke@0 2401 // <done>
duke@0 2402 %}
duke@0 2403
duke@0 2404 // Opcode enc_class for 8/32-bit immediate instructions with sign-extension
duke@0 2405 enc_class OpcSE(immI imm)
duke@0 2406 %{
duke@0 2407 // Emit primary opcode and set sign-extend bit
duke@0 2408 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2409 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2410 emit_opcode(cbuf, $primary | 0x02);
duke@0 2411 } else {
duke@0 2412 // 32-bit immediate
duke@0 2413 emit_opcode(cbuf, $primary);
duke@0 2414 }
duke@0 2415 %}
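  // For example, with a group-1 ALU primary opcode of 0x81 (op r/m32, imm32),
  // setting bit 1 yields 0x83, the sign-extended imm8 form of the same
  // operation.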
duke@0 2416
duke@0 2417 enc_class OpcSErm(rRegI dst, immI imm)
duke@0 2418 %{
duke@0 2419 // OpcSEr/m
duke@0 2420 int dstenc = $dst$$reg;
duke@0 2421 if (dstenc >= 8) {
duke@0 2422 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2423 dstenc -= 8;
duke@0 2424 }
duke@0 2425 // Emit primary opcode and set sign-extend bit
duke@0 2426 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2427 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2428 emit_opcode(cbuf, $primary | 0x02);
duke@0 2429 } else {
duke@0 2430 // 32-bit immediate
duke@0 2431 emit_opcode(cbuf, $primary);
duke@0 2432 }
duke@0 2433 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2434 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2435 %}
duke@0 2436
duke@0 2437 enc_class OpcSErm_wide(rRegL dst, immI imm)
duke@0 2438 %{
duke@0 2439 // OpcSEr/m
duke@0 2440 int dstenc = $dst$$reg;
duke@0 2441 if (dstenc < 8) {
duke@0 2442 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2443 } else {
duke@0 2444 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2445 dstenc -= 8;
duke@0 2446 }
duke@0 2447 // Emit primary opcode and set sign-extend bit
duke@0 2448 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2449 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2450 emit_opcode(cbuf, $primary | 0x02);
duke@0 2451 } else {
duke@0 2452 // 32-bit immediate
duke@0 2453 emit_opcode(cbuf, $primary);
duke@0 2454 }
duke@0 2455 // Emit r/m byte with secondary opcode, after primary opcode.
duke@0 2456 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2457 %}
duke@0 2458
duke@0 2459 enc_class Con8or32(immI imm)
duke@0 2460 %{
duke@0 2461 // Check for 8-bit immediate, and set sign extend bit in opcode
duke@0 2462 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) {
duke@0 2463 $$$emit8$imm$$constant;
duke@0 2464 } else {
duke@0 2465 // 32-bit immediate
duke@0 2466 $$$emit32$imm$$constant;
duke@0 2467 }
duke@0 2468 %}
duke@0 2469
duke@0 2470 enc_class Lbl(label labl)
duke@0 2471 %{
duke@0 2472 // JMP, CALL
duke@0 2473 Label* l = $labl$$label;
duke@0 2474 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
duke@0 2475 %}
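  // The "+ 4" above (and the "+ 1" in LblShort below) accounts for the size of
  // the displacement field itself: x86 rel32/rel8 branch targets are relative
  // to the address of the next instruction.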
duke@0 2476
duke@0 2477 enc_class LblShort(label labl)
duke@0 2478 %{
duke@0 2479 // JMP, CALL
duke@0 2480 Label* l = $labl$$label;
duke@0 2481 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
duke@0 2482 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
duke@0 2483 emit_d8(cbuf, disp);
duke@0 2484 %}
duke@0 2485
duke@0 2486 enc_class opc2_reg(rRegI dst)
duke@0 2487 %{
duke@0 2488 // BSWAP
duke@0 2489 emit_cc(cbuf, $secondary, $dst$$reg);
duke@0 2490 %}
duke@0 2491
duke@0 2492 enc_class opc3_reg(rRegI dst)
duke@0 2493 %{
duke@0 2494 // BSWAP
duke@0 2495 emit_cc(cbuf, $tertiary, $dst$$reg);
duke@0 2496 %}
duke@0 2497
duke@0 2498 enc_class reg_opc(rRegI div)
duke@0 2499 %{
duke@0 2500 // INC, DEC, IDIV, IMOD, JMP indirect, ...
duke@0 2501 emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7);
duke@0 2502 %}
duke@0 2503
duke@0 2504 enc_class Jcc(cmpOp cop, label labl)
duke@0 2505 %{
duke@0 2506 // JCC
duke@0 2507 Label* l = $labl$$label;
duke@0 2508 $$$emit8$primary;
duke@0 2509 emit_cc(cbuf, $secondary, $cop$$cmpcode);
duke@0 2510 emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0);
duke@0 2511 %}
duke@0 2512
duke@0 2513 enc_class JccShort (cmpOp cop, label labl)
duke@0 2514 %{
duke@0 2515 // JCC
duke@0 2516 Label *l = $labl$$label;
duke@0 2517 emit_cc(cbuf, $primary, $cop$$cmpcode);
duke@0 2518 int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0;
duke@0 2519 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
duke@0 2520 emit_d8(cbuf, disp);
duke@0 2521 %}
duke@0 2522
duke@0 2523 enc_class enc_cmov(cmpOp cop)
duke@0 2524 %{
duke@0 2525 // CMOV
duke@0 2526 $$$emit8$primary;
duke@0 2527 emit_cc(cbuf, $secondary, $cop$$cmpcode);
duke@0 2528 %}
duke@0 2529
duke@0 2530 enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src)
duke@0 2531 %{
duke@0 2532 // Invert sense of branch from sense of cmov
duke@0 2533 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
duke@0 2534 emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8)
duke@0 2535 ? (UseXmmRegToRegMoveAll ? 3 : 4)
duke@0 2536 : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX
duke@0 2537 // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src)
duke@0 2538 if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3);
duke@0 2539 if ($dst$$reg < 8) {
duke@0 2540 if ($src$$reg >= 8) {
duke@0 2541 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2542 }
duke@0 2543 } else {
duke@0 2544 if ($src$$reg < 8) {
duke@0 2545 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2546 } else {
duke@0 2547 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2548 }
duke@0 2549 }
duke@0 2550 emit_opcode(cbuf, 0x0F);
duke@0 2551 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 2552 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2553 %}
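  // In enc_cmovf_branch above and enc_cmovd_branch below, the 8-bit
  // displacement emitted after the inverted Jcc is exactly the byte length of
  // the xmm register move that follows, so the branch skips the move whenever
  // the cmov condition is false.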
duke@0 2554
duke@0 2555 enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src)
duke@0 2556 %{
duke@0 2557 // Invert sense of branch from sense of cmov
duke@0 2558 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1);
duke@0 2559 emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX
duke@0 2560
duke@0 2561 // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src)
duke@0 2562 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
duke@0 2563 if ($dst$$reg < 8) {
duke@0 2564 if ($src$$reg >= 8) {
duke@0 2565 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2566 }
duke@0 2567 } else {
duke@0 2568 if ($src$$reg < 8) {
duke@0 2569 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2570 } else {
duke@0 2571 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2572 }
duke@0 2573 }
duke@0 2574 emit_opcode(cbuf, 0x0F);
duke@0 2575 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
duke@0 2576 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7);
duke@0 2577 %}
duke@0 2578
duke@0 2579 enc_class enc_PartialSubtypeCheck()
duke@0 2580 %{
duke@0 2581 Register Rrdi = as_Register(RDI_enc); // result register
duke@0 2582 Register Rrax = as_Register(RAX_enc); // super class
duke@0 2583 Register Rrcx = as_Register(RCX_enc); // killed
duke@0 2584 Register Rrsi = as_Register(RSI_enc); // sub class
jrose@621 2585 Label miss;
jrose@621 2586 const bool set_cond_codes = true;
duke@0 2587
duke@0 2588 MacroAssembler _masm(&cbuf);
jrose@621 2589 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi,
jrose@621 2590 NULL, &miss,
jrose@621 2591 /*set_cond_codes:*/ true);
duke@0 2592 if ($primary) {
never@297 2593 __ xorptr(Rrdi, Rrdi);
duke@0 2594 }
duke@0 2595 __ bind(miss);
duke@0 2596 %}
duke@0 2597
duke@0 2598 enc_class Java_To_Interpreter(method meth)
duke@0 2599 %{
duke@0 2600 // CALL Java_To_Interpreter
duke@0 2601 // This is the instruction starting address for relocation info.
duke@0 2602 cbuf.set_inst_mark();
duke@0 2603 $$$emit8$primary;
duke@0 2604 // CALL directly to the runtime
duke@0 2605 emit_d32_reloc(cbuf,
duke@0 2606 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2607 runtime_call_Relocation::spec(),
duke@0 2608 RELOC_DISP32);
duke@0 2609 %}
duke@0 2610
duke@0 2611 enc_class Java_Static_Call(method meth)
duke@0 2612 %{
duke@0 2613 // JAVA STATIC CALL
duke@0 2614 // CALL to fixup routine. Fixup routine uses ScopeDesc info to
duke@0 2615 // determine who we intended to call.
duke@0 2616 cbuf.set_inst_mark();
duke@0 2617 $$$emit8$primary;
duke@0 2618
duke@0 2619 if (!_method) {
duke@0 2620 emit_d32_reloc(cbuf,
duke@0 2621 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2622 runtime_call_Relocation::spec(),
duke@0 2623 RELOC_DISP32);
duke@0 2624 } else if (_optimized_virtual) {
duke@0 2625 emit_d32_reloc(cbuf,
duke@0 2626 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2627 opt_virtual_call_Relocation::spec(),
duke@0 2628 RELOC_DISP32);
duke@0 2629 } else {
duke@0 2630 emit_d32_reloc(cbuf,
duke@0 2631 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2632 static_call_Relocation::spec(),
duke@0 2633 RELOC_DISP32);
duke@0 2634 }
duke@0 2635 if (_method) {
duke@0 2636 // Emit stub for static call
duke@0 2637 emit_java_to_interp(cbuf);
duke@0 2638 }
duke@0 2639 %}
duke@0 2640
duke@0 2641 enc_class Java_Dynamic_Call(method meth)
duke@0 2642 %{
duke@0 2643 // JAVA DYNAMIC CALL
duke@0 2644 // !!!!!
duke@0 2645 // Generate "movq rax, -1", placeholder instruction to load oop-info
duke@0 2646 // emit_call_dynamic_prologue( cbuf );
duke@0 2647 cbuf.set_inst_mark();
duke@0 2648
duke@0 2649 // movq rax, -1
duke@0 2650 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2651 emit_opcode(cbuf, 0xB8 | RAX_enc);
duke@0 2652 emit_d64_reloc(cbuf,
duke@0 2653 (int64_t) Universe::non_oop_word(),
duke@0 2654 oop_Relocation::spec_for_immediate(), RELOC_IMM64);
duke@0 2655 address virtual_call_oop_addr = cbuf.inst_mark();
duke@0 2656 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
duke@0 2657 // who we intended to call.
duke@0 2658 cbuf.set_inst_mark();
duke@0 2659 $$$emit8$primary;
duke@0 2660 emit_d32_reloc(cbuf,
duke@0 2661 (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4),
duke@0 2662 virtual_call_Relocation::spec(virtual_call_oop_addr),
duke@0 2663 RELOC_DISP32);
duke@0 2664 %}
duke@0 2665
duke@0 2666 enc_class Java_Compiled_Call(method meth)
duke@0 2667 %{
duke@0 2668 // JAVA COMPILED CALL
duke@0 2669 int disp = in_bytes(methodOopDesc:: from_compiled_offset());
duke@0 2670
duke@0 2671 // XXX XXX the offset is 128 in 1.5 NON-PRODUCT builds !!!
duke@0 2672 // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small");
duke@0 2673
duke@0 2674 // callq *disp(%rax)
duke@0 2675 cbuf.set_inst_mark();
duke@0 2676 $$$emit8$primary;
duke@0 2677 if (disp < 0x80) {
duke@0 2678 emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte
duke@0 2679 emit_d8(cbuf, disp); // Displacement
duke@0 2680 } else {
duke@0 2681 emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte
duke@0 2682 emit_d32(cbuf, disp); // Displacement
duke@0 2683 }
duke@0 2684 %}
duke@0 2685
duke@0 2686 enc_class reg_opc_imm(rRegI dst, immI8 shift)
duke@0 2687 %{
duke@0 2688 // SAL, SAR, SHR
duke@0 2689 int dstenc = $dst$$reg;
duke@0 2690 if (dstenc >= 8) {
duke@0 2691 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2692 dstenc -= 8;
duke@0 2693 }
duke@0 2694 $$$emit8$primary;
duke@0 2695 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2696 $$$emit8$shift$$constant;
duke@0 2697 %}
duke@0 2698
duke@0 2699 enc_class reg_opc_imm_wide(rRegL dst, immI8 shift)
duke@0 2700 %{
duke@0 2701 // SAL, SAR, SHR
duke@0 2702 int dstenc = $dst$$reg;
duke@0 2703 if (dstenc < 8) {
duke@0 2704 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2705 } else {
duke@0 2706 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2707 dstenc -= 8;
duke@0 2708 }
duke@0 2709 $$$emit8$primary;
duke@0 2710 emit_rm(cbuf, 0x3, $secondary, dstenc);
duke@0 2711 $$$emit8$shift$$constant;
duke@0 2712 %}
duke@0 2713
duke@0 2714 enc_class load_immI(rRegI dst, immI src)
duke@0 2715 %{
duke@0 2716 int dstenc = $dst$$reg;
duke@0 2717 if (dstenc >= 8) {
duke@0 2718 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2719 dstenc -= 8;
duke@0 2720 }
duke@0 2721 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2722 $$$emit32$src$$constant;
duke@0 2723 %}
duke@0 2724
duke@0 2725 enc_class load_immL(rRegL dst, immL src)
duke@0 2726 %{
duke@0 2727 int dstenc = $dst$$reg;
duke@0 2728 if (dstenc < 8) {
duke@0 2729 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2730 } else {
duke@0 2731 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2732 dstenc -= 8;
duke@0 2733 }
duke@0 2734 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2735 emit_d64(cbuf, $src$$constant);
duke@0 2736 %}
duke@0 2737
duke@0 2738 enc_class load_immUL32(rRegL dst, immUL32 src)
duke@0 2739 %{
duke@0 2740 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2741 int dstenc = $dst$$reg;
duke@0 2742 if (dstenc >= 8) {
duke@0 2743 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2744 dstenc -= 8;
duke@0 2745 }
duke@0 2746 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2747 $$$emit32$src$$constant;
duke@0 2748 %}
duke@0 2749
duke@0 2750 enc_class load_immL32(rRegL dst, immL32 src)
duke@0 2751 %{
duke@0 2752 int dstenc = $dst$$reg;
duke@0 2753 if (dstenc < 8) {
duke@0 2754 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2755 } else {
duke@0 2756 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2757 dstenc -= 8;
duke@0 2758 }
duke@0 2759 emit_opcode(cbuf, 0xC7);
duke@0 2760 emit_rm(cbuf, 0x03, 0x00, dstenc);
duke@0 2761 $$$emit32$src$$constant;
duke@0 2762 %}
duke@0 2763
duke@0 2764 enc_class load_immP31(rRegP dst, immP32 src)
duke@0 2765 %{
duke@0 2766 // same as load_immI, but this time we care about zeroes in the high word
duke@0 2767 int dstenc = $dst$$reg;
duke@0 2768 if (dstenc >= 8) {
duke@0 2769 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2770 dstenc -= 8;
duke@0 2771 }
duke@0 2772 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2773 $$$emit32$src$$constant;
duke@0 2774 %}
duke@0 2775
duke@0 2776 enc_class load_immP(rRegP dst, immP src)
duke@0 2777 %{
duke@0 2778 int dstenc = $dst$$reg;
duke@0 2779 if (dstenc < 8) {
duke@0 2780 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2781 } else {
duke@0 2782 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2783 dstenc -= 8;
duke@0 2784 }
duke@0 2785 emit_opcode(cbuf, 0xB8 | dstenc);
duke@0 2786 // This next line should be generated from ADLC
duke@0 2787 if ($src->constant_is_oop()) {
duke@0 2788 emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64);
duke@0 2789 } else {
duke@0 2790 emit_d64(cbuf, $src$$constant);
duke@0 2791 }
duke@0 2792 %}
duke@0 2793
duke@0 2794 enc_class load_immF(regF dst, immF con)
duke@0 2795 %{
duke@0 2796 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 2797 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2798 emit_float_constant(cbuf, $con$$constant);
duke@0 2799 %}
duke@0 2800
duke@0 2801 enc_class load_immD(regD dst, immD con)
duke@0 2802 %{
duke@0 2803 // XXX reg_mem doesn't support RIP-relative addressing yet
duke@0 2804 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2805 emit_double_constant(cbuf, $con$$constant);
duke@0 2806 %}
duke@0 2807
duke@0 2808 enc_class load_conF (regF dst, immF con) %{ // Load float constant
duke@0 2809 emit_opcode(cbuf, 0xF3);
duke@0 2810 if ($dst$$reg >= 8) {
duke@0 2811 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2812 }
duke@0 2813 emit_opcode(cbuf, 0x0F);
duke@0 2814 emit_opcode(cbuf, 0x10);
duke@0 2815 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2816 emit_float_constant(cbuf, $con$$constant);
duke@0 2817 %}
duke@0 2818
duke@0 2819 enc_class load_conD (regD dst, immD con) %{ // Load double constant
duke@0 2820 // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
duke@0 2821 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 2822 if ($dst$$reg >= 8) {
duke@0 2823 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2824 }
duke@0 2825 emit_opcode(cbuf, 0x0F);
duke@0 2826 emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
duke@0 2827 emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
duke@0 2828 emit_double_constant(cbuf, $con$$constant);
duke@0 2829 %}
duke@0 2830
duke@0 2831 // Encode a reg-reg copy. If it is useless, then empty encoding.
duke@0 2832 enc_class enc_copy(rRegI dst, rRegI src)
duke@0 2833 %{
duke@0 2834 encode_copy(cbuf, $dst$$reg, $src$$reg);
duke@0 2835 %}
duke@0 2836
duke@0 2837 // Encode xmm reg-reg copy. If it is useless, then empty encoding.
duke@0 2838 enc_class enc_CopyXD( RegD dst, RegD src ) %{
duke@0 2839 encode_CopyXD( cbuf, $dst$$reg, $src$$reg );
duke@0 2840 %}
duke@0 2841
duke@0 2842 enc_class enc_copy_always(rRegI dst, rRegI src)
duke@0 2843 %{
duke@0 2844 int srcenc = $src$$reg;
duke@0 2845 int dstenc = $dst$$reg;
duke@0 2846
duke@0 2847 if (dstenc < 8) {
duke@0 2848 if (srcenc >= 8) {
duke@0 2849 emit_opcode(cbuf, Assembler::REX_B);
duke@0 2850 srcenc -= 8;
duke@0 2851 }
duke@0 2852 } else {
duke@0 2853 if (srcenc < 8) {
duke@0 2854 emit_opcode(cbuf, Assembler::REX_R);
duke@0 2855 } else {
duke@0 2856 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 2857 srcenc -= 8;
duke@0 2858 }
duke@0 2859 dstenc -= 8;
duke@0 2860 }
duke@0 2861
duke@0 2862 emit_opcode(cbuf, 0x8B);
duke@0 2863 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 2864 %}
duke@0 2865
duke@0 2866 enc_class enc_copy_wide(rRegL dst, rRegL src)
duke@0 2867 %{
duke@0 2868 int srcenc = $src$$reg;
duke@0 2869 int dstenc = $dst$$reg;
duke@0 2870
duke@0 2871 if (dstenc != srcenc) {
duke@0 2872 if (dstenc < 8) {
duke@0 2873 if (srcenc < 8) {
duke@0 2874 emit_opcode(cbuf, Assembler::REX_W);
duke@0 2875 } else {
duke@0 2876 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 2877 srcenc -= 8;
duke@0 2878 }
duke@0 2879 } else {
duke@0 2880 if (srcenc < 8) {
duke@0 2881 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 2882 } else {
duke@0 2883 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 2884 srcenc -= 8;
duke@0 2885 }
duke@0 2886 dstenc -= 8;
duke@0 2887 }
duke@0 2888 emit_opcode(cbuf, 0x8B);
duke@0 2889 emit_rm(cbuf, 0x3, dstenc, srcenc);
duke@0 2890 }
duke@0 2891 %}
duke@0 2892
duke@0 2893 enc_class Con32(immI src)
duke@0 2894 %{
duke@0 2895 // Output immediate
duke@0 2896 $$$emit32$src$$constant;
duke@0 2897 %}
duke@0 2898
duke@0 2899 enc_class Con64(immL src)
duke@0 2900 %{
duke@0 2901 // Output immediate
duke@0 2902 emit_d64($src$$constant);
duke@0 2903 %}
duke@0 2904
duke@0 2905 enc_class Con32F_as_bits(immF src)
duke@0 2906 %{
duke@0 2907 // Output Float immediate bits
duke@0 2908 jfloat jf = $src$$constant;
duke@0 2909 jint jf_as_bits = jint_cast(jf);
duke@0 2910 emit_d32(cbuf, jf_as_bits);
duke@0 2911 %}
duke@0 2912
duke@0 2913 enc_class Con16(immI src)
duke@0 2914 %{
duke@0 2915 // Output immediate
duke@0 2916 $$$emit16$src$$constant;
duke@0 2917 %}
duke@0 2918
duke@0 2919 // How is this different from Con32??? XXX
duke@0 2920 enc_class Con_d32(immI src)
duke@0 2921 %{
duke@0 2922 emit_d32(cbuf,$src$$constant);
duke@0 2923 %}
duke@0 2924
duke@0 2925 enc_class conmemref (rRegP t1) %{ // Con32(storeImmI)
duke@0 2926 // Output immediate memory reference
duke@0 2927 emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
duke@0 2928 emit_d32(cbuf, 0x00);
duke@0 2929 %}
duke@0 2930
duke@0 2931 enc_class jump_enc(rRegL switch_val, rRegI dest) %{
duke@0 2932 MacroAssembler masm(&cbuf);
duke@0 2933
duke@0 2934 Register switch_reg = as_Register($switch_val$$reg);
duke@0 2935 Register dest_reg = as_Register($dest$$reg);
duke@0 2936 address table_base = masm.address_table_constant(_index2label);
duke@0 2937
duke@0 2938 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 2939 // to do that and the compiler is using that register as one it can allocate.
duke@0 2940 // So we build it all by hand.
duke@0 2941 // Address index(noreg, switch_reg, Address::times_1);
duke@0 2942 // ArrayAddress dispatch(table, index);
duke@0 2943
duke@0 2944 Address dispatch(dest_reg, switch_reg, Address::times_1);
duke@0 2945
duke@0 2946 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 2947 masm.jmp(dispatch);
duke@0 2948 %}
duke@0 2949
duke@0 2950 enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
duke@0 2951 MacroAssembler masm(&cbuf);
duke@0 2952
duke@0 2953 Register switch_reg = as_Register($switch_val$$reg);
duke@0 2954 Register dest_reg = as_Register($dest$$reg);
duke@0 2955 address table_base = masm.address_table_constant(_index2label);
duke@0 2956
duke@0 2957 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 2958 // to do that and the compiler is using that register as one it can allocate.
duke@0 2959 // So we build it all by hand.
duke@0 2960 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
duke@0 2961 // ArrayAddress dispatch(table, index);
duke@0 2962
duke@0 2963 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
duke@0 2964
duke@0 2965 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 2966 masm.jmp(dispatch);
duke@0 2967 %}
duke@0 2968
duke@0 2969 enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
duke@0 2970 MacroAssembler masm(&cbuf);
duke@0 2971
duke@0 2972 Register switch_reg = as_Register($switch_val$$reg);
duke@0 2973 Register dest_reg = as_Register($dest$$reg);
duke@0 2974 address table_base = masm.address_table_constant(_index2label);
duke@0 2975
duke@0 2976 // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
duke@0 2977 // to do that and the compiler is using that register as one it can allocate.
duke@0 2978 // So we build it all by hand.
duke@0 2979 // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
duke@0 2980 // ArrayAddress dispatch(table, index);
duke@0 2981
duke@0 2982 Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
duke@0 2983 masm.lea(dest_reg, InternalAddress(table_base));
duke@0 2984 masm.jmp(dispatch);
duke@0 2985
duke@0 2986 %}
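// Illustrative sketch of what the three jump_enc variants implement (C++
// analog only; Target, table and idx are hypothetical names, not emitted code):
//   typedef void (*Target)();
//   void dispatch(Target* table, intptr_t idx) {
//     table[idx]();   // indirect transfer through the per-label table
//   }
// The emitted code loads the table base into dest_reg with lea and then
// jumps through [dest_reg + switch_val * scale (+ offset)].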
duke@0 2987
duke@0 2988 enc_class lock_prefix()
duke@0 2989 %{
duke@0 2990 if (os::is_MP()) {
duke@0 2991 emit_opcode(cbuf, 0xF0); // lock
duke@0 2992 }
duke@0 2993 %}
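// Descriptive note: 0xF0 is the x86 LOCK prefix, which makes the following
// read-modify-write instruction atomic with respect to other processors;
// it is omitted on uniprocessor systems where that guarantee is not needed.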
duke@0 2994
duke@0 2995 enc_class REX_mem(memory mem)
duke@0 2996 %{
duke@0 2997 if ($mem$$base >= 8) {
duke@0 2998 if ($mem$$index < 8) {
duke@0 2999 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3000 } else {
duke@0 3001 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3002 }
duke@0 3003 } else {
duke@0 3004 if ($mem$$index >= 8) {
duke@0 3005 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3006 }
duke@0 3007 }
duke@0 3008 %}
duke@0 3009
duke@0 3010 enc_class REX_mem_wide(memory mem)
duke@0 3011 %{
duke@0 3012 if ($mem$$base >= 8) {
duke@0 3013 if ($mem$$index < 8) {
duke@0 3014 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3015 } else {
duke@0 3016 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 3017 }
duke@0 3018 } else {
duke@0 3019 if ($mem$$index < 8) {
duke@0 3020 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3021 } else {
duke@0 3022 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 3023 }
duke@0 3024 }
duke@0 3025 %}
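// Illustrative sketch (not part of the generated encoder): a REX prefix is
// 0100WRXB, where W selects 64-bit operand size and B/X extend the base and
// index register encodings to reach r8-r15.  The tests above are equivalent
// to something like:
//   uint8_t rex = 0x48;                  // 0100 1000 = REX.W
//   if ($mem$$base  >= 8) rex |= 0x01;   // REX.B
//   if ($mem$$index >= 8) rex |= 0x02;   // REX.X
//   emit_opcode(cbuf, rex);
// assuming Assembler::REX_W..REX_WXB carry those bit patterns.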
duke@0 3026
duke@0 3027 // for byte regs
duke@0 3028 enc_class REX_breg(rRegI reg)
duke@0 3029 %{
duke@0 3030 if ($reg$$reg >= 4) {
duke@0 3031 emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3032 }
duke@0 3033 %}
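// Descriptive note: byte-register encodings 4-7 mean AH/CH/DH/BH without a
// REX prefix but SPL/BPL/SIL/DIL with one, so a plain REX (no extension bits)
// must be emitted for encodings 4-7 even though no r8-r15 register is used;
// that is why the test above is ">= 4" rather than ">= 8".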
duke@0 3034
duke@0 3035 // for byte regs
duke@0 3036 enc_class REX_reg_breg(rRegI dst, rRegI src)
duke@0 3037 %{
duke@0 3038 if ($dst$$reg < 8) {
duke@0 3039 if ($src$$reg >= 4) {
duke@0 3040 emit_opcode(cbuf, $src$$reg < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3041 }
duke@0 3042 } else {
duke@0 3043 if ($src$$reg < 8) {
duke@0 3044 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3045 } else {
duke@0 3046 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3047 }
duke@0 3048 }
duke@0 3049 %}
duke@0 3050
duke@0 3051 // for byte regs
duke@0 3052 enc_class REX_breg_mem(rRegI reg, memory mem)
duke@0 3053 %{
duke@0 3054 if ($reg$$reg < 8) {
duke@0 3055 if ($mem$$base < 8) {
duke@0 3056 if ($mem$$index >= 8) {
duke@0 3057 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3058 } else if ($reg$$reg >= 4) {
duke@0 3059 emit_opcode(cbuf, Assembler::REX);
duke@0 3060 }
duke@0 3061 } else {
duke@0 3062 if ($mem$$index < 8) {
duke@0 3063 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3064 } else {
duke@0 3065 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3066 }
duke@0 3067 }
duke@0 3068 } else {
duke@0 3069 if ($mem$$base < 8) {
duke@0 3070 if ($mem$$index < 8) {
duke@0 3071 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3072 } else {
duke@0 3073 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 3074 }
duke@0 3075 } else {
duke@0 3076 if ($mem$$index < 8) {
duke@0 3077 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3078 } else {
duke@0 3079 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 3080 }
duke@0 3081 }
duke@0 3082 }
duke@0 3083 %}
duke@0 3084
duke@0 3085 enc_class REX_reg(rRegI reg)
duke@0 3086 %{
duke@0 3087 if ($reg$$reg >= 8) {
duke@0 3088 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3089 }
duke@0 3090 %}
duke@0 3091
duke@0 3092 enc_class REX_reg_wide(rRegI reg)
duke@0 3093 %{
duke@0 3094 if ($reg$$reg < 8) {
duke@0 3095 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3096 } else {
duke@0 3097 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3098 }
duke@0 3099 %}
duke@0 3100
duke@0 3101 enc_class REX_reg_reg(rRegI dst, rRegI src)
duke@0 3102 %{
duke@0 3103 if ($dst$$reg < 8) {
duke@0 3104 if ($src$$reg >= 8) {
duke@0 3105 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3106 }
duke@0 3107 } else {
duke@0 3108 if ($src$$reg < 8) {
duke@0 3109 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3110 } else {
duke@0 3111 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3112 }
duke@0 3113 }
duke@0 3114 %}
duke@0 3115
duke@0 3116 enc_class REX_reg_reg_wide(rRegI dst, rRegI src)
duke@0 3117 %{
duke@0 3118 if ($dst$$reg < 8) {
duke@0 3119 if ($src$$reg < 8) {
duke@0 3120 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3121 } else {
duke@0 3122 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3123 }
duke@0 3124 } else {
duke@0 3125 if ($src$$reg < 8) {
duke@0 3126 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3127 } else {
duke@0 3128 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3129 }
duke@0 3130 }
duke@0 3131 %}
duke@0 3132
duke@0 3133 enc_class REX_reg_mem(rRegI reg, memory mem)
duke@0 3134 %{
duke@0 3135 if ($reg$$reg < 8) {
duke@0 3136 if ($mem$$base < 8) {
duke@0 3137 if ($mem$$index >= 8) {
duke@0 3138 emit_opcode(cbuf, Assembler::REX_X);
duke@0 3139 }
duke@0 3140 } else {
duke@0 3141 if ($mem$$index < 8) {
duke@0 3142 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3143 } else {
duke@0 3144 emit_opcode(cbuf, Assembler::REX_XB);
duke@0 3145 }
duke@0 3146 }
duke@0 3147 } else {
duke@0 3148 if ($mem$$base < 8) {
duke@0 3149 if ($mem$$index < 8) {
duke@0 3150 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3151 } else {
duke@0 3152 emit_opcode(cbuf, Assembler::REX_RX);
duke@0 3153 }
duke@0 3154 } else {
duke@0 3155 if ($mem$$index < 8) {
duke@0 3156 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3157 } else {
duke@0 3158 emit_opcode(cbuf, Assembler::REX_RXB);
duke@0 3159 }
duke@0 3160 }
duke@0 3161 }
duke@0 3162 %}
duke@0 3163
duke@0 3164 enc_class REX_reg_mem_wide(rRegL reg, memory mem)
duke@0 3165 %{
duke@0 3166 if ($reg$$reg < 8) {
duke@0 3167 if ($mem$$base < 8) {
duke@0 3168 if ($mem$$index < 8) {
duke@0 3169 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3170 } else {
duke@0 3171 emit_opcode(cbuf, Assembler::REX_WX);
duke@0 3172 }
duke@0 3173 } else {
duke@0 3174 if ($mem$$index < 8) {
duke@0 3175 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3176 } else {
duke@0 3177 emit_opcode(cbuf, Assembler::REX_WXB);
duke@0 3178 }
duke@0 3179 }
duke@0 3180 } else {
duke@0 3181 if ($mem$$base < 8) {
duke@0 3182 if ($mem$$index < 8) {
duke@0 3183 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3184 } else {
duke@0 3185 emit_opcode(cbuf, Assembler::REX_WRX);
duke@0 3186 }
duke@0 3187 } else {
duke@0 3188 if ($mem$$index < 8) {
duke@0 3189 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3190 } else {
duke@0 3191 emit_opcode(cbuf, Assembler::REX_WRXB);
duke@0 3192 }
duke@0 3193 }
duke@0 3194 }
duke@0 3195 %}
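// Illustrative worked example: for a reg,mem form the extension bits map as
// R -> ModRM.reg, X -> SIB.index, B -> base, so reg=9, base=10, index=11
// would need REX.WRXB, i.e. 0x48 | 0x04 | 0x02 | 0x01 = 0x4F, in the wide
// encoder above.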
duke@0 3196
duke@0 3197 enc_class reg_mem(rRegI ereg, memory mem)
duke@0 3198 %{
duke@0 3199 // High registers are handled in encode_RegMem
duke@0 3200 int reg = $ereg$$reg;
duke@0 3201 int base = $mem$$base;
duke@0 3202 int index = $mem$$index;
duke@0 3203 int scale = $mem$$scale;
duke@0 3204 int disp = $mem$$disp;
duke@0 3205 bool disp_is_oop = $mem->disp_is_oop();
duke@0 3206
duke@0 3207 encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop);
duke@0 3208 %}
duke@0 3209
duke@0 3210 enc_class RM_opc_mem(immI rm_opcode, memory mem)
duke@0 3211 %{
duke@0 3212 int rm_byte_opcode = $rm_opcode$$constant;
duke@0 3213
duke@0 3214 // High registers are handled in encode_RegMem
duke@0 3215 int base = $mem$$base;
duke@0 3216 int index = $mem$$index;
duke@0 3217 int scale = $mem$$scale;
duke@0 3218 int displace = $mem$$disp;
duke@0 3219
duke@0 3220 bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
duke@0 3221 // working with static
duke@0 3222 // globals
duke@0 3223 encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
duke@0 3224 disp_is_oop);
duke@0 3225 %}
duke@0 3226
duke@0 3227 enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
duke@0 3228 %{
duke@0 3229 int reg_encoding = $dst$$reg;
duke@0 3230 int base = $src0$$reg; // 0xFFFFFFFF indicates no base
duke@0 3231 int index = 0x04; // 0x04 indicates no index
duke@0 3232 int scale = 0x00; // 0x00 indicates no scale
duke@0 3233 int displace = $src1$$constant; // 0x00 indicates no displacement
duke@0 3234 bool disp_is_oop = false;
duke@0 3235 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
duke@0 3236 disp_is_oop);
duke@0 3237 %}
duke@0 3238
duke@0 3239 enc_class neg_reg(rRegI dst)
duke@0 3240 %{
duke@0 3241 int dstenc = $dst$$reg;
duke@0 3242 if (dstenc >= 8) {
duke@0 3243 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3244 dstenc -= 8;
duke@0 3245 }
duke@0 3246 // NEG $dst
duke@0 3247 emit_opcode(cbuf, 0xF7);
duke@0 3248 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 3249 %}
duke@0 3250
duke@0 3251 enc_class neg_reg_wide(rRegI dst)
duke@0 3252 %{
duke@0 3253 int dstenc = $dst$$reg;
duke@0 3254 if (dstenc < 8) {
duke@0 3255 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3256 } else {
duke@0 3257 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3258 dstenc -= 8;
duke@0 3259 }
duke@0 3260 // NEG $dst
duke@0 3261 emit_opcode(cbuf, 0xF7);
duke@0 3262 emit_rm(cbuf, 0x3, 0x03, dstenc);
duke@0 3263 %}
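// Descriptive note: 0xF7 with ModRM.reg = 3 (the "/3" opcode extension) is
// NEG r/m32; the register being negated goes in the ModRM.rm field, which is
// what emit_rm(cbuf, 0x3, 0x03, dstenc) encodes in both variants above.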
duke@0 3264
duke@0 3265 enc_class setLT_reg(rRegI dst)
duke@0 3266 %{
duke@0 3267 int dstenc = $dst$$reg;
duke@0 3268 if (dstenc >= 8) {
duke@0 3269 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3270 dstenc -= 8;
duke@0 3271 } else if (dstenc >= 4) {
duke@0 3272 emit_opcode(cbuf, Assembler::REX);
duke@0 3273 }
duke@0 3274 // SETLT $dst
duke@0 3275 emit_opcode(cbuf, 0x0F);
duke@0 3276 emit_opcode(cbuf, 0x9C);
duke@0 3277 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 3278 %}
duke@0 3279
duke@0 3280 enc_class setNZ_reg(rRegI dst)
duke@0 3281 %{
duke@0 3282 int dstenc = $dst$$reg;
duke@0 3283 if (dstenc >= 8) {
duke@0 3284 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3285 dstenc -= 8;
duke@0 3286 } else if (dstenc >= 4) {
duke@0 3287 emit_opcode(cbuf, Assembler::REX);
duke@0 3288 }
duke@0 3289 // SETNZ $dst
duke@0 3290 emit_opcode(cbuf, 0x0F);
duke@0 3291 emit_opcode(cbuf, 0x95);
duke@0 3292 emit_rm(cbuf, 0x3, 0x0, dstenc);
duke@0 3293 %}
duke@0 3294
duke@0 3295 enc_class enc_cmpLTP(no_rcx_RegI p, no_rcx_RegI q, no_rcx_RegI y,
duke@0 3296 rcx_RegI tmp)
duke@0 3297 %{
duke@0 3298 // cadd_cmpLT
duke@0 3299
duke@0 3300 int tmpReg = $tmp$$reg;
duke@0 3301
duke@0 3302 int penc = $p$$reg;
duke@0 3303 int qenc = $q$$reg;
duke@0 3304 int yenc = $y$$reg;
duke@0 3305
duke@0 3306 // subl $p,$q
duke@0 3307 if (penc < 8) {
duke@0 3308 if (qenc >= 8) {
duke@0 3309 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3310 }
duke@0 3311 } else {
duke@0 3312 if (qenc < 8) {
duke@0 3313 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3314 } else {
duke@0 3315 emit_opcode(cbuf, Assembler::REX_RB);
duke@0 3316 }
duke@0 3317 }
duke@0 3318 emit_opcode(cbuf, 0x2B);
duke@0 3319 emit_rm(cbuf, 0x3, penc & 7, qenc & 7);
duke@0 3320
duke@0 3321 // sbbl $tmp, $tmp
duke@0 3322 emit_opcode(cbuf, 0x1B);
duke@0 3323 emit_rm(cbuf, 0x3, tmpReg, tmpReg);
duke@0 3324
duke@0 3325 // andl $tmp, $y
duke@0 3326 if (yenc >= 8) {
duke@0 3327 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3328 }
duke@0 3329 emit_opcode(cbuf, 0x23);
duke@0 3330 emit_rm(cbuf, 0x3, tmpReg, yenc & 7);
duke@0 3331
duke@0 3332 // addl $p,$tmp
duke@0 3333 if (penc >= 8) {
duke@0 3334 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3335 }
duke@0 3336 emit_opcode(cbuf, 0x03);
duke@0 3337 emit_rm(cbuf, 0x3, penc & 7, tmpReg);
duke@0 3338 %}
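// Illustrative C++ analog of the branchless sequence above (the unsigned
// borrow stands in for the carry flag; all names are hypothetical):
//   bool borrow = (uint32_t)p < (uint32_t)q;  // CF after "sub p, q"
//   p -= q;
//   int32_t mask = borrow ? -1 : 0;           // sbb tmp, tmp
//   p += (y & mask);                          // and tmp, y; add p, tmp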
duke@0 3339
duke@0 3340 // Compare the longs and set -1, 0, or 1 into dst
duke@0 3341 enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
duke@0 3342 %{
duke@0 3343 int src1enc = $src1$$reg;
duke@0 3344 int src2enc = $src2$$reg;
duke@0 3345 int dstenc = $dst$$reg;
duke@0 3346
duke@0 3347 // cmpq $src1, $src2
duke@0 3348 if (src1enc < 8) {
duke@0 3349 if (src2enc < 8) {
duke@0 3350 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3351 } else {
duke@0 3352 emit_opcode(cbuf, Assembler::REX_WB);
duke@0 3353 }
duke@0 3354 } else {
duke@0 3355 if (src2enc < 8) {
duke@0 3356 emit_opcode(cbuf, Assembler::REX_WR);
duke@0 3357 } else {
duke@0 3358 emit_opcode(cbuf, Assembler::REX_WRB);
duke@0 3359 }
duke@0 3360 }
duke@0 3361 emit_opcode(cbuf, 0x3B);
duke@0 3362 emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);
duke@0 3363
duke@0 3364 // movl $dst, -1
duke@0 3365 if (dstenc >= 8) {
duke@0 3366 emit_opcode(cbuf, Assembler::REX_B);
duke@0 3367 }
duke@0 3368 emit_opcode(cbuf, 0xB8 | (dstenc & 7));
duke@0 3369 emit_d32(cbuf, -1);
duke@0 3370
duke@0 3371 // jl,s done
duke@0 3372 emit_opcode(cbuf, 0x7C);
duke@0 3373 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);
duke@0 3374
duke@0 3375 // setne $dst
duke@0 3376 if (dstenc >= 4) {
duke@0 3377 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
duke@0 3378 }
duke@0 3379 emit_opcode(cbuf, 0x0F);
duke@0 3380 emit_opcode(cbuf, 0x95);
duke@0 3381 emit_opcode(cbuf, 0xC0 | (dstenc & 7));
duke@0 3382
duke@0 3383 // movzbl $dst, $dst
duke@0 3384 if (dstenc >= 4) {
duke@0 3385 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
duke@0 3386 }
duke@0 3387 emit_opcode(cbuf, 0x0F);
duke@0 3388 emit_opcode(cbuf, 0xB6);
duke@0 3389 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
duke@0 3390 %}
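// Illustrative C++ analog of the result computed above (names hypothetical):
//   int dst = (src1 < src2) ? -1 : ((src1 != src2) ? 1 : 0);
// The movl of -1 happens unconditionally; the jl simply skips the
// setne/movzbl pair that would otherwise overwrite it with 0 or 1.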
duke@0 3391
duke@0 3392 enc_class Push_ResultXD(regD dst) %{
duke@0 3393 int dstenc = $dst$$reg;
duke@0 3394
duke@0 3395 store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]
duke@0 3396
duke@0 3397 // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
duke@0 3398 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
duke@0 3399 if (dstenc >= 8) {
duke@0 3400 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3401 }
duke@0 3402 emit_opcode (cbuf, 0x0F );
duke@0 3403 emit_opcode (cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12 );
duke@0 3404 encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);
duke@0 3405
duke@0 3406 // add rsp,8
duke@0 3407 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3408 emit_opcode(cbuf,0x83);
duke@0 3409 emit_rm(cbuf,0x3, 0x0, RSP_enc);
duke@0 3410 emit_d8(cbuf,0x08);
duke@0 3411 %}
duke@0 3412
duke@0 3413 enc_class Push_SrcXD(regD src) %{
duke@0 3414 int srcenc = $src$$reg;
duke@0 3415
duke@0 3416 // subq rsp,#8
duke@0 3417 emit_opcode(cbuf, Assembler::REX_W);
duke@0 3418 emit_opcode(cbuf, 0x83);
duke@0 3419 emit_rm(cbuf, 0x3, 0x5, RSP_enc);
duke@0 3420 emit_d8(cbuf, 0x8);
duke@0 3421
duke@0 3422 // movsd [rsp],src
duke@0 3423 emit_opcode(cbuf, 0xF2);
duke@0 3424 if (srcenc >= 8) {
duke@0 3425 emit_opcode(cbuf, Assembler::REX_R);
duke@0 3426 }
duke@0 3427 emit_opcode(cbuf, 0x0F);
duke@0 3428 emit_opcode(cbuf, 0x11);
duke@0 3429 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);
duke@0 3430
duke@0 3431 // fldd [rsp]
duke@0 3432 emit_opcode(cbuf, 0x66);
duke@0 3433 emit_opcode(cbuf, 0xDD);
duke@0 3434 encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
duke@0 3435 %}
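// Descriptive note: both encodings above move values between the x87 stack
// and an XMM register by going through a scratch slot on the C stack
// (FSTP/FLD on one side, movsd/movlpd on the other), adjusting rsp around
// the slot as needed.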
duke@0 3436
duke@0 3437
duke@0 3438 enc_class movq_ld(regD dst, memory mem) %{
duke@0 3439 MacroAssembler _masm(&cbuf);
twisti@603 3440 __ movq($dst$$XMMRegister, $mem$$Address);
duke@0 3441 %}
duke@0 3442
duke@0 3443 enc_class movq_st(memory mem, regD src) %{
duke@0 3444 MacroAssembler _masm(&cbuf);
twisti@603 3445 __ movq($mem$$Address, $src$$XMMRegister);
duke@0 3446 %}
duke@0 3447
duke@0 3448 enc_class pshufd_8x8(regF dst, regF src) %{
duke@0 3449 MacroAssembler _masm(&cbuf);
duke@0 3450
duke@0 3451 encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
duke@0 3452 __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
duke@0 3453 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
duke@0 3454 %}
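// Descriptive note: punpcklbw dst,dst interleaves the low bytes of dst with
// themselves (b0 b0 b1 b1 ...), and pshuflw with imm 0x00 then broadcasts
// word 0, leaving the original low byte replicated across the low 8 byte
// lanes, consistent with the 8x8 (eight 8-bit copies) name.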
duke@0 3455
duke@0 3456 enc_class pshufd_4x16(regF dst, regF src) %{
duke@0 3457 MacroAssembler _masm(&cbuf);
duke@0 3458
duke@0 3459 __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
duke@0 3460 %}
duke@0 3461
duke@0 3462 enc_class pshufd(regD dst, regD src, int mode) %{
duke@0 3463 MacroAssembler _masm(&cbuf);
duke@0 3464
duke@0 3465 __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
duke@0 3466 %}
duke@0 3467
duke@0 3468 enc_class pxor(regD dst, regD src) %{
duke@0 3469 MacroAssembler _masm(&cbuf);
duke@0 3470
duke@0 3471 __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
duke@0 3472 %}
duke@0 3473
duke@0 3474 enc_class mov_i2x(regD dst, rRegI src) %{
duke@0 3475 MacroAssembler _masm(&cbuf);
duke@0 3476
duke@0 3477 __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
duke@0 3478 %}
duke@0 3479
duke@0 3480 // obj: object to lock
duke@0 3481 // box: box address (header location) -- killed
duke@0 3482 // tmp: rax -- killed
duke@0 3483 // scr: rbx -- killed
duke@0 3484 //
duke@0 3485 // What follows is a direct transliteration of fast_lock() and fast_unlock()
duke@0 3486 // from i486.ad. See that file for comments.
duke@0 3487 // TODO: where possible switch from movq (r, 0) to movl(r,0) and
duke@0 3488 // use the shorter encoding. (Movl clears the high-order 32-bits).
duke@0 3489
duke@0 3490
duke@0 3491 enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
duke@0 3492 %{
duke@0 3493 Register objReg = as_Register((int)$obj$$reg);
duke@0 3494 Register boxReg = as_Register((int)$box$$reg);
duke@0 3495 Register tmpReg = as_Register($tmp$$reg);
duke@0 3496 Register scrReg = as_Register($scr$$reg);
duke@0 3497 MacroAssembler masm(&cbuf);
duke@0 3498
duke@0 3499 // Verify uniqueness of register assignments -- necessary but not sufficient
duke@0 3500 assert (objReg != boxReg && objReg != tmpReg &&
duke@0 3501 objReg != scrReg && tmpReg != scrReg, "invariant") ;
duke@0 3502
duke@0 3503 if (_counters != NULL) {
duke@0 3504 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
duke@0 3505 }
duke@0 3506 if (EmitSync & 1) {
never@297 3507 // Without cast to int32_t a movptr will destroy r10 which is typically obj
never@297 3508 masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
never@297 3509 masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
duke@0 3510 } else
duke@0 3511 if (EmitSync & 2) {
duke@0 3512 Label DONE_LABEL;
duke@0 3513 if (UseBiasedLocking) {
duke@0 3514 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
duke@0 3515 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
duke@0 3516 }
never@297 3517 // QQQ was movl...
never@297 3518 masm.movptr(tmpReg, 0x1);
never@297 3519 masm.orptr(tmpReg, Address(objReg, 0));
never@297 3520 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3521 if (os::is_MP()) {
duke@0 3522 masm.lock();
duke@0 3523 }
never@297 3524 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 3525 masm.jcc(Assembler::equal, DONE_LABEL);
duke@0 3526
duke@0 3527 // Recursive locking
never@297 3528 masm.subptr(tmpReg, rsp);
never@297 3529 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@297 3530 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3531
duke@0 3532 masm.bind(DONE_LABEL);
duke@0 3533 masm.nop(); // avoid branch to branch
duke@0 3534 } else {
duke@0 3535 Label DONE_LABEL, IsInflated, Egress;
duke@0 3536
never@297 3537 masm.movptr(tmpReg, Address(objReg, 0)) ;
never@297 3538 masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
never@297 3539 masm.jcc (Assembler::notZero, IsInflated) ;
never@297 3540
duke@0 3541 // it's stack-locked, biased or neutral
duke@0 3542 // TODO: optimize markword triage order to reduce the number of
duke@0 3543 // conditional branches in the most common cases.
duke@0 3544 // Beware -- there's a subtle invariant that fetch of the markword
duke@0 3545 // at [FETCH], below, will never observe a biased encoding (*101b).
duke@0 3546 // If this invariant is not held we'll suffer exclusion (safety) failure.
duke@0 3547
kvn@411 3548 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3549 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
never@297 3550 masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
duke@0 3551 }
duke@0 3552
never@297 3553 // was the 64-bit (q) form; will the 32-bit form destroy the high bits?
never@297 3554 masm.orl (tmpReg, 1) ;
never@297 3555 masm.movptr(Address(boxReg, 0), tmpReg) ;
never@297 3556 if (os::is_MP()) { masm.lock(); }
never@297 3557 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
duke@0 3558 if (_counters != NULL) {
duke@0 3559 masm.cond_inc32(Assembler::equal,
duke@0 3560 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3561 }
duke@0 3562 masm.jcc (Assembler::equal, DONE_LABEL);
duke@0 3563
duke@0 3564 // Recursive locking
never@297 3565 masm.subptr(tmpReg, rsp);
never@297 3566 masm.andptr(tmpReg, 7 - os::vm_page_size());
never@297 3567 masm.movptr(Address(boxReg, 0), tmpReg);
duke@0 3568 if (_counters != NULL) {
duke@0 3569 masm.cond_inc32(Assembler::equal,
duke@0 3570 ExternalAddress((address) _counters->fast_path_entry_count_addr()));
duke@0 3571 }
duke@0 3572 masm.jmp (DONE_LABEL) ;
duke@0 3573
duke@0 3574 masm.bind (IsInflated) ;
duke@0 3575 // It's inflated
duke@0 3576
duke@0 3577 // TODO: someday avoid the ST-before-CAS penalty by
duke@0 3578 // relocating (deferring) the following ST.
duke@0 3579 // We should also think about trying a CAS without having
duke@0 3580 // fetched _owner. If the CAS is successful we may
duke@0 3581 // avoid an RTO->RTS upgrade on the $line.
never@297 3582 // Without cast to int32_t a movptr will destroy r10 which is typically obj
never@297 3583 masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
never@297 3584
never@297 3585 masm.mov (boxReg, tmpReg) ;
never@297 3586 masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
never@297 3587 masm.testptr(tmpReg, tmpReg) ;
never@297 3588 masm.jcc (Assembler::notZero, DONE_LABEL) ;
duke@0 3589
duke@0 3590 // It's inflated and appears unlocked
never@297 3591 if (os::is_MP()) { masm.lock(); }
never@297 3592 masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
duke@0 3593 // Intentional fall-through into DONE_LABEL ...
duke@0 3594
duke@0 3595 masm.bind (DONE_LABEL) ;
duke@0 3596 masm.nop () ; // avoid jmp to jmp
duke@0 3597 }
duke@0 3598 %}
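// Outline of the fast-lock path above (descriptive summary of the code as
// written, not a specification):
//   - load the mark word and branch to the inflated path if bit 0x2 is set;
//   - otherwise optionally run biased_locking_enter, then CAS the box
//     (BasicLock) address into the object header to stack-lock it;
//   - on CAS failure, the sub/and test recognizes a recursive lock by the
//     current thread and stores 0 into the box as the displaced header;
//   - on the inflated path, CAS r15_thread into the monitor's owner field if
//     it is currently NULL.  The condition flags report success to the caller.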
duke@0 3599
duke@0 3600 // obj: object to unlock
duke@0 3601 // box: box address (displaced header location), killed
duke@0 3602 // RBX: killed tmp; cannot be obj nor box
duke@0 3603 enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
duke@0 3604 %{
duke@0 3605
duke@0 3606 Register objReg = as_Register($obj$$reg);
duke@0 3607 Register boxReg = as_Register($box$$reg);
duke@0 3608 Register tmpReg = as_Register($tmp$$reg);
duke@0 3609 MacroAssembler masm(&cbuf);
duke@0 3610
never@297 3611 if (EmitSync & 4) {
never@297 3612 masm.cmpptr(rsp, 0) ;
duke@0 3613 } else
duke@0 3614 if (EmitSync & 8) {
duke@0 3615 Label DONE_LABEL;
duke@0 3616 if (UseBiasedLocking) {
duke@0 3617 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3618 }
duke@0 3619
duke@0 3620 // Check whether the displaced header is 0
duke@0 3621 // (=> recursive unlock)
never@297 3622 masm.movptr(tmpReg, Address(boxReg, 0));
never@297 3623 masm.testptr(tmpReg, tmpReg);
duke@0 3624 masm.jcc(Assembler::zero, DONE_LABEL);
duke@0 3625
duke@0 3626 // If not recursive lock, reset the header to displaced header
duke@0 3627 if (os::is_MP()) {
duke@0 3628 masm.lock();
duke@0 3629 }
never@297 3630 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3631 masm.bind(DONE_LABEL);
duke@0 3632 masm.nop(); // avoid branch to branch
duke@0 3633 } else {
duke@0 3634 Label DONE_LABEL, Stacked, CheckSucc ;
duke@0 3635
kvn@411 3636 if (UseBiasedLocking && !UseOptoBiasInlining) {
duke@0 3637 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
duke@0 3638 }
never@297 3639
never@297 3640 masm.movptr(tmpReg, Address(objReg, 0)) ;
never@297 3641 masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
never@297 3642 masm.jcc (Assembler::zero, DONE_LABEL) ;
never@297 3643 masm.testl (tmpReg, 0x02) ;
never@297 3644 masm.jcc (Assembler::zero, Stacked) ;
never@297 3645
duke@0 3646 // It's inflated
never@297 3647 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
never@297 3648 masm.xorptr(boxReg, r15_thread) ;
never@297 3649 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
never@297 3650 masm.jcc (Assembler::notZero, DONE_LABEL) ;
never@297 3651 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
never@297 3652 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
never@297 3653 masm.jcc (Assembler::notZero, CheckSucc) ;
never@297 3654 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
never@297 3655 masm.jmp (DONE_LABEL) ;
never@297 3656
never@297 3657 if ((EmitSync & 65536) == 0) {
duke@0 3658 Label LSuccess, LGoSlowPath ;
duke@0 3659 masm.bind (CheckSucc) ;
never@297 3660 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3661 masm.jcc (Assembler::zero, LGoSlowPath) ;
duke@0 3662
duke@0 3663 // I'd much rather use lock:andl m->_owner, 0 as it's faster than
duke@0 3664 // the explicit ST;MEMBAR combination, but masm doesn't currently support
duke@0 3665 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
duke@0 3666 // are all faster when the write buffer is populated.
never@297 3667 masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3668 if (os::is_MP()) {
never@297 3669 masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
duke@0 3670 }
never@297 3671 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
duke@0 3672 masm.jcc (Assembler::notZero, LSuccess) ;
duke@0 3673
never@297 3674 masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX
duke@0 3675 if (os::is_MP()) { masm.lock(); }
never@297 3676 masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
duke@0 3677 masm.jcc (Assembler::notEqual, LSuccess) ;
duke@0 3678 // Intentional fall-through into slow-path
duke@0 3679
duke@0 3680 masm.bind (LGoSlowPath) ;
duke@0 3681 masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
duke@0 3682 masm.jmp (DONE_LABEL) ;
duke@0 3683
duke@0 3684 masm.bind (LSuccess) ;
duke@0 3685 masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success
duke@0 3686 masm.jmp (DONE_LABEL) ;
duke@0 3687 }
duke@0 3688
never@297 3689 masm.bind (Stacked) ;
never@297 3690 masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
never@297 3691 if (os::is_MP()) { masm.lock(); }
never@297 3692 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
duke@0 3693
duke@0 3694 if (EmitSync & 65536) {
duke@0 3695 masm.bind (CheckSucc) ;
duke@0 3696 }
duke@0 3697 masm.bind(DONE_LABEL);
duke@0 3698 if (EmitSync & 32768) {
duke@0 3699 masm.nop(); // avoid branch to branch
duke@0 3700 }
duke@0 3701 }
duke@0 3702 %}
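// Outline of the fast-unlock path above (descriptive summary of the code as
// written): a zero displaced header means a recursive unlock and succeeds
// immediately; an inflated monitor is released by clearing the owner field,
// with the succ/cxq/EntryList checks deciding whether the slow path must be
// taken to wake a successor; a plain stack lock is released by CAS-ing the
// displaced header back into the object's mark word.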
duke@0 3703
cfang@674 3704 enc_class enc_String_Compare(rdi_RegP str1, rsi_RegP str2, regD tmp1, regD tmp2,
cfang@674 3705 rax_RegI tmp3, rbx_RegI tmp4, rcx_RegI result) %{
duke@0 3706 Label RCX_GOOD_LABEL, LENGTH_DIFF_LABEL,
duke@0 3707 POP_LABEL, DONE_LABEL, CONT_LABEL,
duke@0 3708 WHILE_HEAD_LABEL;
duke@0 3709 MacroAssembler masm(&cbuf);
duke@0 3710
cfang@674 3711 XMMRegister tmp1Reg = as_XMMRegister($tmp1$$reg);
cfang@674 3712 XMMRegister tmp2Reg = as_XMMRegister($tmp2$$reg);
cfang@674 3713
duke@0 3714 // Get the first character position in both strings
duke@0 3715 // [8] char array, [12] offset, [16] count
duke@0 3716 int value_offset = java_lang_String::value_offset_in_bytes();
duke@0 3717 int offset_offset = java_lang_String::offset_offset_in_bytes();
duke@0 3718 int count_offset = java_lang_String::count_offset_in_bytes();
duke@0 3719 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
duke@0 3720
coleenp@108 3721 masm.load_heap_oop(rax, Address(rsi, value_offset));
duke@0 3722 masm.movl(rcx, Address(rsi, offset_offset));
never@297 3723 masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset));
coleenp@108 3724 masm.load_heap_oop(rbx, Address(rdi, value_offset));
duke@0 3725 masm.movl(rcx, Address(rdi, offset_offset));
never@297 3726 masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset));
duke@0 3727
duke@0 3728 // Compute the minimum of the string lengths(rsi) and the
duke@0 3729 // difference of the string lengths (stack)
duke@0 3730
cfang@674 3731 // Use a conditional move to select the minimum length
duke@0 3732 masm.movl(rdi, Address(rdi, count_offset));
duke@0 3733 masm.movl(rsi, Address(rsi, count_offset));
duke@0 3734 masm.movl(rcx, rdi);
duke@0 3735 masm.subl(rdi, rsi);
never@297 3736 masm.push(rdi);
never@297 3737 masm.cmov(Assembler::lessEqual, rsi, rcx);
duke@0 3738
duke@0 3739 // Is the minimum length zero?
duke@0 3740 masm.bind(RCX_GOOD_LABEL);
duke@0 3741 masm.testl(rsi, rsi);
duke@0 3742 masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
duke@0 3743
duke@0 3744 // Load first characters
jrose@601 3745 masm.load_unsigned_short(rcx, Address(rbx, 0));
jrose@601 3746 masm.load_unsigned_short(rdi, Address(rax, 0));
duke@0 3747
duke@0 3748 // Compare first characters
duke@0 3749 masm.subl(rcx, rdi);
duke@0 3750 masm.jcc(Assembler::notZero, POP_LABEL);
duke@0 3751 masm.decrementl(rsi);
duke@0 3752 masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
duke@0 3753
duke@0 3754 {
duke@0 3755 // Check after comparing first character to see if strings are equivalent
duke@0 3756 Label LSkip2;
duke@0 3757 // Check if the strings start at same location
never@297 3758 masm.cmpptr(rbx, rax);
cfang@674 3759 masm.jccb(Assembler::notEqual, LSkip2);
duke@0 3760
duke@0 3761 // Check if the length difference is zero (from stack)
duke@0 3762 masm.cmpl(Address(rsp, 0), 0x0);
duke@0 3763 masm.jcc(Assembler::equal, LENGTH_DIFF_LABEL);
duke@0 3764
duke@0 3765 // Strings might not be equivalent
duke@0 3766 masm.bind(LSkip2);
duke@0 3767 }
duke@0 3768
cfang@674 3769 // Advance to next character
cfang@674 3770 masm.addptr(rax, 2);
cfang@674 3771 masm.addptr(rbx, 2);
cfang@674 3772
cfang@674 3773 if (UseSSE42Intrinsics) {
cfang@674 3774 // With SSE4.2, use double quad vector compare
cfang@674 3775 Label COMPARE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
cfang@674 3776 // Setup to compare 16-byte vectors
cfang@674 3777 masm.movl(rdi, rsi);
cfang@674 3778 masm.andl(rsi, 0xfffffff8); // rsi holds the vector count
cfang@674 3779 masm.andl(rdi, 0x00000007); // rdi holds the tail count
cfang@674 3780 masm.testl(rsi, rsi);
cfang@674 3781 masm.jccb(Assembler::zero, COMPARE_TAIL);
cfang@674 3782
cfang@674 3783 masm.lea(rax, Address(rax, rsi, Address::times_2));
cfang@674 3784 masm.lea(rbx, Address(rbx, rsi, Address::times_2));
cfang@674 3785 masm.negptr(rsi);
cfang@674 3786
cfang@674 3787 masm.bind(COMPARE_VECTORS);
cfang@674 3788 masm.movdqu(tmp1Reg, Address(rax, rsi, Address::times_2));
cfang@674 3789 masm.movdqu(tmp2Reg, Address(rbx, rsi, Address::times_2));
cfang@674 3790 masm.pxor(tmp1Reg, tmp2Reg);
cfang@674 3791 masm.ptest(tmp1Reg, tmp1Reg);
cfang@674 3792 masm.jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
cfang@674 3793 masm.addptr(rsi, 8);
cfang@674 3794 masm.jcc(Assembler::notZero, COMPARE_VECTORS);
cfang@674 3795 masm.jmpb(COMPARE_TAIL);
cfang@674 3796
cfang@674 3797 // Mismatched characters in the vectors
cfang@674 3798 masm.bind(VECTOR_NOT_EQUAL);
cfang@674 3799 masm.lea(rax, Address(rax, rsi, Address::times_2));
cfang@674 3800 masm.lea(rbx, Address(rbx, rsi, Address::times_2));
cfang@674 3801 masm.movl(rdi, 8);
cfang@674 3802
cfang@674 3803 // Compare tail (< 8 chars), or rescan last vectors to
cfang@674 3804 // find 1st mismatched characters
cfang@674 3805 masm.bind(COMPARE_TAIL);
cfang@674 3806 masm.testl(rdi, rdi);
cfang@674 3807 masm.jccb(Assembler::zero, LENGTH_DIFF_LABEL);
cfang@674 3808 masm.movl(rsi, rdi);
cfang@674 3809 // Fallthru to tail compare
cfang@674 3810 }
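// Descriptive note: the SSE4.2 block above compares 8 characters (16 bytes)
// per iteration using movdqu/pxor/ptest; on a mismatch it repositions the
// pointers at the mismatching vector and lets the scalar tail compare below
// locate the first differing character.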
cfang@674 3811
duke@0 3812 // Shift RAX and RBX to the end of the arrays, negate min
cfang@674 3813 masm.lea(rax, Address(rax, rsi, Address::times_2, 0));
cfang@674 3814 masm.lea(rbx, Address(rbx, rsi, Address::times_2, 0));
never@297 3815 masm.negptr