annotate src/cpu/ppc/vm/macroAssembler_ppc.hpp @ 9005:c4567d28f31f

8185979: PPC64: Implement SHA2 intrinsic
Reviewed-by: mdoerr, goetz
Contributed-by: Bruno Rosa <bruno.rosa@eldorado.org.br>, Gustavo Serra Scalet <gustavo.scalet@eldorado.org.br>, Igor Nunes <igor.nunes@eldorado.org.br>, Martin Doerr <martin.doerr@sap.com>
author ogatak
date Tue, 18 Jun 2019 09:33:34 -0400
parents 32bc598624bd
children
rev   line source
goetz@6022 1 /*
mdoerr@8547 2 * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
phh@8974 3 * Copyright (c) 2012, 2017 SAP AG. All rights reserved.
goetz@6022 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
goetz@6022 5 *
goetz@6022 6 * This code is free software; you can redistribute it and/or modify it
goetz@6022 7 * under the terms of the GNU General Public License version 2 only, as
goetz@6022 8 * published by the Free Software Foundation.
goetz@6022 9 *
goetz@6022 10 * This code is distributed in the hope that it will be useful, but WITHOUT
goetz@6022 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
goetz@6022 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
goetz@6022 13 * version 2 for more details (a copy is included in the LICENSE file that
goetz@6022 14 * accompanied this code).
goetz@6022 15 *
goetz@6022 16 * You should have received a copy of the GNU General Public License version
goetz@6022 17 * 2 along with this work; if not, write to the Free Software Foundation,
goetz@6022 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
goetz@6022 19 *
goetz@6022 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
goetz@6022 21 * or visit www.oracle.com if you need additional information or have any
goetz@6022 22 * questions.
goetz@6022 23 *
goetz@6022 24 */
goetz@6022 25
goetz@6022 26 #ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
goetz@6022 27 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
goetz@6022 28
goetz@6022 29 #include "asm/assembler.hpp"
goetz@6022 30
goetz@6022 31 // MacroAssembler extends Assembler by a few frequently used macros.
goetz@6022 32
goetz@6022 33 class ciTypeArray;
goetz@6022 34
goetz@6022 35 class MacroAssembler: public Assembler {
goetz@6022 36 public:
goetz@6022 37 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
goetz@6022 38
goetz@6022 39 //
goetz@6022 40 // Optimized instruction emitters
goetz@6022 41 //
goetz@6022 42
goetz@6022 43 inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
goetz@6022 44 inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
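// Illustrative example (not part of the original source): for si31 = 0x12348765 the helpers
// above yield hi = 0x1235 and lo = -0x789b, so (hi << 16) + lo == si31. The (1<<15)
// rounding term compensates for the sign extension of the low 16-bit displacement in a
// d-form instruction, which is why the pair can be fed directly into an addis + ld/addi sequence.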
goetz@6022 45
goetz@6022 46 // load d = *[a+si31]
goetz@6022 47 // Emits several instructions if the offset is not encodable in one instruction.
goetz@6022 48 void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
goetz@6022 49 void ld_largeoffset (Register d, int si31, Register a, int emit_filler_nop);
goetz@6022 50 inline static bool is_ld_largeoffset(address a);
goetz@6022 51 inline static int get_ld_largeoffset_offset(address a);
goetz@6022 52
goetz@6022 53 inline void round_to(Register r, int modulus);
goetz@6022 54
goetz@6022 55 // Load/store with type given by parameter.
goetz@6022 56 void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
goetz@6022 57 void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);
goetz@6022 58
goetz@6022 59 // Move register if destination and source registers differ.
goetz@6022 60 inline void mr_if_needed(Register rd, Register rs);
goetz@6059 61 inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
goetz@6059 63 // This is dedicated to emitting scheduled mach nodes. For better
goetz@6059 64 // readability of the ad file it is placed here.
goetz@6059 65 // Endgroups are not needed if
goetz@6059 66 // - the scheduler is off, or
goetz@6059 67 // - the scheduler found that there is a natural group end; in that
goetz@6059 68 // case it reduced the size of the instruction used in the test,
goetz@6059 69 // yielding 'needed'.
goetz@6059 69 inline void endgroup_if_needed(bool needed);
goetz@6059 70
goetz@6059 71 // Memory barriers.
goetz@6059 72 inline void membar(int bits);
goetz@6059 73 inline void release();
goetz@6059 74 inline void acquire();
goetz@6059 75 inline void fence();
goetz@6022 76
goetz@6022 77 // nop padding
goetz@6059 78 void align(int modulus, int max = 252, int rem = 0);
goetz@6022 79
goetz@6022 80 //
goetz@6022 81 // Constants, loading constants, TOC support
goetz@6022 82 //
goetz@6022 83
goetz@6022 84 // Address of the global TOC.
goetz@6022 85 inline static address global_toc();
goetz@6022 86 // Offset of given address to the global TOC.
goetz@6022 87 inline static int offset_to_global_toc(const address addr);
goetz@6022 88
goetz@6022 89 // Address of TOC of the current method.
goetz@6022 90 inline address method_toc();
goetz@6022 91 // Offset of given address to TOC of the current method.
goetz@6022 92 inline int offset_to_method_toc(const address addr);
goetz@6022 93
goetz@6022 94 // Global TOC.
goetz@6022 95 void calculate_address_from_global_toc(Register dst, address addr,
goetz@6022 96 bool hi16 = true, bool lo16 = true,
goetz@6022 97 bool add_relocation = true, bool emit_dummy_addr = false);
goetz@6022 98 inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
goetz@6022 99 calculate_address_from_global_toc(dst, addr, true, false);
goetz@6022 100 };
goetz@6022 101 inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
goetz@6022 102 calculate_address_from_global_toc(dst, addr, false, true);
goetz@6022 103 };
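// Sketch of the intended pairing (an assumption based on the declarations above, not a
// quote of the implementation): the hi16/lo16 halves are typically combined as
//   calculate_address_from_global_toc_hi16only(Rdst, addr);  // e.g. addis Rdst, Rtoc, hi
//   calculate_address_from_global_toc_lo16only(Rdst, addr);  // e.g. addi  Rdst, Rdst, lo
// Rdst and Rtoc are placeholder names for the destination and global-TOC registers.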
goetz@6022 104
goetz@6022 105 inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
goetz@6022 106 static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
goetz@6022 107 static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);
goetz@6022 108
goetz@6022 109 #ifdef _LP64
goetz@6022 110 // Patch narrow oop constant.
goetz@6022 111 inline static bool is_set_narrow_oop(address a, address bound);
goetz@6022 112 static int patch_set_narrow_oop(address a, address bound, narrowOop data);
goetz@6022 113 static narrowOop get_narrow_oop(address a, address bound);
goetz@6022 114 #endif
goetz@6022 115
goetz@6022 116 inline static bool is_load_const_at(address a);
goetz@6022 117
goetz@6022 118 // Emits an oop constant to the constant pool, loads the constant, and
goetz@6022 119 // sets relocation info with address current_pc.
goetz@6022 120 void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
goetz@6022 121 void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
goetz@6022 122 assert(dst == R2_TOC, "base register must be TOC");
goetz@6022 123 load_const_from_method_toc(dst, a, toc);
goetz@6022 124 }
goetz@6022 125
goetz@6022 126 static bool is_load_const_from_method_toc_at(address a);
goetz@6022 127 static int get_offset_of_load_const_from_method_toc_at(address a);
goetz@6022 128
goetz@6022 129 // Get the 64 bit constant from a `load_const' sequence.
goetz@6022 130 static long get_const(address load_const);
goetz@6022 131
goetz@6022 132 // Patch the 64 bit constant of a `load_const' sequence. This is a
goetz@6022 133 // low level procedure. It neither flushes the instruction cache nor
goetz@6022 134 // is it atomic.
goetz@6022 135 static void patch_const(address load_const, long x);
goetz@6022 136
goetz@6022 137 // Metadata in code that we have to keep track of.
goetz@6022 138 AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
goetz@6022 139 AddressLiteral constant_metadata_address(Metadata* obj); // find_index
goetz@6022 140 // Oops used directly in compiled code are stored in the constant pool,
goetz@6022 141 // and loaded from there.
goetz@6022 142 // Allocate new entry for oop in constant pool. Generate relocation.
goetz@6022 143 AddressLiteral allocate_oop_address(jobject obj);
goetz@6022 144 // Find oop obj in constant pool. Return relocation with its index.
goetz@6022 145 AddressLiteral constant_oop_address(jobject obj);
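// Minimal usage sketch (composed from the declarations in this header; Rdst is a
// placeholder register):
//   AddressLiteral a = constant_oop_address(jobj);   // find oop in the constant pool
//   load_const_from_method_toc(Rdst, a, R2_TOC);     // emit the TOC-relative load
// set_oop_constant() below provides a convenience wrapper for this use case.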
goetz@6022 146
goetz@6022 147 // Find oop in constant pool and emit instructions to load it.
goetz@6022 148 // Uses constant_oop_address.
goetz@6022 149 inline void set_oop_constant(jobject obj, Register d);
goetz@6022 150 // Same as load_address.
goetz@6022 151 inline void set_oop (AddressLiteral obj_addr, Register d);
goetz@6022 152
goetz@6022 153 // Read runtime constant: Issue load if constant not yet established,
goetz@6022 154 // else use real constant.
goetz@6022 155 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
goetz@6022 156 Register tmp,
goetz@6022 157 int offset);
goetz@6022 158
goetz@6022 159 //
goetz@6022 160 // branch, jump
goetz@6022 161 //
goetz@6022 162
goetz@6022 163 inline void pd_patch_instruction(address branch, address target);
goetz@6022 164 NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)
goetz@6022 165
goetz@6022 166 // Conditional far branch for destinations encodable in 24+2 bits.
goetz@6022 167 // Same interface as bc, e.g. no inverse boint-field.
goetz@6022 168 enum {
goetz@6022 169 bc_far_optimize_not = 0,
goetz@6022 170 bc_far_optimize_on_relocate = 1
goetz@6022 171 };
goetz@6022 172 // optimize: flag for telling the conditional far branch to optimize
goetz@6022 173 // itself when relocated.
goetz@6022 174 void bc_far(int boint, int biint, Label& dest, int optimize);
goetz@6022 175 // Relocation of conditional far branches.
goetz@6022 176 static bool is_bc_far_at(address instruction_addr);
goetz@6022 177 static address get_dest_of_bc_far_at(address instruction_addr);
goetz@6022 178 static void set_dest_of_bc_far_at(address instruction_addr, address dest);
goetz@6022 179 private:
goetz@6022 180 static bool inline is_bc_far_variant1_at(address instruction_addr);
goetz@6022 181 static bool inline is_bc_far_variant2_at(address instruction_addr);
goetz@6022 182 static bool inline is_bc_far_variant3_at(address instruction_addr);
goetz@6022 183 public:
goetz@6022 184
goetz@6022 185 // Convenience bc_far versions.
goetz@6022 186 inline void blt_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 187 inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 188 inline void beq_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 189 inline void bso_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 190 inline void bge_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 191 inline void ble_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 192 inline void bne_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 193 inline void bns_far(ConditionRegister crx, Label& L, int optimize);
goetz@6022 194
goetz@6022 195 // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
goetz@6022 196 private:
goetz@6022 197 enum {
goetz@6022 198 bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*5load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
goetz@6022 199 bxx64_patchable_size = bxx64_patchable_instruction_count * BytesPerInstWord,
goetz@6022 200 bxx64_patchable_ret_addr_offset = bxx64_patchable_size
goetz@6022 201 };
goetz@6022 202 void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
goetz@6022 203 static bool is_bxx64_patchable_at( address instruction_addr, bool link);
goetz@6022 204 // Does the instruction use a pc-relative encoding of the destination?
goetz@6022 205 static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
goetz@6022 206 static bool is_bxx64_patchable_variant1_at( address instruction_addr, bool link);
goetz@6022 207 // Load destination relative to global toc.
goetz@6022 208 static bool is_bxx64_patchable_variant1b_at( address instruction_addr, bool link);
goetz@6022 209 static bool is_bxx64_patchable_variant2_at( address instruction_addr, bool link);
goetz@6022 210 static void set_dest_of_bxx64_patchable_at( address instruction_addr, address target, bool link);
goetz@6022 211 static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);
goetz@6022 212
goetz@6022 213 public:
goetz@6022 214 // call
goetz@6022 215 enum {
goetz@6022 216 bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
goetz@6022 217 bl64_patchable_size = bxx64_patchable_size,
goetz@6022 218 bl64_patchable_ret_addr_offset = bxx64_patchable_ret_addr_offset
goetz@6022 219 };
goetz@6022 220 inline void bl64_patchable(address target, relocInfo::relocType rt) {
goetz@6022 221 bxx64_patchable(target, rt, /*link=*/true);
goetz@6022 222 }
goetz@6022 223 inline static bool is_bl64_patchable_at(address instruction_addr) {
goetz@6022 224 return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
goetz@6022 225 }
goetz@6022 226 inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
goetz@6022 227 return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
goetz@6022 228 }
goetz@6022 229 inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
goetz@6022 230 set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
goetz@6022 231 }
goetz@6022 232 inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
goetz@6022 233 return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
goetz@6022 234 }
goetz@6022 235 // jump
goetz@6022 236 enum {
goetz@6022 237 b64_patchable_instruction_count = bxx64_patchable_instruction_count,
goetz@6022 238 b64_patchable_size = bxx64_patchable_size,
goetz@6022 239 };
goetz@6022 240 inline void b64_patchable(address target, relocInfo::relocType rt) {
goetz@6022 241 bxx64_patchable(target, rt, /*link=*/false);
goetz@6022 242 }
goetz@6022 243 inline static bool is_b64_patchable_at(address instruction_addr) {
goetz@6022 244 return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
goetz@6022 245 }
goetz@6022 246 inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
goetz@6022 247 return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
goetz@6022 248 }
goetz@6022 249 inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
goetz@6022 250 set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
goetz@6022 251 }
goetz@6022 252 inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
goetz@6022 253 return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
goetz@6022 254 }
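// Usage sketch (illustrative; call_site and new_dest are placeholders): emit a patchable
// call and redirect it later, e.g.
//   bl64_patchable(dest, relocInfo::runtime_call_type);
//   ...
//   MacroAssembler::set_dest_of_bl64_patchable_at(call_site, new_dest);
// As noted above, this patching is NOT mt-safe.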
goetz@6022 255
goetz@6022 256 //
goetz@6022 257 // Support for frame handling
goetz@6022 258 //
goetz@6022 259
goetz@6022 260 // some ABI-related functions
goetz@6022 261 void save_nonvolatile_gprs( Register dst_base, int offset);
goetz@6022 262 void restore_nonvolatile_gprs(Register src_base, int offset);
goetz@6022 263 void save_volatile_gprs( Register dst_base, int offset);
goetz@6022 264 void restore_volatile_gprs(Register src_base, int offset);
goetz@6022 265 void save_LR_CR( Register tmp); // tmp contains LR on return.
goetz@6022 266 void restore_LR_CR(Register tmp);
goetz@6022 267
goetz@6022 268 // Get current PC using bl-next-instruction trick.
goetz@6022 269 address get_PC_trash_LR(Register result);
goetz@6022 270
goetz@6022 271 // Resize current frame either relative to the current SP or to an absolute address.
goetz@6022 272 void resize_frame(Register offset, Register tmp);
goetz@6022 273 void resize_frame(int offset, Register tmp);
goetz@6022 274 void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);
goetz@6022 275
goetz@6022 276 // Push a frame of size bytes.
goetz@6022 277 void push_frame(Register bytes, Register tmp);
goetz@6022 278
goetz@6022 279 // Push a frame of size `bytes'. No abi space provided.
goetz@6022 280 void push_frame(unsigned int bytes, Register tmp);
goetz@6022 281
goetz@6075 282 // Push a frame of size `bytes' plus abi_reg_args on top.
goetz@6075 283 void push_frame_reg_args(unsigned int bytes, Register tmp);
goetz@6022 284
goetz@6022 285 // Set up a new C frame with a spill area for non-volatile GPRs and additional
goetz@6022 286 // space for local variables.
goetz@6075 287 void push_frame_reg_args_nonvolatiles(unsigned int bytes, Register tmp);
goetz@6022 288
goetz@6022 289 // pop current C frame
goetz@6022 290 void pop_frame();
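// Typical pairing (a sketch; the frame size 0 and tmp register R0 are placeholder choices):
//   save_LR_CR(R0);               // tmp contains LR on return
//   push_frame_reg_args(0, R0);   // frame with abi_reg_args area on top
//   ...                           // e.g. a C call
//   pop_frame();
//   restore_LR_CR(R0);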
goetz@6022 291
goetz@6022 292 //
goetz@6022 293 // Calls
goetz@6022 294 //
goetz@6022 295
goetz@6022 296 private:
goetz@6022 297 address _last_calls_return_pc;
goetz@6022 298
goetz@6075 299 #if defined(ABI_ELFv2)
goetz@6075 300 // Generic version of a call to C function.
goetz@6075 301 // Updates and returns _last_calls_return_pc.
goetz@6075 302 address branch_to(Register function_entry, bool and_link);
goetz@6075 303 #else
goetz@6022 304 // Generic version of a call to C function via a function descriptor
goetz@6022 305 // with variable support for C calling conventions (TOC, ENV, etc.).
goetz@6022 306 // Updates and returns _last_calls_return_pc.
goetz@6022 307 address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
goetz@6022 308 bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
goetz@6075 309 #endif
goetz@6022 310
goetz@6022 311 public:
goetz@6022 312
goetz@6022 313 // Get the pc where the last call will return to. Returns _last_calls_return_pc.
goetz@6022 314 inline address last_calls_return_pc();
goetz@6022 315
goetz@6075 316 #if defined(ABI_ELFv2)
goetz@6075 317 // Call a C function via its entry point and use full C
goetz@6075 318 // calling conventions. Updates and returns _last_calls_return_pc.
goetz@6075 319 address call_c(Register function_entry);
goetz@6075 320 // For tail calls: only branch, don't link, so callee returns to caller of this function.
goetz@6075 321 address call_c_and_return_to_caller(Register function_entry);
goetz@6075 322 address call_c(address function_entry, relocInfo::relocType rt);
goetz@6075 323 #else
goetz@6022 324 // Call a C function via a function descriptor and use full C
goetz@6022 325 // calling conventions. Updates and returns _last_calls_return_pc.
goetz@6022 326 address call_c(Register function_descriptor);
goetz@6059 327 // For tail calls: only branch, don't link, so callee returns to caller of this function.
goetz@6059 328 address call_c_and_return_to_caller(Register function_descriptor);
goetz@6022 329 address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
goetz@6022 330 address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
goetz@6022 331 Register toc);
goetz@6075 332 #endif
goetz@6022 333
goetz@6022 334 protected:
goetz@6022 335
goetz@6022 336 // It is imperative that all calls into the VM are handled via the
goetz@6022 337 // call_VM macros. They make sure that the stack linkage is setup
goetz@6022 338 // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
goetz@6022 339 // while call_VM_leaf's correspond to LEAF entry points.
goetz@6022 340 //
goetz@6022 341 // This is the base routine called by the different versions of
goetz@6022 342 // call_VM. The interpreter may customize this version by overriding
goetz@6022 343 // it for its purposes (e.g., to save/restore additional registers
goetz@6022 344 // when doing a VM call).
goetz@6022 345 //
goetz@6022 346 // If no last_java_sp is specified (noreg) then SP will be used instead.
goetz@6022 347 virtual void call_VM_base(
goetz@6022 348 // where an oop-result ends up if any; use noreg otherwise
goetz@6022 349 Register oop_result,
goetz@6022 350 // to set up last_Java_frame in stubs; use noreg otherwise
goetz@6022 351 Register last_java_sp,
goetz@6022 352 // the entry point
goetz@6022 353 address entry_point,
goetz@6022 354 // flag which indicates if exception should be checked
goetz@6059 355 bool check_exception = true
goetz@6022 356 );
goetz@6022 357
goetz@6022 358 // Support for VM calls. This is the base routine called by the
goetz@6022 359 // different versions of call_VM_leaf. The interpreter may customize
goetz@6022 360 // this version by overriding it for its purposes (e.g., to
goetz@6022 361 // save/restore additional registers when doing a VM call).
goetz@6022 362 void call_VM_leaf_base(address entry_point);
goetz@6022 363
goetz@6022 364 public:
goetz@6022 365 // Call into the VM.
goetz@6022 366 // Passes the thread pointer (in R3_ARG1) as a prepended argument.
goetz@6022 367 // Makes sure oop return values are visible to the GC.
goetz@6022 368 void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
goetz@6022 369 void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
goetz@6022 370 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
goetz@6966 371 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true);
goetz@6022 372 void call_VM_leaf(address entry_point);
goetz@6022 373 void call_VM_leaf(address entry_point, Register arg_1);
goetz@6022 374 void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
goetz@6022 375 void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
goetz@6022 376
goetz@6022 377 // Call a stub function via a function descriptor, but don't save
goetz@6022 378 // TOC before the call, don't set up TOC and ENV for the call, and don't
goetz@6022 379 // restore TOC after call. Updates and returns _last_calls_return_pc.
goetz@6022 380 inline address call_stub(Register function_entry);
goetz@6022 381 inline void call_stub_and_return_to(Register function_entry, Register return_pc);
goetz@6022 382
goetz@6022 383 //
goetz@6022 384 // Java utilities
goetz@6022 385 //
goetz@6022 386
goetz@6022 387 // Read from the polling page; its address is already in a register.
goetz@6022 388 inline void load_from_polling_page(Register polling_page_address, int offset = 0);
goetz@6022 389 // Check whether instruction is a read access to the polling page
goetz@6022 390 // which was emitted by load_from_polling_page(..).
goetz@6022 391 static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
goetz@6022 392 address* polling_address_ptr = NULL);
goetz@6022 393
goetz@6022 394 // Check whether instruction is a write access to the memory
goetz@6022 395 // serialization page realized by one of the instructions stw, stwu,
goetz@6022 396 // stwx, or stwux.
goetz@6022 397 static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);
goetz@6022 398
goetz@6022 399 // Support for NULL-checks
goetz@6022 400 //
goetz@6022 401 // Generates code that causes a NULL OS exception if the content of reg is NULL.
goetz@6022 402 // If the accessed location is M[reg + offset] and the offset is known, provide the
goetz@6022 403 // offset. No explicit code generation is needed if the offset is within a certain
goetz@6022 404 // range (0 <= offset <= page_size).
goetz@6022 405
goetz@6022 406 // Stack overflow checking
goetz@6022 407 void bang_stack_with_offset(int offset);
goetz@6022 408
goetz@6022 409 // If instruction is a stack bang of the form ld, stdu, or
goetz@6022 410 // stdux, return the banged address. Otherwise, return 0.
goetz@6022 411 static address get_stack_bang_address(int instruction, void* ucontext);
goetz@6022 412
goetz@6022 413 // Atomics
goetz@6022 414 // CmpxchgX sets condition register to cmpX(current, compare).
goetz@6022 415 // (flag == ne) => (dest_current_value != compare_value), (!swapped)
goetz@6022 416 // (flag == eq) => (dest_current_value == compare_value), ( swapped)
goetz@6022 417 static inline bool cmpxchgx_hint_acquire_lock() { return true; }
goetz@6022 418 // The stxcx will probably not be succeeded by a releasing store.
goetz@6022 419 static inline bool cmpxchgx_hint_release_lock() { return false; }
goetz@6022 420 static inline bool cmpxchgx_hint_atomic_update() { return false; }
goetz@6022 421
goetz@6022 422 // Cmpxchg semantics
goetz@6022 423 enum {
goetz@6022 424 MemBarNone = 0,
goetz@6022 425 MemBarRel = 1,
goetz@6022 426 MemBarAcq = 2,
goetz@6022 427 MemBarFenceAfter = 4 // use powers of 2
goetz@6022 428 };
goetz@6022 429 void cmpxchgw(ConditionRegister flag,
goetz@6022 430 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
goetz@6022 431 int semantics, bool cmpxchgx_hint = false,
goetz@6022 432 Register int_flag_success = noreg, bool contention_hint = false);
goetz@6022 433 void cmpxchgd(ConditionRegister flag,
goetz@6022 434 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
goetz@6022 435 int semantics, bool cmpxchgx_hint = false,
goetz@6022 436 Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
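// Illustrative call (registers and label are placeholders, not part of this file):
//   cmpxchgd(CCR0, Rcur, Rcmp, Rnew, Raddr,
//            MemBarAcq, cmpxchgx_hint_acquire_lock(), noreg, &slow_path);
// On success the flag compares equal (see the semantics comment above) and Rcur holds
// the value found at Raddr; on failure the code branches to slow_path when a label is passed.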
goetz@6022 437
goetz@6022 438 // interface method calling
goetz@6022 439 void lookup_interface_method(Register recv_klass,
goetz@6022 440 Register intf_klass,
goetz@6022 441 RegisterOrConstant itable_index,
goetz@6022 442 Register method_result,
goetz@6022 443 Register temp_reg, Register temp2_reg,
mdoerr@8547 444 Label& no_such_interface,
mdoerr@8547 445 bool return_method = true);
goetz@6022 446
goetz@6022 447 // virtual method calling
goetz@6022 448 void lookup_virtual_method(Register recv_klass,
goetz@6022 449 RegisterOrConstant vtable_index,
goetz@6022 450 Register method_result);
goetz@6022 451
goetz@6022 452 // Test sub_klass against super_klass, with fast and slow paths.
goetz@6022 453
goetz@6022 454 // The fast path produces a tri-state answer: yes / no / maybe-slow.
goetz@6022 455 // One of the three labels can be NULL, meaning take the fall-through.
goetz@6022 456 // If super_check_offset is -1, the value is loaded up from super_klass.
goetz@6022 457 // No registers are killed, except temp_reg and temp2_reg.
goetz@6022 458 // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
goetz@6022 459 void check_klass_subtype_fast_path(Register sub_klass,
goetz@6022 460 Register super_klass,
goetz@6022 461 Register temp1_reg,
goetz@6022 462 Register temp2_reg,
goetz@6022 463 Label& L_success,
goetz@6022 464 Label& L_failure);
goetz@6022 465
goetz@6022 466 // The rest of the type check; must be wired to a corresponding fast path.
goetz@6022 467 // It does not repeat the fast path logic, so don't use it standalone.
goetz@6022 468 // The temp_reg can be noreg, if no temps are available.
goetz@6022 469 // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
goetz@6022 470 // Updates the sub's secondary super cache as necessary.
goetz@6022 471 void check_klass_subtype_slow_path(Register sub_klass,
goetz@6022 472 Register super_klass,
goetz@6022 473 Register temp1_reg,
goetz@6022 474 Register temp2_reg,
goetz@6022 475 Label* L_success = NULL,
goetz@6022 476 Register result_reg = noreg);
goetz@6022 477
goetz@6022 478 // Simplified, combined version, good for typical uses.
goetz@6022 479 // Falls through on failure.
goetz@6022 480 void check_klass_subtype(Register sub_klass,
goetz@6022 481 Register super_klass,
goetz@6022 482 Register temp1_reg,
goetz@6022 483 Register temp2_reg,
goetz@6022 484 Label& L_success);
goetz@6022 485
goetz@6022 486 // Method handle support (JSR 292).
goetz@6022 487 void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);
goetz@6022 488
goetz@6022 489 RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
goetz@6022 490
goetz@6022 491 // Biased locking support
goetz@6022 492 // Upon entry, obj_reg must contain the target object, and mark_reg
goetz@6022 493 // must contain the target object's header.
goetz@6022 494 // Destroys mark_reg if an attempt is made to bias an anonymously
goetz@6022 495 // biased lock. In this case a failure will go either to the slow
goetz@6022 496 // case or fall through with the notEqual condition code set with
goetz@6022 497 // the expectation that the slow case in the runtime will be called.
goetz@6022 498 // In the fall-through case where the CAS-based lock is done,
goetz@6022 499 // mark_reg is not destroyed.
goetz@6022 500 void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
goetz@6022 501 Register temp2_reg, Label& done, Label* slow_case = NULL);
goetz@6022 502 // Upon entry, the base register of mark_addr must contain the oop.
goetz@6022 503 // Destroys temp_reg.
goetz@6022 504 // If allow_delay_slot_filling is set to true, the next instruction
goetz@6022 505 // emitted after this one will go in an annulled delay slot if the
goetz@6022 506 // biased locking exit case failed.
goetz@6022 507 void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
goetz@6022 508
goetz@6022 509 void compiler_fast_lock_object( ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
goetz@6022 510 void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
goetz@6022 511
goetz@6022 512 // Support for serializing memory accesses between threads
goetz@6022 513 void serialize_memory(Register thread, Register tmp1, Register tmp2);
goetz@6022 514
goetz@6022 515 // GC barrier support.
goetz@6022 516 void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
goetz@6022 517 void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
goetz@6022 518
phh@8974 519 void resolve_jobject(Register value, Register tmp1, Register tmp2, bool needs_frame);
phh@8974 520
goetz@6079 521 #if INCLUDE_ALL_GCS
goetz@6022 522 // General G1 pre-barrier generator.
goetz@6022 523 void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
goetz@6022 524 Register Rtmp1, Register Rtmp2, bool needs_frame = false);
goetz@6022 525 // General G1 post-barrier generator
goetz@6022 526 void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
goetz@6022 527 Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
goetz@6079 528 #endif
goetz@6022 529
goetz@6022 530 // Support for managing the JavaThread pointer (i.e., the reference to
goetz@6022 531 // thread-local information).
goetz@6022 532
goetz@6022 533 // Support for last Java frame (but use call_VM instead where possible):
goetz@6022 534 // access R16_thread->last_Java_sp.
goetz@6022 535 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
goetz@6022 536 void reset_last_Java_frame(void);
goetz@6022 537 void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
goetz@6022 538
goetz@6022 539 // Read vm result from thread: oop_result = R16_thread->result;
goetz@6022 540 void get_vm_result (Register oop_result);
goetz@6022 541 void get_vm_result_2(Register metadata_result);
goetz@6022 542
goetz@6022 543 static bool needs_explicit_null_check(intptr_t offset);
goetz@6022 544
goetz@6022 545 // Trap-instruction-based checks.
goetz@6022 546 // Range checks can be distinguished from zero checks as they check 32 bits (tw, twi),
goetz@6022 547 // while zero checks check all 64 bits (td, tdi).
goetz@6022 548 inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
goetz@6022 549 static bool is_trap_null_check(int x) {
goetz@6022 550 return is_tdi(x, traptoEqual, -1/*any reg*/, 0) ||
goetz@6022 551 is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
goetz@6022 552 }
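// For example, trap_null_check(Robj) emits a tdi with traptoEqual against 0 (cf. the
// predicate above), so the hardware raises SIGTRAP when Robj is NULL and the signal
// handler converts that into the implicit null-check path. Robj is a placeholder name.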
goetz@6022 553
goetz@6022 554 inline void trap_zombie_not_entrant();
goetz@6022 555 static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }
goetz@6022 556
goetz@6022 557 inline void trap_should_not_reach_here();
goetz@6022 558 static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }
goetz@6022 559
goetz@6022 560 inline void trap_ic_miss_check(Register a, Register b);
goetz@6022 561 static bool is_trap_ic_miss_check(int x) {
goetz@6022 562 return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
goetz@6022 563 }
goetz@6022 564
goetz@6022 565 // Implicit or explicit null check, jumps to static address exception_entry.
goetz@6022 566 inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
goetz@6022 567
goetz@6022 568 // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
goetz@6059 569 inline void load_with_trap_null_check(Register d, int si16, Register s1);
goetz@6022 570
goetz@6022 571 // Load heap oop and decompress. Loaded oop may not be null.
goetz@6022 572 inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
goetz@6076 573 inline void store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1,
goetz@6076 574 /*specify if d must stay uncompressed*/ Register tmp = noreg);
goetz@6022 575
goetz@6022 576 // Null allowed.
goetz@6022 577 inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
goetz@6022 578
goetz@6022 579 // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
goetz@6076 580 inline Register encode_heap_oop_not_null(Register d, Register src = noreg);
goetz@6022 581 inline void decode_heap_oop_not_null(Register d);
goetz@6022 582
goetz@6022 583 // Null allowed.
goetz@6022 584 inline void decode_heap_oop(Register d);
goetz@6022 585
goetz@6022 586 // Load/Store klass oop from klass field. Compress.
goetz@6022 587 void load_klass(Register dst, Register src);
goetz@6022 588 void load_klass_with_trap_null_check(Register dst, Register src);
goetz@6022 589 void store_klass(Register dst_oop, Register klass, Register tmp = R0);
goetz@6076 590 void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.
goetz@6041 591 static int instr_size_for_decode_klass_not_null();
goetz@6022 592 void decode_klass_not_null(Register dst, Register src = noreg);
goetz@6022 593 void encode_klass_not_null(Register dst, Register src = noreg);
goetz@6022 594
goetz@6022 595 // Load common heap base into register.
goetz@6022 596 void reinit_heapbase(Register d, Register tmp = noreg);
goetz@6022 597
goetz@6022 598 // SIGTRAP-based range checks for arrays.
goetz@6022 599 inline void trap_range_check_l(Register a, Register b);
goetz@6022 600 inline void trap_range_check_l(Register a, int si16);
goetz@6022 601 static bool is_trap_range_check_l(int x) {
goetz@6022 602 return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
goetz@6022 603 is_twi(x, traptoLessThanUnsigned, -1/*any reg*/) );
goetz@6022 604 }
goetz@6022 605 inline void trap_range_check_le(Register a, int si16);
goetz@6022 606 static bool is_trap_range_check_le(int x) {
goetz@6022 607 return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
goetz@6022 608 }
goetz@6022 609 inline void trap_range_check_g(Register a, int si16);
goetz@6022 610 static bool is_trap_range_check_g(int x) {
goetz@6022 611 return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
goetz@6022 612 }
goetz@6022 613 inline void trap_range_check_ge(Register a, Register b);
goetz@6022 614 inline void trap_range_check_ge(Register a, int si16);
goetz@6022 615 static bool is_trap_range_check_ge(int x) {
goetz@6022 616 return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
goetz@6022 617 is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/) );
goetz@6022 618 }
goetz@6022 619 static bool is_trap_range_check(int x) {
goetz@6022 620 return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
goetz@6022 621 is_trap_range_check_g(x) || is_trap_range_check_ge(x);
goetz@6022 622 }
goetz@6022 623
goetz@6059 624 void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);
goetz@6059 625
goetz@6022 626 // Needle of length 1.
goetz@6022 627 void string_indexof_1(Register result, Register haystack, Register haycnt,
goetz@6022 628 Register needle, jchar needleChar,
goetz@6022 629 Register tmp1, Register tmp2);
goetz@6022 630 // General indexof, eventually with constant needle length.
goetz@6022 631 void string_indexof(Register result, Register haystack, Register haycnt,
goetz@6022 632 Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
goetz@6022 633 Register tmp1, Register tmp2, Register tmp3, Register tmp4);
goetz@6022 634 void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
goetz@6022 635 Register result_reg, Register tmp_reg);
goetz@6022 636 void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
goetz@6022 637 Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
goetz@6022 638 Register tmp5_reg);
goetz@6022 639 void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
goetz@6022 640 Register tmp1_reg, Register tmp2_reg);
goetz@6022 641
gromero@8735 642 // CRC32 Intrinsics.
gromero@8735 643 void load_reverse_32(Register dst, Register src);
gromero@8735 644 int crc32_table_columns(Register table, Register tc0, Register tc1, Register tc2, Register tc3);
gromero@8735 645 void fold_byte_crc32(Register crc, Register val, Register table, Register tmp);
gromero@8735 646 void fold_8bit_crc32(Register crc, Register table, Register tmp);
gromero@8735 647 void update_byte_crc32(Register crc, Register val, Register table);
gromero@8735 648 void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
gromero@8735 649 Register data, bool loopAlignment, bool invertCRC);
gromero@8735 650 void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
gromero@8735 651 Register t0, Register t1, Register t2, Register t3,
gromero@8735 652 Register tc0, Register tc1, Register tc2, Register tc3);
gromero@8735 653 void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
gromero@8735 654 Register t0, Register t1, Register t2, Register t3,
gromero@8735 655 Register tc0, Register tc1, Register tc2, Register tc3);
gromero@8735 656 void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
gromero@8735 657 Register t0, Register t1, Register t2, Register t3,
gromero@8735 658 Register tc0, Register tc1, Register tc2, Register tc3);
gromero@8735 659 void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
gromero@8735 660 Register t0, Register t1, Register t2, Register t3);
mdoerr@8736 661 void kernel_crc32_1word_vpmsumd(Register crc, Register buf, Register len, Register table,
mdoerr@8736 662 Register constants, Register barretConstants,
mdoerr@8736 663 Register t0, Register t1, Register t2, Register t3, Register t4);
mdoerr@8736 664 void kernel_crc32_1word_aligned(Register crc, Register buf, Register len,
mdoerr@8736 665 Register constants, Register barretConstants,
mdoerr@8736 666 Register t0, Register t1, Register t2);
mdoerr@8736 667
gromero@8735 668 void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
gromero@8735 669
ogatak@9005 670 // SHA-2 auxiliary functions and public interfaces
ogatak@9005 671 private:
ogatak@9005 672 void sha256_deque(const VectorRegister src,
ogatak@9005 673 const VectorRegister dst1, const VectorRegister dst2, const VectorRegister dst3);
ogatak@9005 674 void sha256_load_h_vec(const VectorRegister a, const VectorRegister e, const Register hptr);
ogatak@9005 675 void sha256_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
ogatak@9005 676 void sha256_load_w_plus_k_vec(const Register buf_in, const VectorRegister* ws,
ogatak@9005 677 const int total_ws, const Register k, const VectorRegister* kpws,
ogatak@9005 678 const int total_kpws);
ogatak@9005 679 void sha256_calc_4w(const VectorRegister w0, const VectorRegister w1,
ogatak@9005 680 const VectorRegister w2, const VectorRegister w3, const VectorRegister kpw0,
ogatak@9005 681 const VectorRegister kpw1, const VectorRegister kpw2, const VectorRegister kpw3,
ogatak@9005 682 const Register j, const Register k);
ogatak@9005 683 void sha256_update_sha_state(const VectorRegister a, const VectorRegister b,
ogatak@9005 684 const VectorRegister c, const VectorRegister d, const VectorRegister e,
ogatak@9005 685 const VectorRegister f, const VectorRegister g, const VectorRegister h,
ogatak@9005 686 const Register hptr);
ogatak@9005 687
ogatak@9005 688 void sha512_load_w_vec(const Register buf_in, const VectorRegister* ws, const int total_ws);
ogatak@9005 689 void sha512_update_sha_state(const Register state, const VectorRegister* hs, const int total_hs);
ogatak@9005 690 void sha512_round(const VectorRegister* hs, const int total_hs, int& h_cnt, const VectorRegister kpw);
ogatak@9005 691 void sha512_load_h_vec(const Register state, const VectorRegister* hs, const int total_hs);
ogatak@9005 692 void sha512_calc_2w(const VectorRegister w0, const VectorRegister w1,
ogatak@9005 693 const VectorRegister w2, const VectorRegister w3,
ogatak@9005 694 const VectorRegister w4, const VectorRegister w5,
ogatak@9005 695 const VectorRegister w6, const VectorRegister w7,
ogatak@9005 696 const VectorRegister kpw0, const VectorRegister kpw1, const Register j,
ogatak@9005 697 const VectorRegister vRb, const Register k);
ogatak@9005 698
ogatak@9005 699 public:
ogatak@9005 700 void sha256(bool multi_block);
ogatak@9005 701 void sha512(bool multi_block);
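// Expected use (sketch; the register conventions are an assumption not spelled out in
// this header): a SHA stub generator loads the message-buffer and state pointers and
// then emits one of
//   sha256(/*multi_block=*/false);  // compress a single block
//   sha256(/*multi_block=*/true);   // loop over blocks until the limit is reached
// and analogously sha512(multi_block) for SHA-512.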
ogatak@9005 702
ogatak@9005 703
goetz@6022 704 //
goetz@6022 705 // Debugging
goetz@6022 706 //
goetz@6022 707
goetz@6022 708 // assert on cr0
goetz@6022 709 void asm_assert(bool check_equal, const char* msg, int id);
goetz@6022 710 void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
goetz@6022 711 void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }
goetz@6022 712
goetz@6022 713 private:
goetz@6022 714 void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
goetz@6022 715 const char* msg, int id);
goetz@6022 716
goetz@6022 717 public:
goetz@6022 718
goetz@6022 719 void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
goetz@6022 720 asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg, id);
goetz@6022 721 }
goetz@6022 722 void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
goetz@6022 723 asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
goetz@6022 724 }
goetz@6022 725
goetz@6022 726 // Verify R16_thread contents.
goetz@6022 727 void verify_thread();
goetz@6022 728
goetz@6022 729 // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
goetz@6022 730 void verify_oop(Register reg, const char* s = "broken oop");
goetz@6022 731
goetz@6022 732 // TODO: verify method and klass metadata (compare against vptr?)
goetz@6022 733 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
goetz@6059 734 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}
goetz@6022 735
goetz@6075 736 // Convenience method returning the function entry. For the ELFv1 case it
goetz@6075 737 // creates a function descriptor at the current address and returns
goetz@6075 738 // the pointer to it. For the ELFv2 case it returns the current address.
goetz@6075 739 inline address function_entry();
goetz@6075 740
goetz@6022 741 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
goetz@6022 742 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
goetz@6022 743
goetz@6022 744 private:
goetz@6022 745
goetz@6022 746 enum {
goetz@6022 747 stop_stop = 0,
goetz@6022 748 stop_untested = 1,
goetz@6022 749 stop_unimplemented = 2,
goetz@6022 750 stop_shouldnotreachhere = 3,
goetz@6022 751 stop_end = 4
goetz@6022 752 };
goetz@6022 753 void stop(int type, const char* msg, int id);
goetz@6022 754
goetz@6022 755 public:
goetz@6022 756 // Prints msg, dumps registers and stops execution.
goetz@6022 757 void stop (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); }
goetz@6022 758 void untested (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); }
goetz@6022 759 void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
goetz@6022 760 void should_not_reach_here() { stop(stop_shouldnotreachhere, "", -1); }
goetz@6022 761
goetz@6022 762 void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
goetz@6022 763 };
goetz@6022 764
goetz@6076 765 // class SkipIfEqualZero:
goetz@6076 766 //
goetz@6076 767 // Instantiating this class will result in assembly code being output that will
goetz@6076 768 // jump around any code emitted between the creation of the instance and its
goetz@6076 769 // automatic destruction at the end of a scope block, depending on the value of
goetz@6076 770 // the flag passed to the constructor, which will be checked at run-time.
goetz@6076 771 class SkipIfEqualZero : public StackObj {
goetz@6076 772 private:
goetz@6076 773 MacroAssembler* _masm;
goetz@6076 774 Label _label;
goetz@6076 775
goetz@6076 776 public:
goetz@6076 777 // 'Temp' is a temp register that this object can use (and trash).
goetz@6076 778 explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
goetz@6076 779 ~SkipIfEqualZero();
goetz@6076 780 };
goetz@6076 781
goetz@6022 782 #endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP