annotate src/cpu/sparc/vm/sharedRuntime_sparc.cpp @ 991:dcf03e02b020

6879902: CTW failure jdk6_18/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp:845
Summary: For signatures with a large number of arguments the offset for the float store becomes too big and does not fit in 13 bits.
Reviewed-by: kvn, never
author twisti
date Tue, 06 Oct 2009 02:11:49 -0700
parents 6b2273dd6fa9
children 1ce3281a8e93
rev   line source
duke@0 1 /*
jrose@689 2 * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
duke@0 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 * have any questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25 #include "incls/_precompiled.incl"
duke@0 26 #include "incls/_sharedRuntime_sparc.cpp.incl"
duke@0 27
duke@0 28 #define __ masm->
duke@0 29
duke@0 30 #ifdef COMPILER2
duke@0 31 UncommonTrapBlob* SharedRuntime::_uncommon_trap_blob;
duke@0 32 #endif // COMPILER2
duke@0 33
duke@0 34 DeoptimizationBlob* SharedRuntime::_deopt_blob;
duke@0 35 SafepointBlob* SharedRuntime::_polling_page_safepoint_handler_blob;
duke@0 36 SafepointBlob* SharedRuntime::_polling_page_return_handler_blob;
duke@0 37 RuntimeStub* SharedRuntime::_wrong_method_blob;
duke@0 38 RuntimeStub* SharedRuntime::_ic_miss_blob;
duke@0 39 RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob;
duke@0 40 RuntimeStub* SharedRuntime::_resolve_virtual_call_blob;
duke@0 41 RuntimeStub* SharedRuntime::_resolve_static_call_blob;
duke@0 42
duke@0 43 class RegisterSaver {
duke@0 44
duke@0 45 // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
duke@0 46 // The Oregs are problematic. In the 32bit build the compiler can
duke@0 47 // have O registers live with 64 bit quantities. A window save will
duke@0 48 // cut the heads off of the registers. We have to do a very extensive
duke@0 49 // stack dance to save and restore these properly.
duke@0 50
duke@0 51 // Note that the Oregs problem only exists if we block at either a polling
duke@0 52 // page exception or a compiled code safepoint that was not originally a call,
duke@0 53 // or if we deoptimize following one of these kinds of safepoints.
duke@0 54
duke@0 55 // Lots of registers to save. For all builds, a window save will preserve
duke@0 56 // the %i and %l registers. For the 32-bit longs-in-two-entries and 64-bit
duke@0 57 // builds, a window-save will preserve the %o registers. In the LION build
duke@0 58 // we need to save the 64-bit %o registers which requires we save them
duke@0 59 // before the window-save (as then they become %i registers and get their
duke@0 60 // heads chopped off on interrupt). We have to save some %g registers here
duke@0 61 // as well.
duke@0 62 enum {
duke@0 63 // This frame's save area. Includes extra space for the native call:
duke@0 64 // varargs layout space and the like. Briefly holds the caller's
duke@0 65 // register save area.
duke@0 66 call_args_area = frame::register_save_words_sp_offset +
duke@0 67 frame::memory_parameter_word_sp_offset*wordSize,
duke@0 68 // Make sure save locations are always 8 byte aligned.
duke@0 69 // can't use round_to because it doesn't produce compile time constant
duke@0 70 start_of_extra_save_area = ((call_args_area + 7) & ~7),
duke@0 71 g1_offset = start_of_extra_save_area, // g-regs needing saving
duke@0 72 g3_offset = g1_offset+8,
duke@0 73 g4_offset = g3_offset+8,
duke@0 74 g5_offset = g4_offset+8,
duke@0 75 o0_offset = g5_offset+8,
duke@0 76 o1_offset = o0_offset+8,
duke@0 77 o2_offset = o1_offset+8,
duke@0 78 o3_offset = o2_offset+8,
duke@0 79 o4_offset = o3_offset+8,
duke@0 80 o5_offset = o4_offset+8,
duke@0 81 start_of_flags_save_area = o5_offset+8,
duke@0 82 ccr_offset = start_of_flags_save_area,
duke@0 83 fsr_offset = ccr_offset + 8,
duke@0 84 d00_offset = fsr_offset+8, // Start of float save area
duke@0 85 register_save_size = d00_offset+8*32
duke@0 86 };
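// To make the layout above concrete, here is a worked example with a
// hypothetical base (the real start_of_extra_save_area depends on
// frame::register_save_words_sp_offset and the build): if
// start_of_extra_save_area were 0x60, then g1_offset = 0x60,
// g3_offset = 0x68, g4_offset = 0x70, g5_offset = 0x78,
// o0_offset..o5_offset = 0x80..0xa8, ccr_offset = 0xb0, fsr_offset = 0xb8,
// d00_offset = 0xc0, and register_save_size = 0xc0 + 256 (8 bytes for each
// of the 32 double registers covering all 64 single-precision regs).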
duke@0 87
duke@0 88
duke@0 89 public:
duke@0 90
duke@0 91 static int Oexception_offset() { return o0_offset; };
duke@0 92 static int G3_offset() { return g3_offset; };
duke@0 93 static int G5_offset() { return g5_offset; };
duke@0 94 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
duke@0 95 static void restore_live_registers(MacroAssembler* masm);
duke@0 96
duke@0 97 // During deoptimization only the result registers need to be restored;
duke@0 98 // all the other values have already been extracted.
duke@0 99
duke@0 100 static void restore_result_registers(MacroAssembler* masm);
duke@0 101 };
duke@0 102
duke@0 103 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
duke@0 104 // Record volatile registers as callee-save values in an OopMap so their save locations will be
duke@0 105 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
duke@0 106 // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
duke@0 107 // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
duke@0 108 // (as the stub's I's) when the runtime routine called by the stub creates its frame.
duke@0 109 int i;
duke@0 110 // Always make the frame size 16-byte aligned.
duke@0 111 int frame_size = round_to(additional_frame_words + register_save_size, 16);
duke@0 112 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
duke@0 113 int frame_size_in_slots = frame_size / sizeof(jint);
duke@0 114 // CodeBlob frame size is in words.
duke@0 115 *total_frame_words = frame_size / wordSize;
duke@0 116 // OopMap* map = new OopMap(*total_frame_words, 0);
duke@0 117 OopMap* map = new OopMap(frame_size_in_slots, 0);
duke@0 118
duke@0 119 #if !defined(_LP64)
duke@0 120
duke@0 121 // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
duke@0 122 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
duke@0 123 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
duke@0 124 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
duke@0 125 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
duke@0 126 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
duke@0 127 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
duke@0 128 #endif /* _LP64 */
duke@0 129
duke@0 130 __ save(SP, -frame_size, SP);
duke@0 131
duke@0 132 #ifndef _LP64
duke@0 133 // Reload the 64 bit Oregs. Although they are now Iregs we load them
duke@0 134 // to Oregs here to avoid interrupts cutting off their heads
duke@0 135
duke@0 136 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
duke@0 137 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
duke@0 138 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
duke@0 139 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
duke@0 140 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
duke@0 141 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
duke@0 142
duke@0 143 __ stx(O0, SP, o0_offset+STACK_BIAS);
duke@0 144 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());
duke@0 145
duke@0 146 __ stx(O1, SP, o1_offset+STACK_BIAS);
duke@0 147
duke@0 148 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());
duke@0 149
duke@0 150 __ stx(O2, SP, o2_offset+STACK_BIAS);
duke@0 151 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());
duke@0 152
duke@0 153 __ stx(O3, SP, o3_offset+STACK_BIAS);
duke@0 154 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());
duke@0 155
duke@0 156 __ stx(O4, SP, o4_offset+STACK_BIAS);
duke@0 157 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());
duke@0 158
duke@0 159 __ stx(O5, SP, o5_offset+STACK_BIAS);
duke@0 160 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
duke@0 161 #endif /* _LP64 */
duke@0 162
coleenp@108 163
coleenp@108 164 #ifdef _LP64
coleenp@108 165 int debug_offset = 0;
coleenp@108 166 #else
coleenp@108 167 int debug_offset = 4;
coleenp@108 168 #endif
duke@0 169 // Save the G's
duke@0 170 __ stx(G1, SP, g1_offset+STACK_BIAS);
coleenp@108 171 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());
duke@0 172
duke@0 173 __ stx(G3, SP, g3_offset+STACK_BIAS);
coleenp@108 174 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());
duke@0 175
duke@0 176 __ stx(G4, SP, g4_offset+STACK_BIAS);
coleenp@108 177 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());
duke@0 178
duke@0 179 __ stx(G5, SP, g5_offset+STACK_BIAS);
coleenp@108 180 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());
duke@0 181
duke@0 182 // This is really a waste but we'll keep things as they were for now
duke@0 183 if (true) {
duke@0 184 #ifndef _LP64
duke@0 185 map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
duke@0 186 map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
duke@0 187 map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
duke@0 188 map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
duke@0 189 map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
duke@0 190 map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
duke@0 191 map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
duke@0 192 map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
duke@0 193 map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
duke@0 194 map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
coleenp@108 195 #endif /* _LP64 */
duke@0 196 }
duke@0 197
duke@0 198
duke@0 199 // Save the flags
duke@0 200 __ rdccr( G5 );
duke@0 201 __ stx(G5, SP, ccr_offset+STACK_BIAS);
duke@0 202 __ stxfsr(SP, fsr_offset+STACK_BIAS);
duke@0 203
duke@0 204 // Save all the FP registers
duke@0 205 int offset = d00_offset;
duke@0 206 for( int i=0; i<64; i+=2 ) {
duke@0 207 FloatRegister f = as_FloatRegister(i);
duke@0 208 __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
duke@0 209 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
duke@0 210 if (true) {
duke@0 211 map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
duke@0 212 }
duke@0 213 offset += sizeof(double);
duke@0 214 }
duke@0 215
duke@0 216 // And we're done.
duke@0 217
duke@0 218 return map;
duke@0 219 }
duke@0 220
duke@0 221
duke@0 222 // Pop the current frame and restore all the registers that we
duke@0 223 // saved.
duke@0 224 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
duke@0 225
duke@0 226 // Restore all the FP registers
duke@0 227 for( int i=0; i<64; i+=2 ) {
duke@0 228 __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
duke@0 229 }
duke@0 230
duke@0 231 __ ldx(SP, ccr_offset+STACK_BIAS, G1);
duke@0 232 __ wrccr (G1) ;
duke@0 233
duke@0 234 // Restore the G's
duke@0 235 // Note that G2 (AKA GThread) must be saved and restored separately.
duke@0 236 // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.
duke@0 237
duke@0 238 __ ldx(SP, g1_offset+STACK_BIAS, G1);
duke@0 239 __ ldx(SP, g3_offset+STACK_BIAS, G3);
duke@0 240 __ ldx(SP, g4_offset+STACK_BIAS, G4);
duke@0 241 __ ldx(SP, g5_offset+STACK_BIAS, G5);
duke@0 242
duke@0 243
duke@0 244 #if !defined(_LP64)
duke@0 245 // Restore the 64-bit O's.
duke@0 246 __ ldx(SP, o0_offset+STACK_BIAS, O0);
duke@0 247 __ ldx(SP, o1_offset+STACK_BIAS, O1);
duke@0 248 __ ldx(SP, o2_offset+STACK_BIAS, O2);
duke@0 249 __ ldx(SP, o3_offset+STACK_BIAS, O3);
duke@0 250 __ ldx(SP, o4_offset+STACK_BIAS, O4);
duke@0 251 __ ldx(SP, o5_offset+STACK_BIAS, O5);
duke@0 252
duke@0 253 // And temporarily place them in TLS
duke@0 254
duke@0 255 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
duke@0 256 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
duke@0 257 __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
duke@0 258 __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
duke@0 259 __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
duke@0 260 __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
duke@0 261 #endif /* _LP64 */
duke@0 262
duke@0 263 // Restore flags
duke@0 264
duke@0 265 __ ldxfsr(SP, fsr_offset+STACK_BIAS);
duke@0 266
duke@0 267 __ restore();
duke@0 268
duke@0 269 #if !defined(_LP64)
duke@0 270 // Now reload the 64-bit Oregs after we've restored the window.
duke@0 271 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
duke@0 272 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
duke@0 273 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
duke@0 274 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
duke@0 275 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
duke@0 276 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
duke@0 277 #endif /* _LP64 */
duke@0 278
duke@0 279 }
duke@0 280
duke@0 281 // Pop the current frame and restore the registers that might be holding
duke@0 282 // a result.
duke@0 283 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
duke@0 284
duke@0 285 #if !defined(_LP64)
duke@0 286 // 32bit build returns longs in G1
duke@0 287 __ ldx(SP, g1_offset+STACK_BIAS, G1);
duke@0 288
duke@0 289 // Retrieve the 64-bit O's.
duke@0 290 __ ldx(SP, o0_offset+STACK_BIAS, O0);
duke@0 291 __ ldx(SP, o1_offset+STACK_BIAS, O1);
duke@0 292 // and save to TLS
duke@0 293 __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
duke@0 294 __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
duke@0 295 #endif /* _LP64 */
duke@0 296
duke@0 297 __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
duke@0 298
duke@0 299 __ restore();
duke@0 300
duke@0 301 #if !defined(_LP64)
duke@0 302 // Now reload the 64-bit Oregs after we've restored the window.
duke@0 303 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
duke@0 304 __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
duke@0 305 #endif /* _LP64 */
duke@0 306
duke@0 307 }
duke@0 308
duke@0 309 // The java_calling_convention describes stack locations as ideal slots on
duke@0 310 // a frame with no abi restrictions. Since we must observe abi restrictions
duke@0 311 // (like the placement of the register window) the slots must be biased by
duke@0 312 // the following value.
duke@0 313 static int reg2offset(VMReg r) {
duke@0 314 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
duke@0 315 }
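// A hedged numeric example (the slot count below is illustrative only;
// the real out_preserve_stack_slots() value is platform-defined): with
// stack_slot_size == 4, r->reg2stack() == 3 and out_preserve_stack_slots()
// returning 16, reg2offset would yield (3 + 16) * 4 = 76, i.e. the slot
// lands 76 bytes above the ABI-reserved window area.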
duke@0 316
duke@0 317 // ---------------------------------------------------------------------------
duke@0 318 // Read the array of BasicTypes from a signature, and compute where the
duke@0 319 // arguments should go. Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
duke@0 320 // quantities. Values less than VMRegImpl::stack0 are registers, those above
duke@0 321 // refer to 4-byte stack slots. All stack slots are based off of the window
duke@0 322 // top. VMRegImpl::stack0 refers to the first slot past the 16-word window,
duke@0 323 // and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
duke@0 324 // values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
duke@0 325 // integer registers. Values 64-95 are the (32-bit only) float registers.
duke@0 326 // Each 32-bit quantity is given its own number, so the integer registers
duke@0 327 // (in either 32- or 64-bit builds) use 2 numbers. For example, there is
duke@0 328 // an O0-low and an O0-high. Essentially, all int register numbers are doubled.
duke@0 329
duke@0 330 // Register results are passed in O0-O5, for outgoing call arguments. To
duke@0 331 // convert to incoming arguments, convert all O's to I's. The regs array
duke@0 332 // refers to the low and hi 32-bit words of 64-bit registers or stack slots.
duke@0 333 // If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
duke@0 334 // 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
duke@0 335 // passed (used as a placeholder for the other half of longs and doubles in
duke@0 336 // the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
duke@0 337 // regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
duke@0 338 // Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
duke@0 339 // == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
duke@0 340 // same VMRegPair.
duke@0 341
duke@0 342 // Note: the INPUTS in sig_bt are in units of Java argument words, which are
duke@0 343 // either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
duke@0 344 // units regardless of build.
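// To illustrate the encoding (register choices here are hypothetical, not
// a statement of the actual assignment): for a signature (int, long) the
// int might get regs[0].first() = O0-low with regs[0].second() ==
// VMRegImpl::Bad() (a 32-bit value was passed); the long occupies a full
// pair spanning both halves of some register, with regs[1].second() ==
// regs[1].first()+1; and the trailing T_VOID half at regs[2] has both
// halves set to VMRegImpl::Bad().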
duke@0 345
duke@0 346
duke@0 347 // ---------------------------------------------------------------------------
duke@0 348 // The compiled Java calling convention. The Java convention always passes
duke@0 349 // 64-bit values in adjacent aligned locations (either registers or stack),
duke@0 350 // floats in float registers and doubles in aligned float pairs. Values are
duke@0 351 // packed in the registers. There is no backing varargs store for values in
duke@0 352 // registers. In the 32-bit build, longs are passed in G1 and G4 (cannot be
duke@0 353 // passed in I's, because longs in I's get their heads chopped off at
duke@0 354 // interrupt).
duke@0 355 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
duke@0 356 VMRegPair *regs,
duke@0 357 int total_args_passed,
duke@0 358 int is_outgoing) {
duke@0 359 assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");
duke@0 360
duke@0 361 // Convention is to pack the first 6 int/oop args into the first 6 registers
duke@0 362 // (I0-I5), extras spill to the stack. Then pack the first 8 float args
duke@0 363 // into F0-F7, extras spill to the stack. Then pad all register sets to
duke@0 364 // align. Then put longs and doubles into the same registers as they fit,
duke@0 365 // else spill to the stack.
duke@0 366 const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
duke@0 367 const int flt_reg_max = 8;
duke@0 368 //
duke@0 369 // Where 32-bit 1-reg longs start being passed
duke@0 370 // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
duke@0 371 // So make it look like we've filled all the G regs that c2 wants to use.
duke@0 372 Register g_reg = TieredCompilation ? noreg : G1;
duke@0 373
duke@0 374 // Count int/oop and float args. See how many stack slots we'll need and
duke@0 375 // where the longs & doubles will go.
duke@0 376 int int_reg_cnt = 0;
duke@0 377 int flt_reg_cnt = 0;
duke@0 378 // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
duke@0 379 // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
duke@0 380 int stk_reg_pairs = 0;
duke@0 381 for (int i = 0; i < total_args_passed; i++) {
duke@0 382 switch (sig_bt[i]) {
duke@0 383 case T_LONG: // LP64, longs compete with int args
duke@0 384 assert(sig_bt[i+1] == T_VOID, "");
duke@0 385 #ifdef _LP64
duke@0 386 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
duke@0 387 #endif
duke@0 388 break;
duke@0 389 case T_OBJECT:
duke@0 390 case T_ARRAY:
duke@0 391 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
duke@0 392 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
duke@0 393 #ifndef _LP64
duke@0 394 else stk_reg_pairs++;
duke@0 395 #endif
duke@0 396 break;
duke@0 397 case T_INT:
duke@0 398 case T_SHORT:
duke@0 399 case T_CHAR:
duke@0 400 case T_BYTE:
duke@0 401 case T_BOOLEAN:
duke@0 402 if (int_reg_cnt < int_reg_max) int_reg_cnt++;
duke@0 403 else stk_reg_pairs++;
duke@0 404 break;
duke@0 405 case T_FLOAT:
duke@0 406 if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
duke@0 407 else stk_reg_pairs++;
duke@0 408 break;
duke@0 409 case T_DOUBLE:
duke@0 410 assert(sig_bt[i+1] == T_VOID, "");
duke@0 411 break;
duke@0 412 case T_VOID:
duke@0 413 break;
duke@0 414 default:
duke@0 415 ShouldNotReachHere();
duke@0 416 }
duke@0 417 }
duke@0 418
duke@0 419 // This is where the longs/doubles start on the stack.
duke@0 420 stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round
duke@0 421
duke@0 422 int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
duke@0 423 int flt_reg_pairs = (flt_reg_cnt+1) & ~1;
duke@0 424
duke@0 425 // int stk_reg = frame::register_save_words*(wordSize>>2);
duke@0 426 // int stk_reg = SharedRuntime::out_preserve_stack_slots();
duke@0 427 int stk_reg = 0;
duke@0 428 int int_reg = 0;
duke@0 429 int flt_reg = 0;
duke@0 430
duke@0 431 // Now do the signature layout
duke@0 432 for (int i = 0; i < total_args_passed; i++) {
duke@0 433 switch (sig_bt[i]) {
duke@0 434 case T_INT:
duke@0 435 case T_SHORT:
duke@0 436 case T_CHAR:
duke@0 437 case T_BYTE:
duke@0 438 case T_BOOLEAN:
duke@0 439 #ifndef _LP64
duke@0 440 case T_OBJECT:
duke@0 441 case T_ARRAY:
duke@0 442 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
duke@0 443 #endif // _LP64
duke@0 444 if (int_reg < int_reg_max) {
duke@0 445 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
duke@0 446 regs[i].set1(r->as_VMReg());
duke@0 447 } else {
duke@0 448 regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
duke@0 449 }
duke@0 450 break;
duke@0 451
duke@0 452 #ifdef _LP64
duke@0 453 case T_OBJECT:
duke@0 454 case T_ARRAY:
duke@0 455 case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
duke@0 456 if (int_reg < int_reg_max) {
duke@0 457 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
duke@0 458 regs[i].set2(r->as_VMReg());
duke@0 459 } else {
duke@0 460 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
duke@0 461 stk_reg_pairs += 2;
duke@0 462 }
duke@0 463 break;
duke@0 464 #endif // _LP64
duke@0 465
duke@0 466 case T_LONG:
duke@0 467 assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
duke@0 468 #ifdef _LP64
duke@0 469 if (int_reg < int_reg_max) {
duke@0 470 Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
duke@0 471 regs[i].set2(r->as_VMReg());
duke@0 472 } else {
duke@0 473 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
duke@0 474 stk_reg_pairs += 2;
duke@0 475 }
duke@0 476 #else
never@297 477 #ifdef COMPILER2
duke@0 478 // For 32-bit build, can't pass longs in O-regs because they become
duke@0 479 // I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
duke@0 480 // spare and available. This convention isn't used by the Sparc ABI or
duke@0 481 // anywhere else. If we're tiered then we don't use G-regs because c1
never@297 482 // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
duke@0 483 // G0: zero
duke@0 484 // G1: 1st Long arg
duke@0 485 // G2: global allocated to TLS
duke@0 486 // G3: used in inline cache check
duke@0 487 // G4: 2nd Long arg
duke@0 488 // G5: used in inline cache check
duke@0 489 // G6: used by OS
duke@0 490 // G7: used by OS
duke@0 491
duke@0 492 if (g_reg == G1) {
duke@0 493 regs[i].set2(G1->as_VMReg()); // This long arg in G1
duke@0 494 g_reg = G4; // Where the next arg goes
duke@0 495 } else if (g_reg == G4) {
duke@0 496 regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
duke@0 497 g_reg = noreg; // No more longs in registers
duke@0 498 } else {
duke@0 499 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
duke@0 500 stk_reg_pairs += 2;
duke@0 501 }
duke@0 502 #else // COMPILER2
duke@0 503 if (int_reg_pairs + 1 < int_reg_max) {
duke@0 504 if (is_outgoing) {
duke@0 505 regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
duke@0 506 } else {
duke@0 507 regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
duke@0 508 }
duke@0 509 int_reg_pairs += 2;
duke@0 510 } else {
duke@0 511 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
duke@0 512 stk_reg_pairs += 2;
duke@0 513 }
duke@0 514 #endif // COMPILER2
never@297 515 #endif // _LP64
duke@0 516 break;
duke@0 517
duke@0 518 case T_FLOAT:
duke@0 519 if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
duke@0 520 else regs[i].set1( VMRegImpl::stack2reg(stk_reg++));
duke@0 521 break;
duke@0 522 case T_DOUBLE:
duke@0 523 assert(sig_bt[i+1] == T_VOID, "expecting half");
duke@0 524 if (flt_reg_pairs + 1 < flt_reg_max) {
duke@0 525 regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
duke@0 526 flt_reg_pairs += 2;
duke@0 527 } else {
duke@0 528 regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
duke@0 529 stk_reg_pairs += 2;
duke@0 530 }
duke@0 531 break;
duke@0 532 case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
duke@0 533 default:
duke@0 534 ShouldNotReachHere();
duke@0 535 }
duke@0 536 }
duke@0 537
duke@0 538 // Return the amount of stack space these arguments will need.
duke@0 539 return stk_reg_pairs;
duke@0 540
duke@0 541 }
duke@0 542
twisti@991 543 // Helper class mostly to avoid passing masm everywhere, and handle
twisti@991 544 // store displacement overflow logic.
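// Background on the overflow (range per the SPARC ISA): load/store
// immediate displacements are signed 13-bit (simm13), i.e. -4096..4095.
// With a large enough argument count the interpreter-frame offsets
// computed below exceed that range, so the *_slot() helpers return a
// RegisterOrConstant that falls back to the Rdisp scratch register when
// the constant does not fit (the 6879902 fix).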
duke@0 545 class AdapterGenerator {
duke@0 546 MacroAssembler *masm;
duke@0 547 Register Rdisp;
duke@0 548 void set_Rdisp(Register r) { Rdisp = r; }
duke@0 549
duke@0 550 void patch_callers_callsite();
duke@0 551 void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
duke@0 552
duke@0 553 // base+st_off points to top of argument
duke@0 554 int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
duke@0 555 int next_arg_offset(const int st_off) {
duke@0 556 return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
duke@0 557 }
duke@0 558
twisti@991 559 int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
twisti@991 560 int next_tag_offset(const int st_off) {
twisti@991 561 return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
twisti@991 562 }
twisti@991 563
twisti@991 564 // Argument slot values may be loaded first into a register because
twisti@991 565 // they might not fit into displacement.
twisti@991 566 RegisterOrConstant arg_slot(const int st_off);
twisti@991 567 RegisterOrConstant next_arg_slot(const int st_off);
twisti@991 568
twisti@991 569 RegisterOrConstant tag_slot(const int st_off);
twisti@991 570 RegisterOrConstant next_tag_slot(const int st_off);
duke@0 571
duke@0 572 // Stores long into offset pointed to by base
duke@0 573 void store_c2i_long(Register r, Register base,
duke@0 574 const int st_off, bool is_stack);
duke@0 575 void store_c2i_object(Register r, Register base,
duke@0 576 const int st_off);
duke@0 577 void store_c2i_int(Register r, Register base,
duke@0 578 const int st_off);
duke@0 579 void store_c2i_double(VMReg r_2,
duke@0 580 VMReg r_1, Register base, const int st_off);
duke@0 581 void store_c2i_float(FloatRegister f, Register base,
duke@0 582 const int st_off);
duke@0 583
duke@0 584 public:
duke@0 585 void gen_c2i_adapter(int total_args_passed,
duke@0 586 // VMReg max_arg,
duke@0 587 int comp_args_on_stack, // VMRegStackSlots
duke@0 588 const BasicType *sig_bt,
duke@0 589 const VMRegPair *regs,
duke@0 590 Label& skip_fixup);
duke@0 591 void gen_i2c_adapter(int total_args_passed,
duke@0 592 // VMReg max_arg,
duke@0 593 int comp_args_on_stack, // VMRegStackSlots
duke@0 594 const BasicType *sig_bt,
duke@0 595 const VMRegPair *regs);
duke@0 596
duke@0 597 AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
duke@0 598 };
duke@0 599
duke@0 600
duke@0 601 // Patch the callers callsite with entry to compiled code if it exists.
duke@0 602 void AdapterGenerator::patch_callers_callsite() {
duke@0 603 Label L;
duke@0 604 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
duke@0 605 __ br_null(G3_scratch, false, __ pt, L);
duke@0 606 // Schedule the branch target address early.
duke@0 607 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
duke@0 608 // Call into the VM to patch the caller, then jump to compiled callee
duke@0 609 __ save_frame(4); // Args in compiled layout; do not blow them
duke@0 610
duke@0 611 // Must save all the live Gregs the list is:
duke@0 612 // G1: 1st Long arg (32bit build)
duke@0 613 // G2: global allocated to TLS
duke@0 614 // G3: used in inline cache check (scratch)
duke@0 615 // G4: 2nd Long arg (32bit build);
duke@0 616 // G5: used in inline cache check (methodOop)
duke@0 617
duke@0 618 // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
duke@0 619
duke@0 620 #ifdef _LP64
duke@0 621 // mov(s,d)
duke@0 622 __ mov(G1, L1);
duke@0 623 __ mov(G4, L4);
duke@0 624 __ mov(G5_method, L5);
duke@0 625 __ mov(G5_method, O0); // VM needs target method
duke@0 626 __ mov(I7, O1); // VM needs caller's callsite
duke@0 627 // Must be a leaf call...
duke@0 628 // can be very far once the blob has been relocated
twisti@720 629 AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
duke@0 630 __ relocate(relocInfo::runtime_call_type);
twisti@720 631 __ jumpl_to(dest, O7, O7);
duke@0 632 __ delayed()->mov(G2_thread, L7_thread_cache);
duke@0 633 __ mov(L7_thread_cache, G2_thread);
duke@0 634 __ mov(L1, G1);
duke@0 635 __ mov(L4, G4);
duke@0 636 __ mov(L5, G5_method);
duke@0 637 #else
duke@0 638 __ stx(G1, FP, -8 + STACK_BIAS);
duke@0 639 __ stx(G4, FP, -16 + STACK_BIAS);
duke@0 640 __ mov(G5_method, L5);
duke@0 641 __ mov(G5_method, O0); // VM needs target method
duke@0 642 __ mov(I7, O1); // VM needs caller's callsite
duke@0 643 // Must be a leaf call...
duke@0 644 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
duke@0 645 __ delayed()->mov(G2_thread, L7_thread_cache);
duke@0 646 __ mov(L7_thread_cache, G2_thread);
duke@0 647 __ ldx(FP, -8 + STACK_BIAS, G1);
duke@0 648 __ ldx(FP, -16 + STACK_BIAS, G4);
duke@0 649 __ mov(L5, G5_method);
duke@0 650 __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
duke@0 651 #endif /* _LP64 */
duke@0 652
duke@0 653 __ restore(); // Restore args
duke@0 654 __ bind(L);
duke@0 655 }
duke@0 656
duke@0 657 void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
duke@0 658 Register scratch) {
duke@0 659 if (TaggedStackInterpreter) {
twisti@991 660 RegisterOrConstant slot = tag_slot(st_off);
duke@0 661 // have to store zero because local slots can be reused (rats!)
duke@0 662 if (t == frame::TagValue) {
twisti@991 663 __ st_ptr(G0, base, slot);
duke@0 664 } else if (t == frame::TagCategory2) {
twisti@991 665 __ st_ptr(G0, base, slot);
twisti@991 666 __ st_ptr(G0, base, next_tag_slot(st_off));
duke@0 667 } else {
duke@0 668 __ mov(t, scratch);
twisti@991 669 __ st_ptr(scratch, base, slot);
duke@0 670 }
duke@0 671 }
duke@0 672 }
duke@0 673
twisti@991 674
twisti@991 675 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
twisti@991 676 RegisterOrConstant roc(arg_offset(st_off));
twisti@991 677 return __ ensure_simm13_or_reg(roc, Rdisp);
duke@0 678 }
duke@0 679
twisti@991 680 RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
twisti@991 681 RegisterOrConstant roc(next_arg_offset(st_off));
twisti@991 682 return __ ensure_simm13_or_reg(roc, Rdisp);
duke@0 683 }
twisti@991 684
twisti@991 685
twisti@991 686 RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
twisti@991 687 RegisterOrConstant roc(tag_offset(st_off));
twisti@991 688 return __ ensure_simm13_or_reg(roc, Rdisp);
twisti@991 689 }
twisti@991 690
twisti@991 691 RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
twisti@991 692 RegisterOrConstant roc(next_tag_offset(st_off));
twisti@991 693 return __ ensure_simm13_or_reg(roc, Rdisp);
twisti@991 694 }
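// For reference, a sketch of what ensure_simm13_or_reg plausibly does
// (not the actual MacroAssembler source; shown only to document the
// fallback the slot helpers above rely on):
//
//   RegisterOrConstant MacroAssembler::ensure_simm13_or_reg(RegisterOrConstant src,
//                                                           Register temp) {
//     if (!src.is_constant() || is_simm13(src.as_constant()))
//       return src;                    // register, or constant fits in 13 bits
//     set(src.as_constant(), temp);    // materialize the oversized offset
//     return RegisterOrConstant(temp); // use the register form instead
//   }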
twisti@991 695
duke@0 696
duke@0 697 // Stores long into offset pointed to by base
duke@0 698 void AdapterGenerator::store_c2i_long(Register r, Register base,
duke@0 699 const int st_off, bool is_stack) {
duke@0 700 #ifdef _LP64
duke@0 701 // In V9, longs are given 2 64-bit slots in the interpreter, but the
duke@0 702 // data is passed in only 1 slot.
duke@0 703 __ stx(r, base, next_arg_slot(st_off));
duke@0 704 #else
ysr@344 705 #ifdef COMPILER2
duke@0 706 // Misaligned store of 64-bit data
duke@0 707 __ stw(r, base, arg_slot(st_off)); // lo bits
duke@0 708 __ srlx(r, 32, r);
duke@0 709 __ stw(r, base, next_arg_slot(st_off)); // hi bits
duke@0 710 #else
duke@0 711 if (is_stack) {
duke@0 712 // Misaligned store of 64-bit data
duke@0 713 __ stw(r, base, arg_slot(st_off)); // lo bits
duke@0 714 __ srlx(r, 32, r);
duke@0 715 __ stw(r, base, next_arg_slot(st_off)); // hi bits
duke@0 716 } else {
duke@0 717 __ stw(r->successor(), base, arg_slot(st_off) ); // lo bits
duke@0 718 __ stw(r , base, next_arg_slot(st_off)); // hi bits
duke@0 719 }
duke@0 720 #endif // COMPILER2
ysr@344 721 #endif // _LP64
duke@0 722 tag_c2i_arg(frame::TagCategory2, base, st_off, r);
duke@0 723 }
duke@0 724
duke@0 725 void AdapterGenerator::store_c2i_object(Register r, Register base,
duke@0 726 const int st_off) {
duke@0 727 __ st_ptr (r, base, arg_slot(st_off));
duke@0 728 tag_c2i_arg(frame::TagReference, base, st_off, r);
duke@0 729 }
duke@0 730
duke@0 731 void AdapterGenerator::store_c2i_int(Register r, Register base,
duke@0 732 const int st_off) {
duke@0 733 __ st (r, base, arg_slot(st_off));
duke@0 734 tag_c2i_arg(frame::TagValue, base, st_off, r);
duke@0 735 }
duke@0 736
duke@0 737 // Stores into offset pointed to by base
duke@0 738 void AdapterGenerator::store_c2i_double(VMReg r_2,
duke@0 739 VMReg r_1, Register base, const int st_off) {
duke@0 740 #ifdef _LP64
duke@0 741 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
duke@0 742 // data is passed in only 1 slot.
duke@0 743 __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
duke@0 744 #else
duke@0 745 // Need to marshal 64-bit value from misaligned Lesp loads
duke@0 746 __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
duke@0 747 __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
duke@0 748 #endif
duke@0 749 tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
duke@0 750 }
duke@0 751
duke@0 752 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
duke@0 753 const int st_off) {
duke@0 754 __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
duke@0 755 tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
duke@0 756 }
duke@0 757
duke@0 758 void AdapterGenerator::gen_c2i_adapter(
duke@0 759 int total_args_passed,
duke@0 760 // VMReg max_arg,
duke@0 761 int comp_args_on_stack, // VMRegStackSlots
duke@0 762 const BasicType *sig_bt,
duke@0 763 const VMRegPair *regs,
duke@0 764 Label& skip_fixup) {
duke@0 765
duke@0 766 // Before we get into the guts of the C2I adapter, see if we should be here
duke@0 767 // at all. We've come from compiled code and are attempting to jump to the
duke@0 768 // interpreter, which means the caller made a static call to get here
duke@0 769 // (vcalls always get a compiled target if there is one). Check for a
duke@0 770 // compiled target. If there is one, we need to patch the caller's call.
duke@0 771 // However we will run interpreted if we come thru here. The next pass
duke@0 772 // thru the call site will run compiled. If we ran compiled here then
duke@0 773 // we can (theoretically) do endless i2c->c2i->i2c transitions during
duke@0 774 // deopt/uncommon trap cycles. If we always go interpreted here then
duke@0 775 // we can have at most one and don't need to play any tricks to keep
duke@0 776 // from endlessly growing the stack.
duke@0 777 //
duke@0 778 // Actually if we detected that we had an i2c->c2i transition here we
duke@0 779 // ought to be able to reset the world back to the state of the interpreted
duke@0 780 // call and not bother building another interpreter arg area. We don't
duke@0 781 // do that at this point.
duke@0 782
duke@0 783 patch_callers_callsite();
duke@0 784
duke@0 785 __ bind(skip_fixup);
duke@0 786
duke@0 787 // Since all args are passed on the stack, total_args_passed*wordSize is the
duke@0 788 // space we need. Add in varargs area needed by the interpreter. Round up
duke@0 789 // to stack alignment.
duke@0 790 const int arg_size = total_args_passed * Interpreter::stackElementSize();
duke@0 791 const int varargs_area =
duke@0 792 (frame::varargs_offset - frame::register_save_words)*wordSize;
duke@0 793 const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);
duke@0 794
duke@0 795 int bias = STACK_BIAS;
duke@0 796 const int interp_arg_offset = frame::varargs_offset*wordSize +
duke@0 797 (total_args_passed-1)*Interpreter::stackElementSize();
duke@0 798
duke@0 799 Register base = SP;
duke@0 800
duke@0 801 #ifdef _LP64
duke@0 802 // In the 64-bit build, because of wider slots and STACK_BIAS, we can run
duke@0 803 // out of bits in the displacement to do loads and stores. Use G3 as a
duke@0 804 // temporary displacement.
duke@0 805 if (! __ is_simm13(extraspace)) {
duke@0 806 __ set(extraspace, G3_scratch);
duke@0 807 __ sub(SP, G3_scratch, SP);
duke@0 808 } else {
duke@0 809 __ sub(SP, extraspace, SP);
duke@0 810 }
duke@0 811 set_Rdisp(G3_scratch);
duke@0 812 #else
duke@0 813 __ sub(SP, extraspace, SP);
duke@0 814 #endif // _LP64
duke@0 815
duke@0 816 // First write G1 (if used) to wherever it must go
duke@0 817 for (int i=0; i<total_args_passed; i++) {
duke@0 818 const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
duke@0 819 VMReg r_1 = regs[i].first();
duke@0 820 VMReg r_2 = regs[i].second();
duke@0 821 if (r_1 == G1_scratch->as_VMReg()) {
duke@0 822 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
duke@0 823 store_c2i_object(G1_scratch, base, st_off);
duke@0 824 } else if (sig_bt[i] == T_LONG) {
duke@0 825 assert(!TieredCompilation, "should not use register args for longs");
duke@0 826 store_c2i_long(G1_scratch, base, st_off, false);
duke@0 827 } else {
duke@0 828 store_c2i_int(G1_scratch, base, st_off);
duke@0 829 }
duke@0 830 }
duke@0 831 }
duke@0 832
duke@0 833 // Now write the args into the outgoing interpreter space
duke@0 834 for (int i=0; i<total_args_passed; i++) {
duke@0 835 const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
duke@0 836 VMReg r_1 = regs[i].first();
duke@0 837 VMReg r_2 = regs[i].second();
duke@0 838 if (!r_1->is_valid()) {
duke@0 839 assert(!r_2->is_valid(), "");
duke@0 840 continue;
duke@0 841 }
duke@0 842 // Skip G1 if found as we did it first in order to free it up
duke@0 843 if (r_1 == G1_scratch->as_VMReg()) {
duke@0 844 continue;
duke@0 845 }
duke@0 846 #ifdef ASSERT
duke@0 847 bool G1_forced = false;
duke@0 848 #endif // ASSERT
duke@0 849 if (r_1->is_stack()) { // Pretend stack targets are loaded into G1
duke@0 850 #ifdef _LP64
duke@0 851 Register ld_off = Rdisp;
duke@0 852 __ set(reg2offset(r_1) + extraspace + bias, ld_off);
duke@0 853 #else
duke@0 854 int ld_off = reg2offset(r_1) + extraspace + bias;
duke@0 855 #ifdef ASSERT
duke@0 856 G1_forced = true;
duke@0 857 #endif // ASSERT
duke@0 858 #endif // _LP64
duke@0 859 r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
duke@0 860 if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
duke@0 861 else __ ldx(base, ld_off, G1_scratch);
duke@0 862 }
duke@0 863
duke@0 864 if (r_1->is_Register()) {
duke@0 865 Register r = r_1->as_Register()->after_restore();
duke@0 866 if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
duke@0 867 store_c2i_object(r, base, st_off);
duke@0 868 } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
duke@0 869 if (TieredCompilation) {
duke@0 870 assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
duke@0 871 }
duke@0 872 store_c2i_long(r, base, st_off, r_2->is_stack());
duke@0 873 } else {
duke@0 874 store_c2i_int(r, base, st_off);
duke@0 875 }
duke@0 876 } else {
duke@0 877 assert(r_1->is_FloatRegister(), "");
duke@0 878 if (sig_bt[i] == T_FLOAT) {
duke@0 879 store_c2i_float(r_1->as_FloatRegister(), base, st_off);
duke@0 880 } else {
duke@0 881 assert(sig_bt[i] == T_DOUBLE, "wrong type");
duke@0 882 store_c2i_double(r_2, r_1, base, st_off);
duke@0 883 }
duke@0 884 }
duke@0 885 }
duke@0 886
duke@0 887 #ifdef _LP64
duke@0 888 // Need to reload G3_scratch, used for temporary displacements.
duke@0 889 __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
duke@0 890
duke@0 891 // Pass O5_savedSP as an argument to the interpreter.
duke@0 892 // The interpreter will restore SP to this value before returning.
duke@0 893 __ set(extraspace, G1);
duke@0 894 __ add(SP, G1, O5_savedSP);
duke@0 895 #else
duke@0 896 // Pass O5_savedSP as an argument to the interpreter.
duke@0 897 // The interpreter will restore SP to this value before returning.
duke@0 898 __ add(SP, extraspace, O5_savedSP);
duke@0 899 #endif // _LP64
duke@0 900
duke@0 901 __ mov((frame::varargs_offset)*wordSize -
duke@0 902 1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
duke@0 903 // Jump to the interpreter just as if interpreter was doing it.
duke@0 904 __ jmpl(G3_scratch, 0, G0);
duke@0 905 // Set up Lesp for the call. Cannot actually set Lesp as the current Lesp
duke@0 906 // (really L0) is in use by the compiled frame as a generic temp. However,
duke@0 907 // the interpreter does not know where its args are without some kind of
duke@0 908 // arg pointer being passed in. Pass it in Gargs.
duke@0 909 __ delayed()->add(SP, G1, Gargs);
duke@0 910 }
duke@0 911
duke@0 912 void AdapterGenerator::gen_i2c_adapter(
duke@0 913 int total_args_passed,
duke@0 914 // VMReg max_arg,
duke@0 915 int comp_args_on_stack, // VMRegStackSlots
duke@0 916 const BasicType *sig_bt,
duke@0 917 const VMRegPair *regs) {
duke@0 918
duke@0 919 // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
duke@0 920 // layout. Lesp was saved by the calling I-frame and will be restored on
duke@0 921 // return. Meanwhile, outgoing arg space is all owned by the callee
duke@0 922 // C-frame, so we can mangle it at will. After adjusting the frame size,
duke@0 923 // hoist register arguments and repack other args according to the compiled
duke@0 924 // code convention. Finally, end in a jump to the compiled code. The entry
duke@0 925 // point address is the start of the buffer.
duke@0 926
duke@0 927 // We will only enter here from an interpreted frame and never from after
duke@0 928 // passing thru a c2i. Azul allowed this but we do not. If we lose the
duke@0 929 // race and use a c2i we will remain interpreted for the race loser(s).
duke@0 930 // This removes all sorts of headaches on the x86 side and also eliminates
duke@0 931 // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
duke@0 932
duke@0 933 // As you can see from the list of inputs & outputs there are not a lot
duke@0 934 // of temp registers to work with: mostly G1, G3 & G4.
duke@0 935
duke@0 936 // Inputs:
duke@0 937 // G2_thread - TLS
duke@0 938 // G5_method - Method oop
jrose@689 939 // G4 (Gargs) - Pointer to interpreter's args
jrose@689 940 // O0..O4 - free for scratch
jrose@689 941 // O5_savedSP - Caller's saved SP, to be restored if needed
duke@0 942 // O6 - Current SP!
duke@0 943 // O7 - Valid return address
jrose@689 944 // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
duke@0 945
duke@0 946 // Outputs:
duke@0 947 // G2_thread - TLS
duke@0 948 // G1, G4 - Outgoing long args in 32-bit build
duke@0 949 // O0-O5 - Outgoing args in compiled layout
duke@0 950 // O6 - Adjusted or restored SP
duke@0 951 // O7 - Valid return address
duke@0 952 // L0-L7, I0-I7 - Caller's temps (no frame pushed yet)
duke@0 953 // F0-F7 - more outgoing args
duke@0 954
duke@0 955
jrose@689 956 // Gargs is the incoming argument base, and also an outgoing argument.
duke@0 957 __ sub(Gargs, BytesPerWord, Gargs);
duke@0 958
duke@0 959 #ifdef ASSERT
duke@0 960 {
duke@0 961 // on entry O5_savedSP and SP should be equal
duke@0 962 Label ok;
duke@0 963 __ cmp(O5_savedSP, SP);
duke@0 964 __ br(Assembler::equal, false, Assembler::pt, ok);
duke@0 965 __ delayed()->nop();
duke@0 966 __ stop("I5_savedSP not set");
duke@0 967 __ should_not_reach_here();
duke@0 968 __ bind(ok);
duke@0 969 }
duke@0 970 #endif
duke@0 971
duke@0 972 // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
duke@0 973 // WITH O7 HOLDING A VALID RETURN PC
duke@0 974 //
duke@0 975 // | |
duke@0 976 // : java stack :
duke@0 977 // | |
duke@0 978 // +--------------+ <--- start of outgoing args
duke@0 979 // | receiver | |
duke@0 980 // : rest of args : |---size is java-arg-words
duke@0 981 // | | |
duke@0 982 // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
duke@0 983 // | | |
duke@0 984 // : unused : |---Space for max Java stack, plus stack alignment
duke@0 985 // | | |
duke@0 986 // +--------------+ <--- SP + 16*wordsize
duke@0 987 // | |
duke@0 988 // : window :
duke@0 989 // | |
duke@0 990 // +--------------+ <--- SP
duke@0 991
duke@0 992 // WE REPACK THE STACK. We use the common calling convention layout as
duke@0 993 // discovered by calling SharedRuntime::calling_convention. We assume it
duke@0 994 // causes an arbitrary shuffle of memory, which may require some register
duke@0 995 // temps to do the shuffle. We hope for (and optimize for) the case where
duke@0 996 // temps are not needed. We may have to resize the stack slightly, in case
duke@0 997 // we need alignment padding (32-bit interpreter can pass longs & doubles
duke@0 998 // misaligned, but the compilers expect them aligned).
duke@0 999 //
duke@0 1000 // | |
duke@0 1001 // : java stack :
duke@0 1002 // | |
duke@0 1003 // +--------------+ <--- start of outgoing args
duke@0 1004 // | pad, align | |
duke@0 1005 // +--------------+ |
duke@0 1006 // | ints, floats | |---Outgoing stack args, packed low.
duke@0 1007 // +--------------+ | First few args in registers.
duke@0 1008 // : doubles : |
duke@0 1009 // | longs | |
duke@0 1010 // +--------------+ <--- SP' + 16*wordsize
duke@0 1011 // | |
duke@0 1012 // : window :
duke@0 1013 // | |
duke@0 1014 // +--------------+ <--- SP'
duke@0 1015
duke@0 1016 // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
duke@0 1017 // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP
duke@0 1018 // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.
duke@0 1019
duke@0 1020 // Cut-out for having no stack args. Since up to 6 args are passed
duke@0 1021 // in registers, we will commonly have no stack args.
duke@0 1022 if (comp_args_on_stack > 0) {
duke@0 1023
duke@0 1024 // Convert VMReg stack slots to words.
duke@0 1025 int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
duke@0 1026 // Round up to minimum stack alignment, in wordSize
duke@0 1027 comp_words_on_stack = round_to(comp_words_on_stack, 2);
duke@0 1028 // Now compute the distance from Lesp to SP. This calculation does not
duke@0 1029 // include the space for total_args_passed because Lesp has not yet popped
duke@0 1030 // the arguments.
duke@0 1031 __ sub(SP, (comp_words_on_stack)*wordSize, SP);
duke@0 1032 }
duke@0 1033
duke@0 1034 // Will jump to the compiled code just as if compiled code was doing it.
duke@0 1035 // Pre-load the register-jump target early, to schedule it better.
duke@0 1036 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
duke@0 1037
duke@0 1038 // Now generate the shuffle code. Pick up all register args and move the
duke@0 1039 // rest through G1_scratch.
duke@0 1040 for (int i=0; i<total_args_passed; i++) {
duke@0 1041 if (sig_bt[i] == T_VOID) {
duke@0 1042 // Longs and doubles are passed in native word order, but misaligned
duke@0 1043 // in the 32-bit build.
duke@0 1044 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
duke@0 1045 continue;
duke@0 1046 }
duke@0 1047
duke@0 1048 // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the
duke@0 1049 // 32-bit build and aligned in the 64-bit build. Look for the obvious
duke@0 1050 // ldx/lddf optimizations.
duke@0 1051
duke@0 1052 // Load in argument order going down.
duke@0 1053 const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
duke@0 1054 set_Rdisp(G1_scratch);
duke@0 1055
duke@0 1056 VMReg r_1 = regs[i].first();
duke@0 1057 VMReg r_2 = regs[i].second();
duke@0 1058 if (!r_1->is_valid()) {
duke@0 1059 assert(!r_2->is_valid(), "");
duke@0 1060 continue;
duke@0 1061 }
duke@0 1062 if (r_1->is_stack()) { // Pretend stack targets are loaded into F8/F9
duke@0 1063 r_1 = F8->as_VMReg(); // as part of the load/store shuffle
duke@0 1064 if (r_2->is_valid()) r_2 = r_1->next();
duke@0 1065 }
duke@0 1066 if (r_1->is_Register()) { // Register argument
duke@0 1067 Register r = r_1->as_Register()->after_restore();
duke@0 1068 if (!r_2->is_valid()) {
duke@0 1069 __ ld(Gargs, arg_slot(ld_off), r);
duke@0 1070 } else {
duke@0 1071 #ifdef _LP64
duke@0 1072 // In V9, longs are given 2 64-bit slots in the interpreter, but the
duke@0 1073 // data is passed in only 1 slot.
twisti@991 1074 RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
duke@0 1075 next_arg_slot(ld_off) : arg_slot(ld_off);
duke@0 1076 __ ldx(Gargs, slot, r);
duke@0 1077 #else
duke@0 1078 // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
duke@0 1079 // stack shuffle. Load the first 2 longs into G1/G4 later.
duke@0 1080 #endif
duke@0 1081 }
duke@0 1082 } else {
duke@0 1083 assert(r_1->is_FloatRegister(), "");
duke@0 1084 if (!r_2->is_valid()) {
duke@0 1085 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
duke@0 1086 } else {
duke@0 1087 #ifdef _LP64
duke@0 1088 // In V9, doubles are given 2 64-bit slots in the interpreter, but the
duke@0 1089 // data is passed in only 1 slot. This code also handles longs that
duke@0 1090 // are passed on the stack, but need a stack-to-stack move through a
duke@0 1091 // spare float register.
twisti@991 1092 RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
duke@0 1093 next_arg_slot(ld_off) : arg_slot(ld_off);
duke@0 1094 __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
duke@0 1095 #else
duke@0 1096 // Need to marshal 64-bit value from misaligned Lesp loads
duke@0 1097 __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
duke@0 1098 __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
duke@0 1099 #endif
duke@0 1100 }
duke@0 1101 }
duke@0 1102 // Was the argument really intended to be on the stack, but was loaded
duke@0 1103 // into F8/F9?
duke@0 1104 if (regs[i].first()->is_stack()) {
duke@0 1105 assert(r_1->as_FloatRegister() == F8, "fix this code");
duke@0 1106 // Convert stack slot to an SP offset
duke@0 1107 int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
duke@0 1108 // Store down the shuffled stack word. Target address _is_ aligned.
twisti@991 1109 RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
twisti@991 1110 if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
twisti@991 1111 else __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
duke@0 1112 }
duke@0 1113 }
duke@0 1114 bool made_space = false;
duke@0 1115 #ifndef _LP64
duke@0 1116 // May need to pick up a few long args in G1/G4
duke@0 1117 bool g4_crushed = false;
duke@0 1118 bool g3_crushed = false;
duke@0 1119 for (int i=0; i<total_args_passed; i++) {
duke@0 1120 if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
duke@0 1121 // Load in argument order going down
duke@0 1122 int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
duke@0 1123 // Need to marshal 64-bit value from misaligned Lesp loads
duke@0 1124 Register r = regs[i].first()->as_Register()->after_restore();
duke@0 1125 if (r == G1 || r == G4) {
duke@0 1126 assert(!g4_crushed, "ordering problem");
duke@0 1127 if (r == G4){
duke@0 1128 g4_crushed = true;
duke@0 1129 __ lduw(Gargs, arg_slot(ld_off) , G3_scratch); // Load lo bits
duke@0 1130 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
duke@0 1131 } else {
duke@0 1132 // better schedule this way
duke@0 1133 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
duke@0 1134 __ lduw(Gargs, arg_slot(ld_off) , G3_scratch); // Load lo bits
duke@0 1135 }
duke@0 1136 g3_crushed = true;
duke@0 1137 __ sllx(r, 32, r);
duke@0 1138 __ or3(G3_scratch, r, r);
duke@0 1139 } else {
duke@0 1140 assert(r->is_out(), "longs passed in two O registers");
duke@0 1141 __ ld (Gargs, arg_slot(ld_off) , r->successor()); // Load lo bits
duke@0 1142 __ ld (Gargs, next_arg_slot(ld_off), r); // Load hi bits
duke@0 1143 }
duke@0 1144 }
duke@0 1145 }
duke@0 1146 #endif
duke@0 1147
duke@0 1148 // Jump to the compiled code just as if compiled code was doing it.
duke@0 1149 //
duke@0 1150 #ifndef _LP64
duke@0 1151 if (g3_crushed) {
duke@0 1152 // Rats, the load was wasted; at least it is in cache...
twisti@720 1153 __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
duke@0 1154 }
duke@0 1155 #endif /* _LP64 */
duke@0 1156
duke@0 1157 // 6243940 We might end up in handle_wrong_method if
duke@0 1158 // the callee is deoptimized as we race thru here. If that
duke@0 1159 // happens we don't want to take a safepoint because the
duke@0 1160 // caller frame will look interpreted and arguments are now
duke@0 1161 // "compiled" so it is much better to make this transition
duke@0 1162 // invisible to the stack walking code. Unfortunately if
duke@0 1163 // we try and find the callee by normal means a safepoint
duke@0 1164 // is possible. So we stash the desired callee in the thread
duke@0 1165 // and the VM will find it there should this case occur.
twisti@720 1166 Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
duke@0 1167 __ st_ptr(G5_method, callee_target_addr);
duke@0 1168
duke@0 1169 if (StressNonEntrant) {
duke@0 1170 // Open a big window for deopt failure
duke@0 1171 __ save_frame(0);
duke@0 1172 __ mov(G0, L0);
duke@0 1173 Label loop;
duke@0 1174 __ bind(loop);
duke@0 1175 __ sub(L0, 1, L0);
duke@0 1176 __ br_null(L0, false, Assembler::pt, loop);
duke@0 1177 __ delayed()->nop();
duke@0 1178
duke@0 1179 __ restore();
duke@0 1180 }
duke@0 1181
duke@0 1182
duke@0 1183 __ jmpl(G3, 0, G0);
duke@0 1184 __ delayed()->nop();
duke@0 1185 }
duke@0 1186
duke@0 1187 // ---------------------------------------------------------------
duke@0 1188 AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
duke@0 1189 int total_args_passed,
duke@0 1190 // VMReg max_arg,
duke@0 1191 int comp_args_on_stack, // VMRegStackSlots
duke@0 1192 const BasicType *sig_bt,
duke@0 1193 const VMRegPair *regs) {
duke@0 1194 address i2c_entry = __ pc();
duke@0 1195
duke@0 1196 AdapterGenerator agen(masm);
duke@0 1197
duke@0 1198 agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
duke@0 1199
duke@0 1200
duke@0 1201 // -------------------------------------------------------------------------
duke@0 1202 // Generate a C2I adapter. On entry we know G5 holds the methodOop. The
duke@0 1203 // args start out packed in the compiled layout. They need to be unpacked
duke@0 1204 // into the interpreter layout. This will almost always require some stack
duke@0 1205 // space. We grow the current (compiled) stack, then repack the args. We
duke@0 1206 // finally end in a jump to the generic interpreter entry point. On exit
duke@0 1207 // from the interpreter, the interpreter will restore our SP (lest the
duke@0 1208 // compiled code, which relies solely on SP and not FP, get sick).
duke@0 1209
duke@0 1210 address c2i_unverified_entry = __ pc();
duke@0 1211 Label skip_fixup;
duke@0 1212 {
duke@0 1213 #if !defined(_LP64) && defined(COMPILER2)
duke@0 1214 Register R_temp = L0; // another scratch register
duke@0 1215 #else
duke@0 1216 Register R_temp = G1; // another scratch register
duke@0 1217 #endif
duke@0 1218
twisti@720 1219 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
duke@0 1220
duke@0 1221 __ verify_oop(O0);
duke@0 1222 __ verify_oop(G5_method);
coleenp@108 1223 __ load_klass(O0, G3_scratch);
duke@0 1224 __ verify_oop(G3_scratch);
duke@0 1225
duke@0 1226 #if !defined(_LP64) && defined(COMPILER2)
duke@0 1227 __ save(SP, -frame::register_save_words*wordSize, SP);
duke@0 1228 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
duke@0 1229 __ verify_oop(R_temp);
duke@0 1230 __ cmp(G3_scratch, R_temp);
duke@0 1231 __ restore();
duke@0 1232 #else
duke@0 1233 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
duke@0 1234 __ verify_oop(R_temp);
duke@0 1235 __ cmp(G3_scratch, R_temp);
duke@0 1236 #endif
duke@0 1237
duke@0 1238 Label ok, ok2;
duke@0 1239 __ brx(Assembler::equal, false, Assembler::pt, ok);
duke@0 1240 __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
twisti@720 1241 __ jump_to(ic_miss, G3_scratch);
duke@0 1242 __ delayed()->nop();
duke@0 1243
duke@0 1244 __ bind(ok);
duke@0 1245 // Method might have been compiled since the call site was patched to
duke@0 1246 // interpreted; if that is the case treat it as a miss so we can get
duke@0 1247 // the call site corrected.
duke@0 1248 __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
duke@0 1249 __ bind(ok2);
duke@0 1250 __ br_null(G3_scratch, false, __ pt, skip_fixup);
duke@0 1251 __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
twisti@720 1252 __ jump_to(ic_miss, G3_scratch);
duke@0 1253 __ delayed()->nop();
duke@0 1254
duke@0 1255 }
duke@0 1256
duke@0 1257 address c2i_entry = __ pc();
duke@0 1258
duke@0 1259 agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
duke@0 1260
duke@0 1261 __ flush();
duke@0 1262 return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);
duke@0 1263
duke@0 1264 }
duke@0 1265
duke@0 1266 // Helper function for native calling conventions
duke@0 1267 static VMReg int_stk_helper( int i ) {
duke@0 1268 // Bias any stack based VMReg we get by ignoring the window area
duke@0 1269 // but not the register parameter save area.
duke@0 1270 //
duke@0 1271 // This is strange for the following reasons. We'd normally expect
duke@0 1272 // the calling convention to return a VMReg for a stack slot
duke@0 1273 // completely ignoring any abi reserved area. C2 thinks of that
duke@0 1274 // abi area as only out_preserve_stack_slots. This does not include
duke@0 1275 // the area allocated by the C abi to store down integer arguments
duke@0 1276 // because the java calling convention does not use it. So
duke@0 1277 // since c2 assumes that there are only out_preserve_stack_slots
duke@0 1278 // to bias the optoregs (which impacts VMRegs), when actually referencing
duke@0 1279 // any stack location the C calling convention must add in this bias amount
duke@0 1280 // to make up for the fact that the out_preserve_stack_slots is
duke@0 1281 // insufficient for C calls. What a mess. I sure hope those 6
duke@0 1282 // stack words were worth it on every java call!
duke@0 1283
duke@0 1284 // Another way of cleaning this up would be for out_preserve_stack_slots
duke@0 1285 // to take a parameter to say whether it was C or java calling conventions.
duke@0 1286 // Then things might look a little better (but not much).
duke@0 1287
duke@0 1288 int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
duke@0 1289 if( mem_parm_offset < 0 ) {
duke@0 1290 return as_oRegister(i)->as_VMReg();
duke@0 1291 } else {
duke@0 1292 int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
duke@0 1293 // Now return a biased offset that will be correct when out_preserve_slots is added back in
duke@0 1294 return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
duke@0 1295 }
duke@0 1296 }
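
Concretely: the first SPARC_ARGS_IN_REGS_NUM arguments map to %o registers, and everything past that maps to a memory slot whose offset is pre-biased by subtracting out_preserve_stack_slots, so that when C2 later adds out_preserve_stack_slots back in it lands on the true slot. A standalone sketch of that arithmetic with made-up constant values (the real ones live in the frame and VMRegImpl headers):

    #include <cstdio>

    // Hypothetical stand-ins for the VM constants; values are examples only.
    const int kArgsInRegs      = 6;   // SPARC_ARGS_IN_REGS_NUM
    const int kMemParmSpOffset = 23;  // frame::memory_parameter_word_sp_offset
    const int kSlotsPerWord    = 2;   // VMRegImpl::slots_per_word (64-bit build)
    const int kOutPreserve     = 16;  // SharedRuntime::out_preserve_stack_slots()

    int main() {
      for (int i = 0; i < 9; i++) {
        int mem_parm_offset = i - kArgsInRegs;
        if (mem_parm_offset < 0) {
          printf("arg %d -> register %%o%d\n", i, i);
        } else {
          int actual = (mem_parm_offset + kMemParmSpOffset) * kSlotsPerWord;
          printf("arg %d -> biased slot %d (true slot %d once %d is re-added)\n",
                 i, actual - kOutPreserve, actual, kOutPreserve);
        }
      }
      return 0;
    }
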
duke@0 1297
duke@0 1298
duke@0 1299 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
duke@0 1300 VMRegPair *regs,
duke@0 1301 int total_args_passed) {
duke@0 1302
duke@0 1303 // Return the number of VMReg stack_slots needed for the args.
duke@0 1304 // This value does not include an abi space (like register window
duke@0 1305 // save area).
duke@0 1306
duke@0 1307 // The native convention is V8 if !LP64
duke@0 1308 // The LP64 convention is the V9 convention which is slightly more sane.
duke@0 1309
duke@0 1310 // We return the amount of VMReg stack slots we need to reserve for all
duke@0 1311 // the arguments NOT counting out_preserve_stack_slots. Since we always
duke@0 1312 // have space for storing at least 6 registers to memory we start with that.
duke@0 1313 // See int_stk_helper for a further discussion.
duke@0 1314 int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();
duke@0 1315
duke@0 1316 #ifdef _LP64
duke@0 1317 // V9 convention: All things "as-if" on double-wide stack slots.
duke@0 1318 // Hoist any int/ptr/long's in the first 6 to int regs.
duke@0 1319 // Hoist any flt/dbl's in the first 16 dbl regs.
duke@0 1320 int j = 0; // Count of actual args, not HALVES
duke@0 1321 for( int i=0; i<total_args_passed; i++, j++ ) {
duke@0 1322 switch( sig_bt[i] ) {
duke@0 1323 case T_BOOLEAN:
duke@0 1324 case T_BYTE:
duke@0 1325 case T_CHAR:
duke@0 1326 case T_INT:
duke@0 1327 case T_SHORT:
duke@0 1328 regs[i].set1( int_stk_helper( j ) ); break;
duke@0 1329 case T_LONG:
duke@0 1330 assert( sig_bt[i+1] == T_VOID, "expecting half" );
duke@0 1331 case T_ADDRESS: // raw pointers, like current thread, for VM calls
duke@0 1332 case T_ARRAY:
duke@0 1333 case T_OBJECT:
duke@0 1334 regs[i].set2( int_stk_helper( j ) );
duke@0 1335 break;
duke@0 1336 case T_FLOAT:
duke@0 1337 if ( j < 16 ) {
duke@0 1338 // V9ism: floats go in ODD registers
duke@0 1339 regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
duke@0 1340 } else {
duke@0 1341 // V9ism: floats go in ODD stack slot
duke@0 1342 regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
duke@0 1343 }
duke@0 1344 break;
duke@0 1345 case T_DOUBLE:
duke@0 1346 assert( sig_bt[i+1] == T_VOID, "expecting half" );
duke@0 1347 if ( j < 16 ) {
duke@0 1348 // V9ism: doubles go in EVEN/ODD regs
duke@0 1349 regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
duke@0 1350 } else {
duke@0 1351 // V9ism: doubles go in EVEN/ODD stack slots
duke@0 1352 regs[i].set2(VMRegImpl::stack2reg(j<<1));
duke@0 1353 }
duke@0 1354 break;
duke@0 1355 case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
duke@0 1356 default:
duke@0 1357 ShouldNotReachHere();
duke@0 1358 }
duke@0 1359 if (regs[i].first()->is_stack()) {
duke@0 1360 int off = regs[i].first()->reg2stack();
duke@0 1361 if (off > max_stack_slots) max_stack_slots = off;
duke@0 1362 }
duke@0 1363 if (regs[i].second()->is_stack()) {
duke@0 1364 int off = regs[i].second()->reg2stack();
duke@0 1365 if (off > max_stack_slots) max_stack_slots = off;
duke@0 1366 }
duke@0 1367 }
duke@0 1368
duke@0 1369 #else // _LP64
duke@0 1370 // V8 convention: first 6 things in O-regs, rest on stack.
duke@0 1371 // Alignment is willy-nilly.
duke@0 1372 for( int i=0; i<total_args_passed; i++ ) {
duke@0 1373 switch( sig_bt[i] ) {
duke@0 1374 case T_ADDRESS: // raw pointers, like current thread, for VM calls
duke@0 1375 case T_ARRAY:
duke@0 1376 case T_BOOLEAN:
duke@0 1377 case T_BYTE:
duke@0 1378 case T_CHAR:
duke@0 1379 case T_FLOAT:
duke@0 1380 case T_INT:
duke@0 1381 case T_OBJECT:
duke@0 1382 case T_SHORT:
duke@0 1383 regs[i].set1( int_stk_helper( i ) );
duke@0 1384 break;
duke@0 1385 case T_DOUBLE:
duke@0 1386 case T_LONG:
duke@0 1387 assert( sig_bt[i+1] == T_VOID, "expecting half" );
duke@0 1388 regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
duke@0 1389 break;
duke@0 1390 case T_VOID: regs[i].set_bad(); break;
duke@0 1391 default:
duke@0 1392 ShouldNotReachHere();
duke@0 1393 }
duke@0 1394 if (regs[i].first()->is_stack()) {
duke@0 1395 int off = regs[i].first()->reg2stack();
duke@0 1396 if (off > max_stack_slots) max_stack_slots = off;
duke@0 1397 }
duke@0 1398 if (regs[i].second()->is_stack()) {
duke@0 1399 int off = regs[i].second()->reg2stack();
duke@0 1400 if (off > max_stack_slots) max_stack_slots = off;
duke@0 1401 }
duke@0 1402 }
duke@0 1403 #endif // _LP64
duke@0 1404
duke@0 1405 return round_to(max_stack_slots + 1, 2);
duke@0 1406
duke@0 1407 }
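
The V9 branch above is easiest to see as a mapping: whole-argument position j puts a float in the odd single register F(2j+1) and a double in the even/odd pair starting at F(2j); from position 16 on, the same 2j/2j+1 numbers become stack slots instead. A tiny standalone table generator (sketch only):

    #include <cstdio>

    int main() {
      // Mirrors the T_FLOAT / T_DOUBLE cases of the LP64 loop above.
      for (int j = 0; j < 18; j++) {
        if (j < 16)
          printf("arg %2d: float -> F%-2d   double -> F%d:F%d\n",
                 j, 2*j + 1, 2*j, 2*j + 1);
        else
          printf("arg %2d: float -> slot %d  double -> slots %d:%d\n",
                 j, 2*j + 1, 2*j, 2*j + 1);
      }
      return 0;
    }
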
duke@0 1408
duke@0 1409
duke@0 1410 // ---------------------------------------------------------------------------
duke@0 1411 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
duke@0 1412 switch (ret_type) {
duke@0 1413 case T_FLOAT:
duke@0 1414 __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
duke@0 1415 break;
duke@0 1416 case T_DOUBLE:
duke@0 1417 __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
duke@0 1418 break;
duke@0 1419 }
duke@0 1420 }
duke@0 1421
duke@0 1422 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
duke@0 1423 switch (ret_type) {
duke@0 1424 case T_FLOAT:
duke@0 1425 __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
duke@0 1426 break;
duke@0 1427 case T_DOUBLE:
duke@0 1428 __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
duke@0 1429 break;
duke@0 1430 }
duke@0 1431 }
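
Both helpers park the value in the scratch area at the top of the wrapper frame (the "2 slots for moves" in the frame diagram further down): since the frame is created with save(SP, -stack_size, SP), SP plus the frame size in bytes lands just below the caller-visible FP, the -4/-8 steps back over the value, and STACK_BIAS is the usual V9 correction. A quick standalone check of the offset arithmetic with example numbers:

    #include <cstdio>

    const int kStackSlotSize = 4;     // VMRegImpl::stack_slot_size
    const int kStackBias     = 2047;  // STACK_BIAS on 64-bit SPARC (0 on 32-bit)

    int main() {
      int frame_slots = 32;           // example frame size in slots
      printf("float saved at SP+%d, double saved at SP+%d\n",
             frame_slots * kStackSlotSize - 4 + kStackBias,
             frame_slots * kStackSlotSize - 8 + kStackBias);
      return 0;
    }
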
duke@0 1432
duke@0 1433 // Check and forward any pending exception. Thread is stored in
duke@0 1434 // L7_thread_cache and possibly NOT in G2_thread. Since this is a native call, there
duke@0 1435 // is no exception handler. We merely pop this frame off and throw the
duke@0 1436 // exception in the caller's frame.
duke@0 1437 static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
duke@0 1438 Label L;
duke@0 1439 __ br_null(Rex_oop, false, Assembler::pt, L);
duke@0 1440 __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
duke@0 1441 // Since this is a native call, we *know* the proper exception handler
duke@0 1442 // without calling into the VM: it's the empty function. Just pop this
duke@0 1443 // frame and then jump to forward_exception_entry; O7 will contain the
duke@0 1444 // native caller's return PC.
twisti@720 1445 AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
twisti@720 1446 __ jump_to(exception_entry, G3_scratch);
duke@0 1447 __ delayed()->restore(); // Pop this frame off.
duke@0 1448 __ bind(L);
duke@0 1449 }
duke@0 1450
duke@0 1451 // A simple move of integer like type
duke@0 1452 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@0 1453 if (src.first()->is_stack()) {
duke@0 1454 if (dst.first()->is_stack()) {
duke@0 1455 // stack to stack
duke@0 1456 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@0 1457 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1458 } else {
duke@0 1459 // stack to reg
duke@0 1460 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@0 1461 }
duke@0 1462 } else if (dst.first()->is_stack()) {
duke@0 1463 // reg to stack
duke@0 1464 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1465 } else {
duke@0 1466 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@0 1467 }
duke@0 1468 }
duke@0 1469
duke@0 1470 // On 64-bit we will store integer-like items to the stack as
duke@0 1471 // 64-bit items (SPARC ABI) even though Java would only store
duke@0 1472 // 32 bits for a parameter. On 32-bit it will simply be 32 bits,
duke@0 1473 // so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
duke@0 1474 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@0 1475 if (src.first()->is_stack()) {
duke@0 1476 if (dst.first()->is_stack()) {
duke@0 1477 // stack to stack
duke@0 1478 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@0 1479 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1480 } else {
duke@0 1481 // stack to reg
duke@0 1482 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@0 1483 }
duke@0 1484 } else if (dst.first()->is_stack()) {
duke@0 1485 // reg to stack
duke@0 1486 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1487 } else {
duke@0 1488 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@0 1489 }
duke@0 1490 }
duke@0 1491
duke@0 1492
duke@0 1493 // An oop arg. Must pass a handle not the oop itself
duke@0 1494 static void object_move(MacroAssembler* masm,
duke@0 1495 OopMap* map,
duke@0 1496 int oop_handle_offset,
duke@0 1497 int framesize_in_slots,
duke@0 1498 VMRegPair src,
duke@0 1499 VMRegPair dst,
duke@0 1500 bool is_receiver,
duke@0 1501 int* receiver_offset) {
duke@0 1502
duke@0 1503 // must pass a handle. First figure out the location we use as a handle
duke@0 1504
duke@0 1505 if (src.first()->is_stack()) {
duke@0 1506 // Oop is already on the stack
duke@0 1507 Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
duke@0 1508 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
duke@0 1509 __ ld_ptr(rHandle, 0, L4);
duke@0 1510 #ifdef _LP64
duke@0 1511 __ movr( Assembler::rc_z, L4, G0, rHandle );
duke@0 1512 #else
duke@0 1513 __ tst( L4 );
duke@0 1514 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
duke@0 1515 #endif
duke@0 1516 if (dst.first()->is_stack()) {
duke@0 1517 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1518 }
duke@0 1519 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
duke@0 1520 if (is_receiver) {
duke@0 1521 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
duke@0 1522 }
duke@0 1523 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
duke@0 1524 } else {
duke@0 1525 // Oop is in an input register; we must flush it to the stack
duke@0 1526 const Register rOop = src.first()->as_Register();
duke@0 1527 const Register rHandle = L5;
duke@0 1528 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
duke@0 1529 int offset = oop_slot*VMRegImpl::stack_slot_size;
duke@0 1530 Label skip;
duke@0 1531 __ st_ptr(rOop, SP, offset + STACK_BIAS);
duke@0 1532 if (is_receiver) {
duke@0 1533 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
duke@0 1534 }
duke@0 1535 map->set_oop(VMRegImpl::stack2reg(oop_slot));
duke@0 1536 __ add(SP, offset + STACK_BIAS, rHandle);
duke@0 1537 #ifdef _LP64
duke@0 1538 __ movr( Assembler::rc_z, rOop, G0, rHandle );
duke@0 1539 #else
duke@0 1540 __ tst( rOop );
duke@0 1541 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
duke@0 1542 #endif
duke@0 1543
duke@0 1544 if (dst.first()->is_stack()) {
duke@0 1545 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1546 } else {
duke@0 1547 __ mov(rHandle, dst.first()->as_Register());
duke@0 1548 }
duke@0 1549 }
duke@0 1550 }
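
The movr/movcc above implements JNI's NULL rule: a non-NULL oop is passed as the address of the stack slot that holds it (a handle), while a NULL oop is passed as plain NULL, never a handle to NULL. The same policy in a portable sketch (types are hypothetical stand-ins):

    #include <cassert>
    #include <cstddef>

    typedef void* oop;  // hypothetical stand-in for the VM's oop type

    // Given the stack slot that now holds the oop, produce the JNI argument:
    // the slot's address for a real oop, NULL for a NULL oop.
    static void* handlize(oop* slot) {
      return (*slot != NULL) ? static_cast<void*>(slot) : NULL;
    }

    int main() {
      int dummy;
      oop some_obj = &dummy, null_obj = NULL;
      assert(handlize(&some_obj) == &some_obj);
      assert(handlize(&null_obj) == NULL);
      return 0;
    }
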
duke@0 1551
duke@0 1552 // A float arg may have to do a float reg to int reg conversion
duke@0 1553 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@0 1554 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
duke@0 1555
duke@0 1556 if (src.first()->is_stack()) {
duke@0 1557 if (dst.first()->is_stack()) {
duke@0 1558 // stack to stack the easiest of the bunch
duke@0 1559 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@0 1560 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1561 } else {
duke@0 1562 // stack to reg
duke@0 1563 if (dst.first()->is_Register()) {
duke@0 1564 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@0 1565 } else {
duke@0 1566 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
duke@0 1567 }
duke@0 1568 }
duke@0 1569 } else if (dst.first()->is_stack()) {
duke@0 1570 // reg to stack
duke@0 1571 if (src.first()->is_Register()) {
duke@0 1572 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1573 } else {
duke@0 1574 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1575 }
duke@0 1576 } else {
duke@0 1577 // reg to reg
duke@0 1578 if (src.first()->is_Register()) {
duke@0 1579 if (dst.first()->is_Register()) {
duke@0 1580 // gpr -> gpr
duke@0 1581 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@0 1582 } else {
duke@0 1583 // gpr -> fpr
duke@0 1584 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
duke@0 1585 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
duke@0 1586 }
duke@0 1587 } else if (dst.first()->is_Register()) {
duke@0 1588 // fpr -> gpr
duke@0 1589 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
duke@0 1590 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
duke@0 1591 } else {
duke@0 1592 // fpr -> fpr
duke@0 1593 // In theory these overlap but the ordering is such that this is likely a nop
duke@0 1594 if ( src.first() != dst.first()) {
duke@0 1595 __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
duke@0 1596 }
duke@0 1597 }
duke@0 1598 }
duke@0 1599 }
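
SPARC has no direct move between the integer and floating-point register files, which is why the gpr<->fpr cases above bounce the bits through a scratch stack word (st then ldf, or stf then ld). The portable equivalent of that bit-preserving round trip is a memcpy (sketch, not VM code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // gpr -> fpr: store the int bits, reload them as a float (st + ldf above).
    static float int_bits_to_float(uint32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }

    int main() {
      assert(int_bits_to_float(0x3F800000u) == 1.0f);  // IEEE-754 bits of 1.0f
      return 0;
    }
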
duke@0 1600
duke@0 1601 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@0 1602 VMRegPair src_lo(src.first());
duke@0 1603 VMRegPair src_hi(src.second());
duke@0 1604 VMRegPair dst_lo(dst.first());
duke@0 1605 VMRegPair dst_hi(dst.second());
duke@0 1606 simple_move32(masm, src_lo, dst_lo);
duke@0 1607 simple_move32(masm, src_hi, dst_hi);
duke@0 1608 }
duke@0 1609
duke@0 1610 // A long move
duke@0 1611 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@0 1612
duke@0 1613 // Do the simple ones here else do two int moves
duke@0 1614 if (src.is_single_phys_reg() ) {
duke@0 1615 if (dst.is_single_phys_reg()) {
duke@0 1616 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@0 1617 } else {
duke@0 1618 // split src into two separate registers
duke@0 1619 // Remember hi means hi address or lsw on sparc
duke@0 1620 // Move msw to lsw
duke@0 1621 if (dst.second()->is_reg()) {
duke@0 1622 // MSW -> MSW
duke@0 1623 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
duke@0 1624 // Now LSW -> LSW
duke@0 1625 // this will only move lo -> lo and ignore hi
duke@0 1626 VMRegPair split(dst.second());
duke@0 1627 simple_move32(masm, src, split);
duke@0 1628 } else {
duke@0 1629 VMRegPair split(src.first(), L4->as_VMReg());
duke@0 1630 // MSW -> MSW (lo ie. first word)
duke@0 1631 __ srax(src.first()->as_Register(), 32, L4);
duke@0 1632 split_long_move(masm, split, dst);
duke@0 1633 }
duke@0 1634 }
duke@0 1635 } else if (dst.is_single_phys_reg()) {
duke@0 1636 if (src.is_adjacent_aligned_on_stack(2)) {
never@297 1637 __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@0 1638 } else {
duke@0 1639 // dst is a single reg.
duke@0 1640 // Remember lo is low address not msb for stack slots
duke@0 1641 // and lo is the "real" register for registers
duke@0 1642 // src is
duke@0 1643
duke@0 1644 VMRegPair split;
duke@0 1645
duke@0 1646 if (src.first()->is_reg()) {
duke@0 1647 // src.lo (msw) is a reg, src.hi is stk/reg
duke@0 1648 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
duke@0 1649 split.set_pair(dst.first(), src.first());
duke@0 1650 } else {
duke@0 1651 // msw is stack move to L5
duke@0 1652 // lsw is stack move to dst.lo (real reg)
duke@0 1653 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
duke@0 1654 split.set_pair(dst.first(), L5->as_VMReg());
duke@0 1655 }
duke@0 1656
duke@0 1657 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
duke@0 1658 // msw -> src.lo/L5, lsw -> dst.lo
duke@0 1659 split_long_move(masm, src, split);
duke@0 1660
duke@0 1661 // So dst now has the low-order half correct; now position the
duke@0 1662 // msw half
duke@0 1663 __ sllx(split.first()->as_Register(), 32, L5);
duke@0 1664
duke@0 1665 const Register d = dst.first()->as_Register();
duke@0 1666 __ or3(L5, d, d);
duke@0 1667 }
duke@0 1668 } else {
duke@0 1669 // For LP64 we can probably do better.
duke@0 1670 split_long_move(masm, src, dst);
duke@0 1671 }
duke@0 1672 }
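
The srax-by-32 used above is an arithmetic shift, so the sign of the long survives in the MSW while the LSW is taken verbatim. A standalone sketch of the split (the >> on a negative value is implementation-defined in C++, but arithmetic on the compilers that build HotSpot):

    #include <cassert>
    #include <cstdint>

    static void split64(int64_t v, int32_t* msw, uint32_t* lsw) {
      *msw = static_cast<int32_t>(v >> 32);  // srax src, 32, dst
      *lsw = static_cast<uint32_t>(v);       // low word, unchanged
    }

    int main() {
      int32_t hi; uint32_t lo;
      split64(-2, &hi, &lo);
      assert(hi == -1 && lo == 0xFFFFFFFEu); // sign replicated into the MSW
      return 0;
    }
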
duke@0 1673
duke@0 1674 // A double move
duke@0 1675 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
duke@0 1676
duke@0 1677 // The painful thing here is that like long_move a VMRegPair might be
duke@0 1678 // 1: a single physical register
duke@0 1679 // 2: two physical registers (v8)
duke@0 1680 // 3: a physical reg [lo] and a stack slot [hi] (v8)
duke@0 1681 // 4: two stack slots
duke@0 1682
duke@0 1683 // Since src is always a java calling convention we know that the src pair
duke@0 1684 // is always either all registers or all stack (and aligned?)
duke@0 1685
duke@0 1686 // in a register [lo] and a stack slot [hi]
duke@0 1687 if (src.first()->is_stack()) {
duke@0 1688 if (dst.first()->is_stack()) {
duke@0 1689 // stack to stack the easiest of the bunch
duke@0 1690 // ought to be a way to do this where if alignment is ok we use ldd/std when possible
duke@0 1691 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
duke@0 1692 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
duke@0 1693 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1694 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@0 1695 } else {
duke@0 1696 // stack to reg
duke@0 1697 if (dst.second()->is_stack()) {
duke@0 1698 // stack -> reg, stack -> stack
duke@0 1699 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
duke@0 1700 if (dst.first()->is_Register()) {
duke@0 1701 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@0 1702 } else {
duke@0 1703 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
duke@0 1704 }
duke@0 1705 // This was missing. (very rare case)
duke@0 1706 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@0 1707 } else {
duke@0 1708 // stack -> reg
duke@0 1709 // Eventually optimize for alignment QQQ
duke@0 1710 if (dst.first()->is_Register()) {
duke@0 1711 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
duke@0 1712 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
duke@0 1713 } else {
duke@0 1714 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
duke@0 1715 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
duke@0 1716 }
duke@0 1717 }
duke@0 1718 }
duke@0 1719 } else if (dst.first()->is_stack()) {
duke@0 1720 // reg to stack
duke@0 1721 if (src.first()->is_Register()) {
duke@0 1722 // Eventually optimize for alignment QQQ
duke@0 1723 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1724 if (src.second()->is_stack()) {
duke@0 1725 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
duke@0 1726 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@0 1727 } else {
duke@0 1728 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
duke@0 1729 }
duke@0 1730 } else {
duke@0 1731 // fpr to stack
duke@0 1732 if (src.second()->is_stack()) {
duke@0 1733 ShouldNotReachHere();
duke@0 1734 } else {
duke@0 1735 // Is the stack aligned?
duke@0 1736 if (reg2offset(dst.first()) & 0x7) {
duke@0 1737 // No, do as pairs
duke@0 1738 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1739 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
duke@0 1740 } else {
duke@0 1741 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
duke@0 1742 }
duke@0 1743 }
duke@0 1744 }
duke@0 1745 } else {
duke@0 1746 // reg to reg
duke@0 1747 if (src.first()->is_Register()) {
duke@0 1748 if (dst.first()->is_Register()) {
duke@0 1749 // gpr -> gpr
duke@0 1750 __ mov(src.first()->as_Register(), dst.first()->as_Register());
duke@0 1751 __ mov(src.second()->as_Register(), dst.second()->as_Register());
duke@0 1752 } else {
duke@0 1753 // gpr -> fpr
duke@0 1754 // ought to be able to do a single store
duke@0 1755 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
duke@0 1756 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
duke@0 1757 // ought to be able to do a single load
duke@0 1758 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
duke@0 1759 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
duke@0 1760 }
duke@0 1761 } else if (dst.first()->is_Register()) {
duke@0 1762 // fpr -> gpr
duke@0 1763 // ought to be able to do a single store
duke@0 1764 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
duke@0 1765 // ought to be able to do a single load
duke@0 1766 // REMEMBER first() is low address not LSB
duke@0 1767 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
duke@0 1768 if (dst.second()->is_Register()) {
duke@0 1769 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
duke@0 1770 } else {
duke@0 1771 __ ld(FP, -4 + STACK_BIAS, L4);
duke@0 1772 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
duke@0 1773 }
duke@0 1774 } else {
duke@0 1775 // fpr -> fpr
duke@0 1776 // In theory these overlap but the ordering is such that this is likely a nop
duke@0 1777 if ( src.first() != dst.first()) {
duke@0 1778 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
duke@0 1779 }
duke@0 1780 }
duke@0 1781 }
duke@0 1782 }
duke@0 1783
duke@0 1784 // Creates an inner frame if one hasn't already been created, and
duke@0 1785 // saves a copy of the thread in L7_thread_cache
duke@0 1786 static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
duke@0 1787 if (!*already_created) {
duke@0 1788 __ save_frame(0);
duke@0 1789 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
duke@0 1790 // Don't use save_thread because it smashes G2 and we merely want to save a
duke@0 1791 // copy
duke@0 1792 __ mov(G2_thread, L7_thread_cache);
duke@0 1793 *already_created = true;
duke@0 1794 }
duke@0 1795 }
duke@0 1796
duke@0 1797 // ---------------------------------------------------------------------------
duke@0 1798 // Generate a native wrapper for a given method. The method takes arguments
duke@0 1799 // in the Java compiled code convention, marshals them to the native
duke@0 1800 // convention (handlizes oops, etc), transitions to native, makes the call,
duke@0 1801 // returns to java state (possibly blocking), unhandlizes any result and
duke@0 1802 // returns.
duke@0 1803 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
duke@0 1804 methodHandle method,
duke@0 1805 int total_in_args,
duke@0 1806 int comp_args_on_stack, // in VMRegStackSlots
duke@0 1807 BasicType *in_sig_bt,
duke@0 1808 VMRegPair *in_regs,
duke@0 1809 BasicType ret_type) {
duke@0 1810
duke@0 1811 // Native nmethod wrappers never take possession of the oop arguments.
duke@0 1812 // So the caller will gc the arguments. The only thing we need an
duke@0 1813 // oopMap for is if the call is static
duke@0 1814 //
duke@0 1815 // An OopMap for lock (and class if static), and one for the VM call itself
duke@0 1816 OopMapSet *oop_maps = new OopMapSet();
duke@0 1817 intptr_t start = (intptr_t)__ pc();
duke@0 1818
duke@0 1819 // First thing make an ic check to see if we should even be here
duke@0 1820 {
duke@0 1821 Label L;
duke@0 1822 const Register temp_reg = G3_scratch;
twisti@720 1823 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
duke@0 1824 __ verify_oop(O0);
coleenp@108 1825 __ load_klass(O0, temp_reg);
duke@0 1826 __ cmp(temp_reg, G5_inline_cache_reg);
duke@0 1827 __ brx(Assembler::equal, true, Assembler::pt, L);
duke@0 1828 __ delayed()->nop();
duke@0 1829
twisti@720 1830 __ jump_to(ic_miss, temp_reg);
duke@0 1831 __ delayed()->nop();
duke@0 1832 __ align(CodeEntryAlignment);
duke@0 1833 __ bind(L);
duke@0 1834 }
duke@0 1835
duke@0 1836 int vep_offset = ((intptr_t)__ pc()) - start;
duke@0 1837
duke@0 1838 #ifdef COMPILER1
duke@0 1839 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
duke@0 1840 // Object.hashCode can pull the hashCode from the header word
duke@0 1841 // instead of doing a full VM transition once it's been computed.
duke@0 1842 // Since hashCode is usually polymorphic at call sites we can't do
duke@0 1843 // this optimization at the call site without a lot of work.
duke@0 1844 Label slowCase;
duke@0 1845 Register receiver = O0;
duke@0 1846 Register result = O0;
duke@0 1847 Register header = G3_scratch;
duke@0 1848 Register hash = G3_scratch; // overwrite header value with hash value
duke@0 1849 Register mask = G1; // to get hash field from header
duke@0 1850
duke@0 1851 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked.
duke@0 1852 // We depend on hash_mask being at most 32 bits and avoid the use of
duke@0 1853 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
duke@0 1854 // vm: see markOop.hpp.
duke@0 1855 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
duke@0 1856 __ sethi(markOopDesc::hash_mask, mask);
duke@0 1857 __ btst(markOopDesc::unlocked_value, header);
duke@0 1858 __ br(Assembler::zero, false, Assembler::pn, slowCase);
duke@0 1859 if (UseBiasedLocking) {
duke@0 1860 // Check if biased and fall through to runtime if so
duke@0 1861 __ delayed()->nop();
duke@0 1862 __ btst(markOopDesc::biased_lock_bit_in_place, header);
duke@0 1863 __ br(Assembler::notZero, false, Assembler::pn, slowCase);
duke@0 1864 }
duke@0 1865 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
duke@0 1866
duke@0 1867 // Check for a valid (non-zero) hash code and get its value.
duke@0 1868 #ifdef _LP64
duke@0 1869 __ srlx(header, markOopDesc::hash_shift, hash);
duke@0 1870 #else
duke@0 1871 __ srl(header, markOopDesc::hash_shift, hash);
duke@0 1872 #endif
duke@0 1873 __ andcc(hash, mask, hash);
duke@0 1874 __ br(Assembler::equal, false, Assembler::pn, slowCase);
duke@0 1875 __ delayed()->nop();
duke@0 1876
duke@0 1877 // leaf return.
duke@0 1878 __ retl();
duke@0 1879 __ delayed()->mov(hash, result);
duke@0 1880 __ bind(slowCase);
duke@0 1881 }
duke@0 1882 #endif // COMPILER1
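
The COMPILER1 fast path above is nothing more than shift-and-mask on the mark word, with branches to the slow case when the object is locked (or biased) or the hash has not been computed yet. A standalone sketch of the extraction with made-up shift/mask values (the real layout is in markOop.hpp):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical mark-word layout for illustration only.
    const int      kHashShift = 8;
    const uint64_t kHashMask  = 0x7FFFFFFFull;

    static uint64_t extract_hash(uint64_t mark) {
      return (mark >> kHashShift) & kHashMask;  // the srlx + and above
    }

    int main() {
      uint64_t mark = (0x1234567ull << kHashShift) | 0x1;  // unlocked bit + hash
      printf("hash = 0x%llx\n", (unsigned long long)extract_hash(mark));
      return 0;  // a zero hash would mean "not computed yet": take the slow case
    }
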
duke@0 1883
duke@0 1884
duke@0 1885 // We have received a description of where all the java args are located
duke@0 1886 // on entry to the wrapper. We need to convert these args to where
duke@0 1887 // the jni function will expect them. To figure out where they go
duke@0 1888 // we convert the java signature to a C signature by inserting
duke@0 1889 // the hidden arguments as arg[0] and possibly arg[1] (static method)
duke@0 1890
duke@0 1891 int total_c_args = total_in_args + 1;
duke@0 1892 if (method->is_static()) {
duke@0 1893 total_c_args++;
duke@0 1894 }
duke@0 1895
duke@0 1896 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
duke@0 1897 VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
duke@0 1898
duke@0 1899 int argc = 0;
duke@0 1900 out_sig_bt[argc++] = T_ADDRESS;
duke@0 1901 if (method->is_static()) {
duke@0 1902 out_sig_bt[argc++] = T_OBJECT;
duke@0 1903 }
duke@0 1904
duke@0 1905 for (int i = 0; i < total_in_args ; i++ ) {
duke@0 1906 out_sig_bt[argc++] = in_sig_bt[i];
duke@0 1907 }
duke@0 1908
duke@0 1909 // Now figure out where the args must be stored and how much stack space
duke@0 1910 // they require (neglecting out_preserve_stack_slots but counting space
duke@0 1911 // for storing the 1st six register arguments). It's weird; see int_stk_helper.
duke@0 1912 //
duke@0 1913 int out_arg_slots;
duke@0 1914 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
duke@0 1915
duke@0 1916 // Compute framesize for the wrapper. We need to handlize all oops in
duke@0 1917 // registers. We must create space for them here that is disjoint from
duke@0 1918 // the windowed save area because we have no control over when we might
duke@0 1919 // flush the window again and overwrite values that gc has since modified.
duke@0 1920 // (The live window race)
duke@0 1921 //
duke@0 1922 // We always just allocate 6 words for storing down these objects. This allows
duke@0 1923 // us to simply record the base and use the Ireg number to decide which
duke@0 1924 // slot to use. (Note that the reg number is the inbound number not the
duke@0 1925 // outbound number).
duke@0 1926 // We must shuffle args to match the native convention, and include var-args space.
duke@0 1927
duke@0 1928 // Calculate the total number of stack slots we will need.
duke@0 1929
duke@0 1930 // First count the abi requirement plus all of the outgoing args
duke@0 1931 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
duke@0 1932
duke@0 1933 // Now the space for the inbound oop handle area
duke@0 1934
duke@0 1935 int oop_handle_offset = stack_slots;
duke@0 1936 stack_slots += 6*VMRegImpl::slots_per_word;
duke@0 1937
duke@0 1938 // Now any space we need for handlizing a klass if static method
duke@0 1939
duke@0 1940 int oop_temp_slot_offset = 0;
duke@0 1941 int klass_slot_offset = 0;
duke@0 1942 int klass_offset = -1;
duke@0 1943 int lock_slot_offset = 0;
duke@0 1944 bool is_static = false;
duke@0 1945
duke@0 1946 if (method->is_static()) {
duke@0 1947 klass_slot_offset = stack_slots;
duke@0 1948 stack_slots += VMRegImpl::slots_per_word;
duke@0 1949 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
duke@0 1950 is_static = true;
duke@0 1951 }
duke@0 1952
duke@0 1953 // Plus a lock if needed
duke@0 1954
duke@0 1955 if (method->is_synchronized()) {
duke@0 1956 lock_slot_offset = stack_slots;
duke@0 1957 stack_slots += VMRegImpl::slots_per_word;
duke@0 1958 }
duke@0 1959
duke@0 1960 // Now a place to save return value or as a temporary for any gpr -> fpr moves
duke@0 1961 stack_slots += 2;
duke@0 1962
duke@0 1963 // Ok The space we have allocated will look like:
duke@0 1964 //
duke@0 1965 //
duke@0 1966 // FP-> | |
duke@0 1967 // |---------------------|
duke@0 1968 // | 2 slots for moves |
duke@0 1969 // |---------------------|
duke@0 1970 // | lock box (if sync) |
duke@0 1971 // |---------------------| <- lock_slot_offset
duke@0 1972 // | klass (if static) |
duke@0 1973 // |---------------------| <- klass_slot_offset
duke@0 1974 // | oopHandle area |
duke@0 1975 // |---------------------| <- oop_handle_offset
duke@0 1976 // | outbound memory |
duke@0 1977 // | based arguments |
duke@0 1978 // | |
duke@0 1979 // |---------------------|
duke@0 1980 // | vararg area |
duke@0 1981 // |---------------------|
duke@0 1982 // | |
duke@0 1983 // SP-> | out_preserved_slots |
duke@0 1984 //
duke@0 1985 //
duke@0 1986
duke@0 1987
duke@0 1988 // Now compute actual number of stack words we need rounding to make
duke@0 1989 // stack properly aligned.
duke@0 1990 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
duke@0 1991
duke@0 1992 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
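
round_to keeps the frame size a multiple of 2*slots_per_word slots, i.e. 16 bytes on the 64-bit build. A standalone sketch of the slot accounting above, assuming round_to is the usual power-of-two rounding (all counts are example values):

    #include <cstdio>

    static int round_to(int x, int mult) { return (x + mult - 1) & ~(mult - 1); }

    int main() {
      const int kSlotsPerWord = 2, kSlotSize = 4;
      int stack_slots = 16                 // abi + outgoing args (example)
                      + 6 * kSlotsPerWord  // oop handle area
                      + kSlotsPerWord      // klass slot (static method)
                      + kSlotsPerWord      // lock slot (synchronized method)
                      + 2;                 // 2 slots for moves
      stack_slots = round_to(stack_slots, 2 * kSlotsPerWord);
      printf("stack_slots=%d -> stack_size=%d bytes\n",
             stack_slots, stack_slots * kSlotSize);
      return 0;
    }
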
duke@0 1993
duke@0 1994 // Generate stack overflow check before creating frame
duke@0 1995 __ generate_stack_overflow_check(stack_size);
duke@0 1996
duke@0 1997 // Generate a new frame for the wrapper.
duke@0 1998 __ save(SP, -stack_size, SP);
duke@0 1999
duke@0 2000 int frame_complete = ((intptr_t)__ pc()) - start;
duke@0 2001
duke@0 2002 __ verify_thread();
duke@0 2003
duke@0 2004
duke@0 2005 //
duke@0 2006 // We immediately shuffle the arguments so that for any vm call we have
duke@0 2007 // to make from here on out (sync slow path, jvmti, etc.) we will have
duke@0 2008 // captured the oops from our caller and have a valid oopMap for
duke@0 2009 // them.
duke@0 2010
duke@0 2011 // -----------------
duke@0 2012 // The Grand Shuffle
duke@0 2013 //
duke@0 2014 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
duke@0 2015 // (derived from JavaThread* which is in L7_thread_cache) and, if static,
duke@0 2016 // the class mirror instead of a receiver. This pretty much guarantees that
duke@0 2017 // register layout will not match. We ignore these extra arguments during
duke@0 2018 // the shuffle. The shuffle is described by the two calling convention
duke@0 2019 // vectors we have in our possession. We simply walk the java vector to
duke@0 2020 // get the source locations and the c vector to get the destinations.
duke@0 2021 // Because we have a new window and the argument registers are completely
duke@0 2022 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
duke@0 2023 // here.
duke@0 2024
duke@0 2025 // This is a trick. We double the stack slots so we can claim
duke@0 2026 // the oops in the caller's frame. Since we are sure to have
duke@0 2027 // more args than the caller, doubling is enough to make
duke@0 2028 // sure we can capture all the incoming oop args from the
duke@0 2029 // caller.
duke@0 2030 //
duke@0 2031 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
duke@0 2032 int c_arg = total_c_args - 1;
duke@0 2033 // Record sp-based slot for receiver on stack for non-static methods
duke@0 2034 int receiver_offset = -1;
duke@0 2035
duke@0 2036 // We move the arguments backward because the floating point register
duke@0 2037 // destinations will always be a register with a greater or equal register
duke@0 2038 // number, or the stack.
duke@0 2039
duke@0 2040 #ifdef ASSERT
duke@0 2041 bool reg_destroyed[RegisterImpl::number_of_registers];
duke@0 2042 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
duke@0 2043 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
duke@0 2044 reg_destroyed[r] = false;
duke@0 2045 }
duke@0 2046 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
duke@0 2047 freg_destroyed[f] = false;
duke@0 2048 }
duke@0 2049
duke@0 2050 #endif /* ASSERT */
duke@0 2051
duke@0 2052 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) {
duke@0 2053
duke@0 2054 #ifdef ASSERT
duke@0 2055 if (in_regs[i].first()->is_Register()) {
duke@0 2056 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
duke@0 2057 } else if (in_regs[i].first()->is_FloatRegister()) {
duke@0 2058 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
duke@0 2059 }
duke@0 2060 if (out_regs[c_arg].first()->is_Register()) {
duke@0 2061 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
duke@0 2062 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
duke@0 2063 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
duke@0 2064 }
duke@0 2065 #endif /* ASSERT */
duke@0 2066
duke@0 2067 switch (in_sig_bt[i]) {
duke@0 2068 case T_ARRAY:
duke@0 2069 case T_OBJECT:
duke@0 2070 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
duke@0 2071 ((i == 0) && (!is_static)),
duke@0 2072 &receiver_offset);
duke@0 2073 break;
duke@0 2074 case T_VOID:
duke@0 2075 break;
duke@0 2076
duke@0 2077 case T_FLOAT:
duke@0 2078 float_move(masm, in_regs[i], out_regs[c_arg]);
duke@0 2079 break;
duke@0 2080
duke@0 2081 case T_DOUBLE:
duke@0 2082 assert( i + 1 < total_in_args &&
duke@0 2083 in_sig_bt[i + 1] == T_VOID &&
duke@0 2084 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
duke@0 2085 double_move(masm, in_regs[i], out_regs[c_arg]);
duke@0 2086 break;
duke@0 2087
duke@0 2088 case T_LONG :
duke@0 2089 long_move(masm, in_regs[i], out_regs[c_arg]);
duke@0 2090 break;
duke@0 2091
duke@0 2092 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
duke@0 2093
duke@0 2094 default:
duke@0 2095 move32_64(masm, in_regs[i], out_regs[c_arg]);
duke@0 2096 }
duke@0 2097 }
duke@0 2098
duke@0 2099 // Pre-load a static method's oop into O1. Used both by locking code and
duke@0 2100 // the normal JNI call code.
duke@0 2101 if (method->is_static()) {
duke@0 2102 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1);
duke@0 2103
duke@0 2104 // Now handlize the static class mirror in O1. It's known not-null.
duke@0 2105 __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
duke@0 2106 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
duke@0 2107 __ add(SP, klass_offset + STACK_BIAS, O1);
duke@0 2108 }
duke@0 2109
duke@0 2110
duke@0 2111 const Register L6_handle = L6;
duke@0 2112
duke@0 2113 if (method->is_synchronized()) {
duke@0 2114 __ mov(O1, L6_handle);
duke@0 2115 }
duke@0 2116
duke@0 2117 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs
duke@0 2118 // except O6/O7. So if we must call out we must push a new frame. We immediately
duke@0 2119 // push a new frame and flush the windows.
duke@0 2120
duke@0 2121 #ifdef _LP64
duke@0 2122 intptr_t thepc = (intptr_t) __ pc();
duke@0 2123 {
duke@0 2124 address here = __ pc();
duke@0 2125 // Call the next instruction
duke@0 2126 __ call(here + 8, relocInfo::none);
duke@0 2127 __ delayed()->nop();
duke@0 2128 }
duke@0 2129 #else
duke@0 2130 intptr_t thepc = __ load_pc_address(O7, 0);
duke@0 2131 #endif /* _LP64 */
duke@0 2132
duke@0 2133 // We use the same pc/oopMap repeatedly when we call out
duke@0 2134 oop_maps->add_gc_map(thepc - start, map);
duke@0 2135
duke@0 2136 // O7 now has the pc loaded that we will use when we finally call to native.
duke@0 2137
duke@0 2138 // Save thread in L7; it crosses a bunch of VM calls below
duke@0 2139 // Don't use save_thread because it smashes G2 and we merely
duke@0 2140 // want to save a copy
duke@0 2141 __ mov(G2_thread, L7_thread_cache);
duke@0 2142
duke@0 2143
duke@0 2144 // If we create an inner frame, once is plenty;
duke@0 2145 // when we create it we must also save G2_thread
duke@0 2146 bool inner_frame_created = false;
duke@0 2147
duke@0 2148 // dtrace method entry support
duke@0 2149 {
duke@0 2150 SkipIfEqual skip_if(
duke@0 2151 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
duke@0 2152 // create inner frame
duke@0 2153 __ save_frame(0);
duke@0 2154 __ mov(G2_thread, L7_thread_cache);
duke@0 2155 __ set_oop_constant(JNIHandles::make_local(method()), O1);
duke@0 2156 __ call_VM_leaf(L7_thread_cache,
duke@0 2157 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
duke@0 2158 G2_thread, O1);
duke@0 2159 __ restore();
duke@0 2160 }
duke@0 2161
dcubed@606 2162 // RedefineClasses() tracing support for obsolete method entry
dcubed@606 2163 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
dcubed@606 2164 // create inner frame
dcubed@606 2165 __ save_frame(0);
dcubed@606 2166 __ mov(G2_thread, L7_thread_cache);
dcubed@606 2167 __ set_oop_constant(JNIHandles::make_local(method()), O1);
dcubed@606 2168 __ call_VM_leaf(L7_thread_cache,
dcubed@606 2169 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
dcubed@606 2170 G2_thread, O1);
dcubed@606 2171 __ restore();
dcubed@606 2172 }
dcubed@606 2173
duke@0 2174 // We are in the jni frame unless saved_frame is true, in which case
duke@0 2175 // we are one frame deeper (the "inner" frame). If we are in the
duke@0 2176 // "inner" frame the args are in the Iregs; if in the jni frame then
duke@0 2177 // they are in the Oregs.
duke@0 2178 // If we ever need to go to the VM (for locking, jvmti) then
duke@0 2179 // we will always be in the "inner" frame.
duke@0 2180
duke@0 2181 // Lock a synchronized method
duke@0 2182 int lock_offset = -1; // Set if locked
duke@0 2183 if (method->is_synchronized()) {
duke@0 2184 Register Roop = O1;
duke@0 2185 const Register L3_box = L3;
duke@0 2186
duke@0 2187 create_inner_frame(masm, &inner_frame_created);
duke@0 2188
duke@0 2189 __ ld_ptr(I1, 0, O1);
duke@0 2190 Label done;
duke@0 2191
duke@0 2192 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
duke@0 2193 __ add(FP, lock_offset+STACK_BIAS, L3_box);
duke@0 2194 #ifdef ASSERT
duke@0 2195 if (UseBiasedLocking) {
duke@0 2196 // making the box point to itself will make it clear it went unused
duke@0 2197 // but also be obviously invalid
duke@0 2198 __ st_ptr(L3_box, L3_box, 0);
duke@0 2199 }
duke@0 2200 #endif // ASSERT
duke@0 2201 //
duke@0 2202 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
duke@0 2203 //
duke@0 2204 __ compiler_lock_object(Roop, L1, L3_box, L2);
duke@0 2205 __ br(Assembler::equal, false, Assembler::pt, done);
duke@0 2206 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
duke@0 2207
duke@0 2208
duke@0 2209 // None of the above fast optimizations worked so we have to get into the
duke@0 2210 // slow case of monitor enter. Inline a special case of call_VM that
duke@0 2211 // disallows any pending_exception.
duke@0 2212 __ mov(Roop, O0); // Need oop in O0
duke@0 2213 __ mov(L3_box, O1);
duke@0 2214
duke@0 2215 // Record last_Java_sp, in case the VM code releases the JVM lock.
duke@0 2216
duke@0 2217 __ set_last_Java_frame(FP, I7);
duke@0 2218
duke@0 2219 // do the call
duke@0 2220 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
duke@0 2221 __ delayed()->mov(L7_thread_cache, O2);
duke@0 2222
duke@0 2223 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@0 2224 __ reset_last_Java_frame();
duke@0 2225
duke@0 2226 #ifdef ASSERT
duke@0 2227 { Label L;
duke@0 2228 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
duke@0 2229 __ br_null(O0, false, Assembler::pt, L);
duke@0 2230 __ delayed()->nop();
duke@0 2231 __ stop("no pending exception allowed on exit from IR::monitorenter");
duke@0 2232 __ bind(L);
duke@0 2233 }
duke@0 2234 #endif
duke@0 2235 __ bind(done);
duke@0 2236 }
duke@0 2237
duke@0 2238
duke@0 2239 // Finally just about ready to make the JNI call
duke@0 2240
duke@0 2241 __ flush_windows();
duke@0 2242 if (inner_frame_created) {
duke@0 2243 __ restore();
duke@0 2244 } else {
duke@0 2245 // Store only what we need from this frame
duke@0 2246 // QQQ I think that non-v9 (like we care) we don't need these saves
duke@0 2247 // either as the flush traps and the current window goes too.
duke@0 2248 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
duke@0 2249 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
duke@0 2250 }
duke@0 2251
duke@0 2252 // get JNIEnv* which is first argument to native
duke@0 2253
duke@0 2254 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
duke@0 2255
duke@0 2256 // Use that pc we placed in O7 a while back as the current frame anchor
duke@0 2257
duke@0 2258 __ set_last_Java_frame(SP, O7);
duke@0 2259
duke@0 2260 // Transition from _thread_in_Java to _thread_in_native.
duke@0 2261 __ set(_thread_in_native, G3_scratch);
twisti@720 2262 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
duke@0 2263
duke@0 2264 // We flushed the windows ages ago now mark them as flushed
duke@0 2265
duke@0 2266 // mark windows as flushed
duke@0 2267 __ set(JavaFrameAnchor::flushed, G3_scratch);
duke@0 2268
twisti@720 2269 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
duke@0 2270
duke@0 2271 #ifdef _LP64
twisti@720 2272 AddressLiteral dest(method->native_function());
duke@0 2273 __ relocate(relocInfo::runtime_call_type);
twisti@720 2274 __ jumpl_to(dest, O7, O7);
duke@0 2275 #else
duke@0 2276 __ call(method->native_function(), relocInfo::runtime_call_type);
duke@0 2277 #endif
duke@0 2278 __ delayed()->st(G3_scratch, flags);
duke@0 2279
duke@0 2280 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@0 2281
duke@0 2282 // Unpack native results. For int-types, we do any needed sign-extension
duke@0 2283 // and move things into I0. The return value there will survive any VM
duke@0 2284 // calls for blocking or unlocking. An FP or OOP result (handle) is done
duke@0 2285 // specially in the slow-path code.
duke@0 2286 switch (ret_type) {
duke@0 2287 case T_VOID: break; // Nothing to do!
duke@0 2288 case T_FLOAT: break; // Got it where we want it (unless slow-path)
duke@0 2289 case T_DOUBLE: break; // Got it where we want it (unless slow-path)
duke@0 2290 // In the 64-bit build the result is in O0; in the 32-bit build it is in O0, O1
duke@0 2291 case T_LONG:
duke@0 2292 #ifndef _LP64
duke@0 2293 __ mov(O1, I1);
duke@0 2294 #endif
duke@0 2295 // Fall thru
duke@0 2296 case T_OBJECT: // Really a handle
duke@0 2297 case T_ARRAY:
duke@0 2298 case T_INT:
duke@0 2299 __ mov(O0, I0);
duke@0 2300 break;
duke@0 2301 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
duke@0 2302 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break;
duke@0 2303 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value!
duke@0 2304 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break;
duke@0 2305 break; // Cannot de-handlize until after reclaiming jvm_lock
duke@0 2306 default:
duke@0 2307 ShouldNotReachHere();
duke@0 2308 }
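
The shift pairs above are the standard extension idioms: sll/sra by 24 sign-extends a byte, sll/srl by 16 zero-extends a char, sll/sra by 16 sign-extends a short, and the subcc/addc pair collapses any nonzero boolean into exactly 1. The same unpacking in portable C++ (sketch):

    #include <cassert>
    #include <cstdint>

    static int32_t extend_byte(int32_t v)  { return static_cast<int8_t>(v);   } // sll 24 / sra 24
    static int32_t extend_char(int32_t v)  { return static_cast<uint16_t>(v); } // sll 16 / srl 16
    static int32_t extend_short(int32_t v) { return static_cast<int16_t>(v);  } // sll 16 / sra 16
    static int32_t norm_bool(int32_t v)    { return v != 0;                   } // subcc / addc

    int main() {
      assert(extend_byte(0xFF) == -1);
      assert(extend_char(0x1FFFF) == 0xFFFF);
      assert(extend_short(0xFFFF) == -1);
      assert(norm_bool(42) == 1 && norm_bool(0) == 0);
      return 0;
    }
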
duke@0 2309
duke@0 2310 // must we block?
duke@0 2311
duke@0 2312 // Block, if necessary, before resuming in _thread_in_Java state.
duke@0 2313 // In order for GC to work, don't clear the last_Java_sp until after blocking.
duke@0 2314 { Label no_block;
twisti@720 2315 AddressLiteral sync_state(SafepointSynchronize::address_of_state());
duke@0 2316
duke@0 2317 // Switch thread to "native transition" state before reading the synchronization state.
duke@0 2318 // This additional state is necessary because reading and testing the synchronization
duke@0 2319 // state is not atomic w.r.t. GC, as this scenario demonstrates:
duke@0 2320 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
duke@0 2321 // VM thread changes sync state to synchronizing and suspends threads for GC.
duke@0 2322 // Thread A is resumed to finish this native method, but doesn't block here since it
duke@0 2323 // didn't see any synchronization in progress, and escapes.
duke@0 2324 __ set(_thread_in_native_trans, G3_scratch);
twisti@720 2325 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
duke@0 2326 if(os::is_MP()) {
duke@0 2327 if (UseMembar) {
duke@0 2328 // Force this write out before the read below
duke@0 2329 __ membar(Assembler::StoreLoad);
duke@0 2330 } else {
duke@0 2331 // Write serialization page so VM thread can do a pseudo remote membar.
duke@0 2332 // We use the current thread pointer to calculate a thread specific
duke@0 2333 // offset to write to within the page. This minimizes bus traffic
duke@0 2334 // due to cache line collision.
duke@0 2335 __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
duke@0 2336 }
duke@0 2337 }
duke@0 2338 __ load_contents(sync_state, G3_scratch);
duke@0 2339 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
duke@0 2340
duke@0 2341 Label L;
twisti@720 2342 Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
duke@0 2343 __ br(Assembler::notEqual, false, Assembler::pn, L);
twisti@720 2344 __ delayed()->ld(suspend_state, G3_scratch);
duke@0 2345 __ cmp(G3_scratch, 0);
duke@0 2346 __ br(Assembler::equal, false, Assembler::pt, no_block);
duke@0 2347 __ delayed()->nop();
duke@0 2348 __ bind(L);
duke@0 2349
duke@0 2350 // Block. Save any potential method result value before the operation and
duke@0 2351 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
duke@0 2352 // lets us share the oopMap we used when we went native rather than create
duke@0 2353 // a distinct one for this pc
duke@0 2354 //
duke@0 2355 save_native_result(masm, ret_type, stack_slots);
duke@0 2356 __ call_VM_leaf(L7_thread_cache,
duke@0 2357 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
duke@0 2358 G2_thread);
duke@0 2359
duke@0 2360 // Restore any method result value
duke@0 2361 restore_native_result(masm, ret_type, stack_slots);
duke@0 2362 __ bind(no_block);
duke@0 2363 }
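
The UseMembar variant above is simply a StoreLoad fence between publishing _thread_in_native_trans and reading the safepoint state; the serialization page is a cheaper substitute for the same ordering. Sketched with C++11 atomics (a simplification of the real protocol, not VM code):

    #include <atomic>

    std::atomic<int> thread_state(0);     // stand-ins for the VM's fields
    std::atomic<int> safepoint_state(0);
    const int kInNativeTrans = 1, kNotSynchronized = 0;

    // True if the returning thread must block for a safepoint.
    bool needs_to_block() {
      thread_state.store(kInNativeTrans, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad membar
      return safepoint_state.load(std::memory_order_relaxed) != kNotSynchronized;
    }

    int main() { return needs_to_block() ? 1 : 0; }
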
duke@0 2364
duke@0 2365 // thread state is thread_in_native_trans. Any safepoint blocking has already
duke@0 2366 // happened so we can now change state to _thread_in_Java.
duke@0 2367
duke@0 2368
duke@0 2369 __ set(_thread_in_Java, G3_scratch);
twisti@720 2370 __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
duke@0 2371
duke@0 2372
duke@0 2373 Label no_reguard;
twisti@720 2374 __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
duke@0 2375 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled);
duke@0 2376 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard);
duke@0 2377 __ delayed()->nop();
duke@0 2378
duke@0 2379 save_native_result(masm, ret_type, stack_slots);
duke@0 2380 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
duke@0 2381 __ delayed()->nop();
duke@0 2382
duke@0 2383 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@0 2384 restore_native_result(masm, ret_type, stack_slots);
duke@0 2385
duke@0 2386 __ bind(no_reguard);
duke@0 2387
duke@0 2388 // Handle possible exception (will unlock if necessary)
duke@0 2389
duke@0 2390 // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
duke@0 2391
duke@0 2392 // Unlock
duke@0 2393 if (method->is_synchronized()) {
duke@0 2394 Label done;
duke@0 2395 Register I2_ex_oop = I2;
duke@0 2396 const Register L3_box = L3;
duke@0 2397 // Get locked oop from the handle we passed to jni
duke@0 2398 __ ld_ptr(L6_handle, 0, L4);
duke@0 2399 __ add(SP, lock_offset+STACK_BIAS, L3_box);
duke@0 2400 // Must save pending exception around the slow-path VM call. Since it's a
duke@0 2401 // leaf call, the pending exception (if any) can be kept in a register.
duke@0 2402 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
duke@0 2403 // Now unlock
duke@0 2404 // (Roop, Rmark, Rbox, Rscratch)
duke@0 2405 __ compiler_unlock_object(L4, L1, L3_box, L2);
duke@0 2406 __ br(Assembler::equal, false, Assembler::pt, done);
duke@0 2407 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box);
duke@0 2408
duke@0 2409 // save and restore any potential method result value around the unlocking
duke@0 2410 // operation. Will save in I0 (or stack for FP returns).
duke@0 2411 save_native_result(masm, ret_type, stack_slots);
duke@0 2412
duke@0 2413 // Must clear pending-exception before re-entering the VM. Since this is
duke@0 2414 // a leaf call, pending-exception-oop can be safely kept in a register.
duke@0 2415 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
duke@0 2416
duke@0 2417 // slow case of monitor exit. Inline a special case of call_VM that
duke@0 2418 // disallows any pending_exception.
duke@0 2419 __ mov(L3_box, O1);
duke@0 2420
duke@0 2421 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
duke@0 2422 __ delayed()->mov(L4, O0); // Need oop in O0
duke@0 2423
duke@0 2424 __ restore_thread(L7_thread_cache); // restore G2_thread
duke@0 2425
duke@0 2426 #ifdef ASSERT
duke@0 2427 { Label L;
duke@0 2428 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
duke@0 2429 __ br_null(O0, false, Assembler::pt, L);
duke@0 2430 __ delayed()->nop();
duke@0 2431 __ stop("no pending exception allowed on exit from IR::monitorexit");
duke@0 2432 __ bind(L);
duke@0 2433 }
duke@0 2434 #endif
duke@0 2435 restore_native_result(masm, ret_type, stack_slots);
duke@0 2436 // check_forward_pending_exception jumps to forward_exception if any pending
duke@0 2437 // exception is set. The forward_exception routine expects to see the
duke@0 2438 // exception in pending_exception and not in a register. Kind of clumsy,
duke@0 2439 // since everyone who branches to forward_exception must have tested
duke@0 2440 // pending_exception first and hence already has it in a register.
duke@0 2441 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
duke@0 2442 __ bind(done);
duke@0 2443 }
duke@0 2444
duke@0 2445 // Tell dtrace about this method exit
duke@0 2446 {
duke@0 2447 SkipIfEqual skip_if(
duke@0 2448 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
duke@0 2449 save_native_result(masm, ret_type, stack_slots);
duke@0 2450 __ set_oop_constant(JNIHandles::make_local(method()), O1);
duke@0 2451 __ call_VM_leaf(L7_thread_cache,
duke@0 2452 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
duke@0 2453 G2_thread, O1);
duke@0 2454 restore_native_result(masm, ret_type, stack_slots);
duke@0 2455 }
duke@0 2456
duke@0 2457 // Clear "last Java frame" SP and PC.
duke@0 2458 __ verify_thread(); // G2_thread must be correct
duke@0 2459 __ reset_last_Java_frame();
duke@0 2460
duke@0 2461 // Unpack oop result
duke@0 2462 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
duke@0 2463 Label L;
duke@0 2464 __ addcc(G0, I0, G0);
duke@0 2465 __ brx(Assembler::notZero, true, Assembler::pt, L);
duke@0 2466 __ delayed()->ld_ptr(I0, 0, I0);
duke@0 2467 __ mov(G0, I0);
duke@0 2468 __ bind(L);
duke@0 2469 __ verify_oop(I0);
duke@0 2470 }
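// The three instructions above dereference the JNI handle to produce the
// raw oop, treating a NULL handle as a NULL result. As a C sketch
// (illustrative only):
//
//   result = (handle == NULL) ? NULL : *handle;  // ld_ptr sits in the delay slot
//
// The annulled branch makes the delay-slot load execute only when the
// handle is non-NULL.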
duke@0 2471
duke@0 2472 // reset handle block
duke@0 2473 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
duke@0 2474 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes());
duke@0 2475
duke@0 2476 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
duke@0 2477 check_forward_pending_exception(masm, G3_scratch);
duke@0 2478
duke@0 2479
duke@0 2480 // Return
duke@0 2481
duke@0 2482 #ifndef _LP64
duke@0 2483 if (ret_type == T_LONG) {
duke@0 2484
duke@0 2485 // Must leave the proper result in O0/O1 and G1 (c2/tiered only)
duke@0 2486 __ sllx(I0, 32, G1); // Shift bits into high G1
duke@0 2487 __ srl (I1, 0, I1); // Zero extend I1 (it becomes O1 after the restore; harmless?)
duke@0 2488 __ or3 (I1, G1, G1); // OR 64 bits into G1
duke@0 2489 }
duke@0 2490 #endif
duke@0 2491
duke@0 2492 __ ret();
duke@0 2493 __ delayed()->restore();
duke@0 2494
duke@0 2495 __ flush();
duke@0 2496
duke@0 2497 nmethod *nm = nmethod::new_native_nmethod(method,
duke@0 2498 masm->code(),
duke@0 2499 vep_offset,
duke@0 2500 frame_complete,
duke@0 2501 stack_slots / VMRegImpl::slots_per_word,
duke@0 2502 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
duke@0 2503 in_ByteSize(lock_offset),
duke@0 2504 oop_maps);
duke@0 2505 return nm;
duke@0 2506
duke@0 2507 }
duke@0 2508
kamg@124 2509 #ifdef HAVE_DTRACE_H
kamg@124 2510 // ---------------------------------------------------------------------------
kamg@124 2511 // Generate a dtrace nmethod for a given signature. The method takes arguments
kamg@124 2512 // in the Java compiled-code convention, marshals them to the native
kamg@124 2513 // ABI and then leaves nops at the position where you would expect to call a
kamg@124 2514 // native function. When the probe is enabled the nops are replaced with a trap
kamg@124 2515 // instruction that dtrace inserts, and the trap causes a notification
kamg@124 2516 // to dtrace.
kamg@124 2517 //
kamg@124 2518 // The probes are only able to take primitive types and java/lang/String as
kamg@124 2519 // arguments. No other Java types are allowed. Strings are converted to UTF-8
kamg@124 2520 // strings so that from dtrace's point of view Java strings appear as C
kamg@124 2521 // strings. There is an arbitrary fixed limit on the total space that a method
kamg@124 2522 // can use for converting the strings (256 chars per string in the signature),
kamg@124 2523 // so any Java string larger than this is truncated.
kamg@124 2524
kamg@124 2525 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
kamg@124 2526 static bool offsets_initialized = false;
kamg@124 2527
kamg@124 2528 static VMRegPair reg64_to_VMRegPair(Register r) {
kamg@124 2529 VMRegPair ret;
kamg@124 2530 if (wordSize == 8) {
kamg@124 2531 ret.set2(r->as_VMReg());
kamg@124 2532 } else {
kamg@124 2533 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
kamg@124 2534 }
kamg@124 2535 return ret;
kamg@124 2536 }
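// For example (illustrative): on a 64-bit VM reg64_to_VMRegPair(L2)
// describes the single 64-bit register L2, while on a 32-bit VM it
// describes the adjacent pair L2/L3 (r and its successor), with one half
// of the 64-bit value in each register.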
kamg@124 2537
kamg@124 2538
kamg@124 2539 nmethod *SharedRuntime::generate_dtrace_nmethod(
kamg@124 2540 MacroAssembler *masm, methodHandle method) {
kamg@124 2541
kamg@124 2542
kamg@124 2543 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
kamg@124 2544 // be single threaded in this method.
kamg@124 2545 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
kamg@124 2546
kamg@124 2547 // Fill in the signature array, for the calling-convention call.
kamg@124 2548 int total_args_passed = method->size_of_parameters();
kamg@124 2549
kamg@124 2550 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
kamg@124 2551 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
kamg@124 2552
kamg@124 2553 // The signature we are going to use for the trap that dtrace will see:
kamg@124 2554 // java/lang/String is converted to a native string, "this" is dropped and
kamg@124 2555 // any other object is converted to NULL. (A one-slot java/lang/Long object
kamg@124 2556 // reference is converted to a two-slot long, which is why we double the allocation.)
kamg@124 2557 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
kamg@124 2558 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
kamg@124 2559
kamg@124 2560 int i=0;
kamg@124 2561 int total_strings = 0;
kamg@124 2562 int first_arg_to_pass = 0;
kamg@124 2563 int total_c_args = 0;
kamg@124 2564
kamg@124 2565 // Skip the receiver as dtrace doesn't want to see it
kamg@124 2566 if( !method->is_static() ) {
kamg@124 2567 in_sig_bt[i++] = T_OBJECT;
kamg@124 2568 first_arg_to_pass = 1;
kamg@124 2569 }
kamg@124 2570
kamg@124 2571 SignatureStream ss(method->signature());
kamg@124 2572 for ( ; !ss.at_return_type(); ss.next()) {
kamg@124 2573 BasicType bt = ss.type();
kamg@124 2574 in_sig_bt[i++] = bt; // Collect remaining bits of signature
kamg@124 2575 out_sig_bt[total_c_args++] = bt;
kamg@124 2576 if( bt == T_OBJECT) {
kamg@124 2577 symbolOop s = ss.as_symbol_or_null();
kamg@124 2578 if (s == vmSymbols::java_lang_String()) {
kamg@124 2579 total_strings++;
kamg@124 2580 out_sig_bt[total_c_args-1] = T_ADDRESS;
kamg@124 2581 } else if (s == vmSymbols::java_lang_Boolean() ||
kamg@124 2582 s == vmSymbols::java_lang_Byte()) {
kamg@124 2583 out_sig_bt[total_c_args-1] = T_BYTE;
kamg@124 2584 } else if (s == vmSymbols::java_lang_Character() ||
kamg@124 2585 s == vmSymbols::java_lang_Short()) {
kamg@124 2586 out_sig_bt[total_c_args-1] = T_SHORT;
kamg@124 2587 } else if (s == vmSymbols::java_lang_Integer() ||
kamg@124 2588 s == vmSymbols::java_lang_Float()) {
kamg@124 2589 out_sig_bt[total_c_args-1] = T_INT;
kamg@124 2590 } else if (s == vmSymbols::java_lang_Long() ||
kamg@124 2591 s == vmSymbols::java_lang_Double()) {
kamg@124 2592 out_sig_bt[total_c_args-1] = T_LONG;
kamg@124 2593 out_sig_bt[total_c_args++] = T_VOID;
kamg@124 2594 }
kamg@124 2595 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
kamg@124 2596 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
kamg@124 2597 // We convert double to long
kamg@124 2598 out_sig_bt[total_c_args-1] = T_LONG;
kamg@124 2599 out_sig_bt[total_c_args++] = T_VOID;
kamg@124 2600 } else if ( bt == T_FLOAT) {
kamg@124 2601 // We convert float to int
kamg@124 2602 out_sig_bt[total_c_args-1] = T_INT;
kamg@124 2603 }
kamg@124 2604 }
kamg@124 2605
kamg@124 2606 assert(i==total_args_passed, "validly parsed signature");
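// Worked example (illustrative): a virtual method with signature
// (ILjava/lang/String;J)V makes the loop above build
//   in_sig_bt  = { T_OBJECT /*this*/, T_INT, T_OBJECT, T_LONG, T_VOID }
//   out_sig_bt = { T_INT, T_ADDRESS, T_LONG, T_VOID }
// so total_args_passed == 5 and total_c_args == 4: the receiver exists
// only on the Java side, the String becomes a native pointer (T_ADDRESS)
// and the long keeps its T_VOID filler slot.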
kamg@124 2607
kamg@124 2608 // Now get the compiled-Java layout as input arguments
kamg@124 2609 int comp_args_on_stack;
kamg@124 2610 comp_args_on_stack = SharedRuntime::java_calling_convention(
kamg@124 2611 in_sig_bt, in_regs, total_args_passed, false);
kamg@124 2612
kamg@124 2613 // We have received a description of where all the Java args are located
kamg@124 2614 // on entry to the wrapper. We need to convert these args to where
kamg@124 2615 // a native (non-JNI) function would expect them. To figure out
kamg@124 2616 // where they go we convert the Java signature to a C signature and remove
kamg@124 2617 // T_VOID for any long/double we might have received.
kamg@124 2618
kamg@124 2619
kamg@124 2620 // Now figure out where the args must be stored and how much stack space
kamg@124 2621 // they require (neglecting out_preserve_stack_slots but including space for
kamg@124 2622 // storing the first six register arguments). It's weird; see int_stk_helper.
kamg@124 2623 //
kamg@124 2624 int out_arg_slots;
kamg@124 2625 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
kamg@124 2626
kamg@124 2627 // Calculate the total number of stack slots we will need.
kamg@124 2628
kamg@124 2629 // First count the abi requirement plus all of the outgoing args
kamg@124 2630 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
kamg@124 2631
kamg@124 2632 // Plus a temp for possible conversion of float/double/long register args
kamg@124 2633
kamg@124 2634 int conversion_temp = stack_slots;
kamg@124 2635 stack_slots += 2;
kamg@124 2636
kamg@124 2637
kamg@124 2638 // Now space for the string(s) we must convert
kamg@124 2639
kamg@124 2640 int string_locs = stack_slots;
kamg@124 2641 stack_slots += total_strings *
kamg@124 2642 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
kamg@124 2643
kamg@124 2644 // OK, the space we have allocated will look like:
kamg@124 2645 //
kamg@124 2646 //
kamg@124 2647 // FP-> | |
kamg@124 2648 // |---------------------|
kamg@124 2649 // | string[n] |
kamg@124 2650 // |---------------------| <- string_locs[n]
kamg@124 2651 // | string[n-1] |
kamg@124 2652 // |---------------------| <- string_locs[n-1]
kamg@124 2653 // | ... |
kamg@124 2654 // | ... |
kamg@124 2655 // |---------------------| <- string_locs[1]
kamg@124 2656 // | string[0] |
kamg@124 2657 // |---------------------| <- string_locs[0]
kamg@124 2658 // | temp |
kamg@124 2659 // |---------------------| <- conversion_temp
kamg@124 2660 // | outbound memory |
kamg@124 2661 // | based arguments |
kamg@124 2662 // | |
kamg@124 2663 // |---------------------|
kamg@124 2664 // | |
kamg@124 2665 // SP-> | out_preserved_slots |
kamg@124 2666 //
kamg@124 2667 //
kamg@124 2668
kamg@124 2669 // Now compute the actual number of stack words we need, rounding to keep
kamg@124 2670 // the stack properly aligned.
kamg@124 2671 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
kamg@124 2672
kamg@124 2673 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
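// Worked example with made-up numbers: out_preserve == 32 slots,
// out_arg_slots == 14, and two string args with max_dtrace_string_size == 256
// give
//   stack_slots = 32 + 14           = 46   // abi area + outgoing args
//   stack_slots = 46 + 2            = 48   // conversion temp
//   stack_slots = 48 + 2 * (256/4)  = 176  // two string buffers
// and with 64-bit rounding round_to(176, 4 * 2) == 176, i.e.
// stack_size = 176 * 4 = 704 bytes.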
kamg@124 2674
kamg@124 2675 intptr_t start = (intptr_t)__ pc();
kamg@124 2676
kamg@124 2677 // First, make an inline-cache (IC) check to see if we should even be here
kamg@124 2678
kamg@124 2679 {
kamg@124 2680 Label L;
kamg@124 2681 const Register temp_reg = G3_scratch;
twisti@720 2682 AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
kamg@124 2683 __ verify_oop(O0);
kamg@124 2684 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
kamg@124 2685 __ cmp(temp_reg, G5_inline_cache_reg);
kamg@124 2686 __ brx(Assembler::equal, true, Assembler::pt, L);
kamg@124 2687 __ delayed()->nop();
kamg@124 2688
twisti@720 2689 __ jump_to(ic_miss, temp_reg);
kamg@124 2690 __ delayed()->nop();
kamg@124 2691 __ align(CodeEntryAlignment);
kamg@124 2692 __ bind(L);
kamg@124 2693 }
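// In C terms the inline-cache check above is (illustrative names only):
//
//   if (receiver->klass() != expected_klass /* G5_inline_cache_reg */)
//     goto ic_miss_stub;   // re-resolve the call site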
kamg@124 2694
kamg@124 2695 int vep_offset = ((intptr_t)__ pc()) - start;
kamg@124 2696
kamg@124 2697
kamg@124 2698 // The instruction at the verified entry point must be 5 bytes or longer
kamg@124 2699 // because it can be patched on the fly by make_non_entrant. The stack bang
kamg@124 2700 // instruction fits that requirement.
kamg@124 2701
kamg@124 2702 // Generate stack overflow check before creating frame
kamg@124 2703 __ generate_stack_overflow_check(stack_size);
kamg@124 2704
kamg@124 2705 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
kamg@124 2706 "valid size for make_non_entrant");
kamg@124 2707
kamg@124 2708 // Generate a new frame for the wrapper.
kamg@124 2709 __ save(SP, -stack_size, SP);
kamg@124 2710
kamg@124 2711 // Frame is now completed as far as size and linkage.
kamg@124 2712
kamg@124 2713 int frame_complete = ((intptr_t)__ pc()) - start;
kamg@124 2714
kamg@124 2715 #ifdef ASSERT
kamg@124 2716 bool reg_destroyed[RegisterImpl::number_of_registers];
kamg@124 2717 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
kamg@124 2718 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
kamg@124 2719 reg_destroyed[r] = false;
kamg@124 2720 }
kamg@124 2721 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
kamg@124 2722 freg_destroyed[f] = false;
kamg@124 2723 }
kamg@124 2724
kamg@124 2725 #endif /* ASSERT */
kamg@124 2726
kamg@124 2727 VMRegPair zero;
kamg@182 2728 const Register g0 = G0; // without this we get a compiler warning (why??)
kamg@182 2729 zero.set2(g0->as_VMReg());
kamg@124 2730
kamg@124 2731 int c_arg, j_arg;
kamg@124 2732
kamg@124 2733 Register conversion_off = noreg;
kamg@124 2734
kamg@124 2735 for (j_arg = first_arg_to_pass, c_arg = 0 ;
kamg@124 2736 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
kamg@124 2737
kamg@124 2738 VMRegPair src = in_regs[j_arg];
kamg@124 2739 VMRegPair dst = out_regs[c_arg];
kamg@124 2740
kamg@124 2741 #ifdef ASSERT
kamg@124 2742 if (src.first()->is_Register()) {
kamg@124 2743 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
kamg@124 2744 } else if (src.first()->is_FloatRegister()) {
kamg@124 2745 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
kamg@124 2746 FloatRegisterImpl::S)], "ack!");
kamg@124 2747 }
kamg@124 2748 if (dst.first()->is_Register()) {
kamg@124 2749 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
kamg@124 2750 } else if (dst.first()->is_FloatRegister()) {
kamg@124 2751 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
kamg@124 2752 FloatRegisterImpl::S)] = true;
kamg@124 2753 }
kamg@124 2754 #endif /* ASSERT */
kamg@124 2755
kamg@124 2756 switch (in_sig_bt[j_arg]) {
kamg@124 2757 case T_ARRAY:
kamg@124 2758 case T_OBJECT:
kamg@124 2759 {
kamg@124 2760 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
kamg@124 2761 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
kamg@124 2762 // need to unbox a one-slot value
kamg@124 2763 Register in_reg = L0;
kamg@124 2764 Register tmp = L2;
kamg@124 2765 if ( src.first()->is_reg() ) {
kamg@124 2766 in_reg = src.first()->as_Register();
kamg@124 2767 } else {
kamg@124 2768 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
kamg@124 2769 "must be");
kamg@124 2770 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
kamg@124 2771 }
kamg@124 2772 // If the final destination is an acceptable register
kamg@124 2773 if ( dst.first()->is_reg() ) {
kamg@124 2774 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
kamg@124 2775 tmp = dst.first()->as_Register();
kamg@124 2776 }
kamg@124 2777 }
kamg@124 2778
kamg@124 2779 Label skipUnbox;
kamg@124 2780 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
kamg@124 2781 __ mov(G0, tmp->successor());
kamg@124 2782 }
kamg@124 2783 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
kamg@124 2784 __ delayed()->mov(G0, tmp);
kamg@124 2785
kvn@153 2786 BasicType bt = out_sig_bt[c_arg];
kvn@153 2787 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
kvn@153 2788 switch (bt) {
kamg@124 2789 case T_BYTE:
kamg@124 2790 __ ldub(in_reg, box_offset, tmp); break;
kamg@124 2791 case T_SHORT:
kamg@124 2792 __ lduh(in_reg, box_offset, tmp); break;
kamg@124 2793 case T_INT:
kamg@124 2794 __ ld(in_reg, box_offset, tmp); break;
kamg@124 2795 case T_LONG:
kamg@124 2796 __ ld_long(in_reg, box_offset, tmp); break;
kamg@124 2797 default: ShouldNotReachHere();
kamg@124 2798 }
kamg@124 2799
kamg@124 2800 __ bind(skipUnbox);
kamg@124 2801 // If tmp wasn't final destination copy to final destination
kamg@124 2802 if (tmp == L2) {
kamg@124 2803 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
kamg@124 2804 if (out_sig_bt[c_arg] == T_LONG) {
kamg@124 2805 long_move(masm, tmp_as_VM, dst);
kamg@124 2806 } else {
kamg@124 2807 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
kamg@124 2808 }
kamg@124 2809 }
kamg@124 2810 if (out_sig_bt[c_arg] == T_LONG) {
kamg@124 2811 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
kamg@124 2812 ++c_arg; // move over the T_VOID to keep the loop indices in sync
kamg@124 2813 }
kamg@124 2814 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
kamg@124 2815 Register s =
kamg@124 2816 src.first()->is_reg() ? src.first()->as_Register() : L2;
kamg@124 2817 Register d =
kamg@124 2818 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
kamg@124 2819
kamg@124 2820 // We store the oop now so that the conversion pass can reach it
kamg@124 2821 // while in the inner frame. This will be the only store if
kamg@124 2822 // the oop is NULL.
kamg@124 2823 if (s != L2) {
kamg@124 2824 // src is register
kamg@124 2825 if (d != L2) {
kamg@124 2826 // dst is register
kamg@124 2827 __ mov(s, d);
kamg@124 2828 } else {
kamg@124 2829 assert(Assembler::is_simm13(reg2offset(dst.first()) +
kamg@124 2830 STACK_BIAS), "must be");
kamg@124 2831 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
kamg@124 2832 }
kamg@124 2833 } else {
kamg@124 2834 // src not a register
kamg@124 2835 assert(Assembler::is_simm13(reg2offset(src.first()) +
kamg@124 2836 STACK_BIAS), "must be");
kamg@124 2837 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
kamg@124 2838 if (d == L2) {
kamg@124 2839 assert(Assembler::is_simm13(reg2offset(dst.first()) +
kamg@124 2840 STACK_BIAS), "must be");
kamg@124 2841 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
kamg@124 2842 }
kamg@124 2843 }
kamg@124 2844 } else if (out_sig_bt[c_arg] != T_VOID) {
kamg@124 2845 // Convert the arg to NULL
kamg@124 2846 if (dst.first()->is_reg()) {
kamg@124 2847 __ mov(G0, dst.first()->as_Register());
kamg@124 2848 } else {
kamg@124 2849 assert(Assembler::is_simm13(reg2offset(dst.first()) +
kamg@124 2850 STACK_BIAS), "must be");
kamg@124 2851 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
kamg@124 2852 }
kamg@124 2853 }
kamg@124 2854 }
kamg@124 2855 break;
kamg@124 2856 case T_VOID:
kamg@124 2857 break;
kamg@124 2858
kamg@124 2859 case T_FLOAT:
kamg@124 2860 if (src.first()->is_stack()) {
kamg@124 2861 // Stack to stack/reg is simple
kamg@124 2862 move32_64(masm, src, dst);
kamg@124 2863 } else {
kamg@124 2864 if (dst.first()->is_reg()) {
kamg@124 2865 // freg -> reg
kamg@124 2866 int off =
kamg@124 2867 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
kamg@124 2868 Register d = dst.first()->as_Register();
kamg@124 2869 if (Assembler::is_simm13(off)) {
kamg@124 2870 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@124 2871 SP, off);
kamg@124 2872 __ ld(SP, off, d);
kamg@124 2873 } else {
kamg@124 2874 if (conversion_off == noreg) {
kamg@124 2875 __ set(off, L6);
kamg@124 2876 conversion_off = L6;
kamg@124 2877 }
kamg@124 2878 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@124 2879 SP, conversion_off);
kamg@124 2880 __ ld(SP, conversion_off , d);
kamg@124 2881 }
kamg@124 2882 } else {
kamg@124 2883 // freg -> mem
kamg@124 2884 int off = STACK_BIAS + reg2offset(dst.first());
kamg@124 2885 if (Assembler::is_simm13(off)) {
kamg@124 2886 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@124 2887 SP, off);
kamg@124 2888 } else {
kamg@124 2889 if (conversion_off == noreg) {
kamg@124 2890 __ set(off, L6);
kamg@124 2891 conversion_off = L6;
kamg@124 2892 }
kamg@124 2893 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
kamg@124 2894 SP, conversion_off);
kamg@124 2895 }
kamg@124 2896 }
kamg@124 2897 }
kamg@124 2898 break;
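// SPARC has no direct move between floating-point and integer registers,
// so a float arg headed for an integer register takes a round-trip through
// a stack temp (the stf/ld pair above); in C terms it is roughly
// memcpy(&int_bits, &float_val, 4). When the temp's offset no longer fits
// in a 13-bit simm13 immediate it is materialized once in L6
// (conversion_off) and reused for the remaining conversions.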
kamg@124 2899
kamg@124 2900 case T_DOUBLE:
kamg@124 2901 assert( j_arg + 1 < total_args_passed &&
kamg@124 2902 in_sig_bt[j_arg + 1] == T_VOID &&
kamg@124 2903 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
kamg@124 2904 if (src.first()->is_stack()) {
kamg@124 2905 // Stack to stack/reg is simple
kamg@124 2906 long_move(masm, src, dst);
kamg@124 2907 } else {
kamg@124 2908 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
kamg@124 2909
kamg@124 2910 // Destination could be an odd reg on 32bit in which case
kamg@124 2911 // we can't load directly to the destination.
kamg@124 2912
kamg@124 2913 if (!d->is_even() && wordSize == 4) {
kamg@124 2914 d = L2;
kamg@124 2915 }
kamg@124 2916 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
kamg@124 2917 if (Assembler::is_simm13(off)) {
kamg@124 2918 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
kamg@124 2919 SP, off);
kamg@124 2920 __ ld_long(SP, off, d);
kamg@124 2921 } else {
kamg@124 2922 if (conversion_off == noreg) {
kamg@124 2923 __ set(off, L6);
kamg@124 2924 conversion_off = L6;
kamg@124 2925 }
kamg@124 2926 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
kamg@124 2927 SP, conversion_off);
kamg@124 2928 __ ld_long(SP, conversion_off, d);
kamg@124 2929 }
kamg@124 2930 if (d == L2) {
kamg@124 2931 long_move(masm, reg64_to_VMRegPair(L2), dst);
kamg@124 2932 }
kamg@124 2933 }
kamg@124 2934 break;
kamg@124 2935
kamg@124 2936 case T_LONG :
kamg@124 2937 // 32bit can't do a split move of something like g1 -> O0, O1
kamg@124 2938 // so use a memory temp
kamg@124 2939 if (src.is_single_phys_reg() && wordSize == 4) {
kamg@124 2940 Register tmp = L2;
kamg@124 2941 if (dst.first()->is_reg() &&
kamg@124 2942 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
kamg@124 2943 tmp = dst.first()->as_Register();
kamg@124 2944 }
kamg@124 2945
kamg@124 2946 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
kamg@124 2947 if (Assembler::is_simm13(off)) {
kamg@124 2948 __ stx(src.first()->as_Register(), SP, off);
kamg@124 2949 __ ld_long(SP, off, tmp);
kamg@124 2950 } else {
kamg@124 2951 if (conversion_off == noreg) {
kamg@124 2952 __ set(off, L6);
kamg@124 2953 conversion_off = L6;
kamg@124 2954 }
kamg@124 2955 __ stx(src.first()->as_Register(), SP, conversion_off);
kamg@124 2956 __ ld_long(SP, conversion_off, tmp);
kamg@124 2957 }
kamg@124 2958
kamg@124 2959 if (tmp == L2) {
kamg@124 2960 long_move(masm, reg64_to_VMRegPair(L2), dst);
kamg@124 2961 }
kamg@124 2962 } else {
kamg@124 2963 long_move(masm, src, dst);
kamg@124 2964 }
kamg@124 2965 break;
kamg@124 2966
kamg@124 2967 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // falls through in product builds
kamg@124 2968
kamg@124 2969 default:
kamg@124 2970 move32_64(masm, src, dst);
kamg@124 2971 }
kamg@124 2972 }
kamg@124 2973
kamg@124 2974
kamg@124 2975 // If we have any strings we must store any register-based arg to the stack.
kamg@124 2976 // This includes any still-live float argument registers too.
kamg@124 2977
kamg@124 2978 if (total_strings > 0 ) {
kamg@124 2979
kamg@124 2980 // protect all the arg registers
kamg@124 2981 __ save_frame(0);
kamg@124 2982 __ mov(G2_thread, L7_thread_cache);
kamg@124 2983 const Register L2_string_off = L2;
kamg@124 2984
kamg@124 2985 // Get first string offset
kamg@124 2986 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
kamg@124 2987
kamg@124 2988 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
kamg@124 2989 if (out_sig_bt[c_arg] == T_ADDRESS) {
kamg@124 2990
kamg@124 2991 VMRegPair dst = out_regs[c_arg];
kamg@124 2992 const Register d = dst.first()->is_reg() ?
kamg@124 2993 dst.first()->as_Register()->after_save() : noreg;
kamg@124 2994
kamg@124 2995 // It's a string; the oop was already copied to the out arg
kamg@124 2996 // position
kamg@124 2997 if (d != noreg) {
kamg@124 2998 __ mov(d, O0);
kamg@124 2999 } else {
kamg@124 3000 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
kamg@124 3001 "must be");
kamg@124 3002 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
kamg@124 3003 }
kamg@124 3004 Label skip;
kamg@124 3005
kamg@124 3006 __ br_null(O0, false, Assembler::pn, skip);
kamg@124 3007 __ delayed()->add(FP, L2_string_off, O1);
kamg@124 3008
kamg@124 3009 if (d != noreg) {
kamg@124 3010 __ mov(O1, d);
kamg@124 3011 } else {
kamg@124 3012 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
kamg@124 3013 "must be");
kamg@124 3014 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
kamg@124 3015 }
kamg@124 3016
kamg@124 3017 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
kamg@124 3018 relocInfo::runtime_call_type);
kamg@124 3019 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
kamg@124 3020
kamg@124 3021 __ bind(skip);
kamg@124 3022
kamg@124 3023 }
kamg@124 3024
kamg@124 3025 }
kamg@124 3026 __ mov(L7_thread_cache, G2_thread);
kamg@124 3027 __ restore();
kamg@124 3028
kamg@124 3029 }
kamg@124 3030
kamg@124 3031
kamg@124 3032 // OK, now we are done. Place the nop that dtrace wants so that it can
kamg@124 3033 // patch in the trap instruction.
kamg@124 3034
kamg@124 3035 int patch_offset = ((intptr_t)__ pc()) - start;
kamg@124 3036
kamg@124 3037 __ nop();
kamg@124 3038
kamg@124 3039
kamg@124 3040 // Return
kamg@124 3041
kamg@124 3042 __ ret();
kamg@124 3043 __ delayed()->restore();
kamg@124 3044
kamg@124 3045 __ flush();
kamg@124 3046
kamg@124 3047 nmethod *nm = nmethod::new_dtrace_nmethod(
kamg@124 3048 method, masm->code(), vep_offset, patch_offset, frame_complete,
kamg@124 3049 stack_slots / VMRegImpl::slots_per_word);
kamg@124 3050 return nm;
kamg@124 3051
kamg@124 3052 }
kamg@124 3053
kamg@124 3054 #endif // HAVE_DTRACE_H
kamg@124 3055
duke@0 3056 // This function returns the adjustment size (in number of words) to a c2i
duke@0 3057 // adapter activation for use during deoptimization.
duke@0 3058 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
duke@0 3059 assert(callee_locals >= callee_parameters,
duke@0 3060 "test and remove; got more parms than locals");
duke@0 3061 if (callee_locals < callee_parameters)
duke@0 3062 return 0; // No adjustment for negative locals
duke@0 3063 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords();
duke@0 3064 return round_to(diff, WordsPerLong);
duke@0 3065 }
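// Worked example (illustrative): callee_parameters == 2 and
// callee_locals == 5 give diff == 3 * stackElementWords(); assuming one
// word per stack element, the result is round_to(3, WordsPerLong) == 4
// words of adjustment.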
duke@0 3066
duke@0 3067 // "Top of Stack" slots that may be unused by the calling convention but must
duke@0 3068 // otherwise be preserved.
duke@0 3069 // On Intel these are not necessary and the value can be zero.
duke@0 3070 // On Sparc this describes the words reserved for storing a register window
duke@0 3071 // when an interrupt occurs.
duke@0 3072 uint SharedRuntime::out_preserve_stack_slots() {
duke@0 3073 return frame::register_save_words * VMRegImpl::slots_per_word;
duke@0 3074 }
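// For example (illustrative): with frame::register_save_words == 16 (the
// eight %i plus eight %l registers of a window), a 64-bit VM reports
// 16 * 2 == 32 stack slots that callers must leave untouched.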
duke@0 3075
duke@0 3076 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
duke@0 3077 //
duke@0 3078 // Common out the new frame generation for deopt and uncommon trap
duke@0 3079 //
duke@0 3080 Register G3pcs = G3_scratch; // Array of new pcs (input)
duke@0 3081 Register Oreturn0 = O0;
duke@0 3082 Register Oreturn1 = O1;
duke@0 3083 Register O2UnrollBlock = O2;
duke@0 3084 Register O3array = O3; // Array of frame sizes (input)
duke@0 3085 Register O4array_size = O4; // number of frames (input)
duke@0 3086 Register O7frame_size = O7; // frame size (input)
duke@0 3087
duke@0 3088 __ ld_ptr(O3array, 0, O7frame_size);
duke@0 3089 __ sub(G0, O7frame_size, O7frame_size);
duke@0 3090 __ save(SP, O7frame_size, SP);
duke@0 3091 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
duke@0 3092
duke@0 3093 #ifdef ASSERT
duke@0 3094 // make sure that the frames are aligned properly
duke@0 3095 #ifndef _LP64
duke@0 3096 __ btst(wordSize*2-1, SP);
duke@0 3097 __ breakpoint_trap(Assembler::notZero);
duke@0 3098 #endif
duke@0 3099 #endif
duke@0 3100
duke@0 3101 // Deopt needs to pass some extra live values from frame to frame
duke@0 3102
duke@0 3103 if (deopt) {
duke@0 3104 __ mov(Oreturn0->after_save(), Oreturn0);
duke@0 3105 __ mov(Oreturn1->after_save(), Oreturn1);
duke@0 3106 }
duke@0 3107
duke@0 3108 __ mov(O4array_size->after_save(), O4array_size);
duke@0 3109 __ sub(O4array_size, 1, O4array_size);
duke@0 3110 __ mov(O3array->after_save(), O3array);
duke@0 3111 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
duke@0 3112 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
duke@0 3113
duke@0 3114 #ifdef ASSERT
duke@0 3115 // trash registers to show a clear pattern in backtraces
duke@0 3116 __ set(0xDEAD0000, I0);
duke@0 3117 __ add(I0, 2, I1);
duke@0 3118 __ add(I0, 4, I2);
duke@0 3119 __ add(I0, 6, I3);
duke@0 3120 __ add(I0, 8, I4);
duke@0 3121 // Don't touch I5; it could hold a valuable savedSP
duke@0 3122 __ set(0xDEADBEEF, L0);
duke@0 3123 __ mov(L0, L1);
duke@0 3124 __ mov(L0, L2);
duke@0 3125 __ mov(L0, L3);
duke@0 3126 __ mov(L0, L4);
duke@0 3127 __ mov(L0, L5);
duke@0 3128
duke@0 3129 // trash the return value as there is nothing to return yet
duke@0 3130 __ set(0xDEAD0001, O7);
duke@0 3131 #endif
duke@0 3132
duke@0 3133 __ mov(SP, O5_savedSP);
duke@0 3134 }
duke@0 3135
duke@0 3136
duke@0 3137 static void make_new_frames(MacroAssembler* masm, bool deopt) {
duke@0 3138 //
duke@0 3139 // loop through the UnrollBlock info and create new frames
duke@0 3140 //
duke@0 3141 Register G3pcs = G3_scratch;
duke@0 3142 Register Oreturn0 = O0;
duke@0 3143 Register Oreturn1 = O1;
duke@0 3144 Register O2UnrollBlock = O2;
duke@0 3145 Register O3array = O3;
duke@0 3146 Register O4array_size = O4;
duke@0 3147 Label loop;
duke@0 3148
duke@0 3149 // Before we make new frames, check to see if enough stack is available.
duke@0 3150 // Do this after the caller's return address is on top of the stack.
duke@0 3151 if (UseStackBanging) {
duke@0 3152 // Get total frame size for interpreted frames
twisti@720 3153 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
duke@0 3154 __ bang_stack_size(O4, O3, G3_scratch);
duke@0 3155 }
duke@0 3156
twisti@720 3157 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
twisti@720 3158 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
twisti@720 3159 __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
duke@0 3160
duke@0 3161 // Adjust old interpreter frame to make space for new frame's extra java locals
duke@0 3162 //
duke@0 3163 // We capture the original sp for the transition frame only because it is needed in
duke@0 3164 // order to properly calculate interpreter_sp_adjustment. Even though in real life
duke@0 3165 // every interpreter frame captures a savedSP it is only needed at the transition
duke@0 3166 // (fortunately). If we had to have it correct everywhere then we would need to
duke@0 3167 // be told the sp_adjustment for each frame we create. If the frame size array
duke@0 3168 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
duke@0 3169 // for each frame we create and keep up the illusion everywhere.
duke@0 3170 //
duke@0 3171
twisti@720 3172 __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
duke@0 3173 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
duke@0 3174 __ sub(SP, O7, SP);
duke@0 3175
duke@0 3176 #ifdef ASSERT
duke@0 3177 // make sure that there is at least one entry in the array
duke@0 3178 __ tst(O4array_size);
duke@0 3179 __ breakpoint_trap(Assembler::zero);
duke@0 3180 #endif
duke@0 3181
duke@0 3182 // Now push the new interpreter frames
duke@0 3183 __ bind(loop);
duke@0 3184
duke@0 3185 // allocate a new frame, filling the registers
duke@0 3186
duke@0 3187 gen_new_frame(masm, deopt); // allocate an interpreter frame
duke@0 3188
duke@0 3189 __ tst(O4array_size);
duke@0 3190 __ br(Assembler::notZero, false, Assembler::pn, loop);
duke@0 3191 __ delayed()->add(O3array, wordSize, O3array);
duke@0 3192 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
duke@0 3193
duke@0 3194 }
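// In C terms the frame-pushing loop above is roughly (illustrative):
//
//   for (int k = 0; k < number_of_frames; k++) {
//     SP -= frame_sizes[k];   // 'save' opens a new register window
//     I7  = frame_pcs[k];     // becomes the return pc of the frame below
//   }
//   O7 = frame_pcs[number_of_frames];   // final frame's new pc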
duke@0 3195
duke@0 3196 //------------------------------generate_deopt_blob----------------------------
duke@0 3197 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
duke@0 3198 // instead.
duke@0 3199 void SharedRuntime::generate_deopt_blob() {
duke@0 3200 // allocate space for the code
duke@0 3201 ResourceMark rm;
duke@0 3202 // setup code generation tools
duke@0 3203 int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
duke@0 3204 #ifdef _LP64
duke@0 3205 CodeBuffer buffer("deopt_blob", 2100+pad, 512);
duke@0 3206 #else
duke@0 3207 // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
duke@0 3208 // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
duke@0 3209 CodeBuffer buffer("deopt_blob", 1600+pad, 512);
duke@0 3210 #endif /* _LP64 */
duke@0 3211 MacroAssembler* masm = new MacroAssembler(&buffer);
duke@0 3212 FloatRegister Freturn0 = F0;
duke@0 3213 Register Greturn1 = G1;
duke@0 3214 Register Oreturn0 = O0;
duke@0 3215 Register Oreturn1 = O1;
duke@0 3216 Register O2UnrollBlock = O2;
duke@0 3217 Register O3tmp = O3;
duke@0 3218 Register I5exception_tmp = I5;
duke@0 3219 Register G4exception_tmp = G4_scratch;
duke@0 3220 int frame_size_words;
twisti@720 3221 Address saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
duke@0 3222 #if !defined(_LP64) && defined(COMPILER2)
twisti@720 3223 Address saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
duke@0 3224 #endif
duke@0 3225 Label cont;
duke@0 3226
duke@0 3227 OopMapSet *oop_maps = new OopMapSet();
duke@0 3228
duke@0 3229 //
duke@0 3230 // This is the entry point for code which is returning to a de-optimized
duke@0 3231 // frame.
duke@0 3232 // The steps taken by this frame are as follows:
duke@0 3233 // - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
duke@0 3234 // and all potentially live registers (at a pollpoint many registers can be live).
duke@0 3235 //
duke@0 3236 // - call the C routine: Deoptimization::fetch_unroll_info (this function
duke@0 3237 // returns information about the number and size of interpreter frames
duke@0 3238 // which are equivalent to the frame which is being deoptimized)
duke@0 3239 // - deallocate the unpack frame, restoring only result values. Other
duke@0 3240 // volatile registers will now be captured in the vframeArray as needed.
duke@0 3241 // - deallocate the deoptimization frame
duke@0 3242 // - in a loop using the information returned in the previous step
duke@0 3243 // push new interpreter frames (take care to propagate the return
duke@0 3244 // values through each new frame pushed)
duke@0 3245 // - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
duke@0 3246 // - call the C routine: Deoptimization::unpack_frames (this function
duke@0 3247 // lays out values on the interpreter frame which was just created)
duke@0 3248 // - deallocate the dummy unpack_frame
duke@0 3249 // - ensure that all the return values are correctly set and then do
duke@0 3250 // a return to the interpreter entry point
duke@0 3251 //
duke@0 3252 // Refer to the following methods for more information:
duke@0 3253 // - Deoptimization::fetch_unroll_info
duke@0 3254 // - Deoptimization::unpack_frames
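// As a compact sketch, the blob performs (illustrative pseudocode, not
// actual VM calls):
//
//   save_live_registers();
//   info = Deoptimization::fetch_unroll_info(thread);
//   restore_result_registers();
//   pop_deoptimized_frame();
//   make_new_frames(info);                            // interpreter frames
//   Deoptimization::unpack_frames(thread, exec_mode);
//   return_to_topmost_interpreter_frame();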
duke@0 3255
duke@0 3256 OopMap* map = NULL;
duke@0 3257
duke@0 3258 int start = __ offset();
duke@0 3259
duke@0 3260 // restore G2, the trampoline destroyed it
duke@0 3261 __ get_thread();
duke@0 3262
duke@0 3263 // On entry we have been called by the deoptimized nmethod with a call that
duke@0 3264 // replaced the original call (or safepoint polling location) so the deoptimizing
duke@0 3265 // pc is now in O7. Return values are still in the expected places
duke@0 3266
duke@0 3267 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
duke@0 3268 __ ba(false, cont);
duke@0 3269 __ delayed()->mov(Deoptimization::Unpack_deopt, I5exception_tmp);
duke@0 3270
duke@0 3271 int exception_offset = __ offset() - start;
duke@0 3272
duke@0 3273 // restore G2, the trampoline destroyed it
duke@0 3274 __ get_thread();
duke@0 3275
duke@0 3276 // On entry we have been jumped to by the exception handler (or exception_blob
duke@0 3277 // for server). O0 contains the exception oop and O7 contains the original
duke@0 3278 // exception pc. So if we push a frame here, it will look to the
duke@0 3279 // stack-walking code (fetch_unroll_info) just like a normal call, and
duke@0 3280 // state will be extracted normally.
duke@0 3281
duke@0 3282 // save exception oop in JavaThread and fall through into the
duke@0 3283 // exception_in_tls case since they are handled in the same way except
duke@0 3284 // for where the pending exception is kept.
twisti@720 3285 __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
duke@0 3286
duke@0 3287 //
duke@0 3288 // Vanilla deoptimization with an exception pending in exception_oop
duke@0 3289 //
duke@0 3290 int exception_in_tls_offset = __ offset() - start;
duke@0 3291
duke@0 3292 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
duke@0 3293 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
duke@0 3294
duke@0 3295 // Restore G2_thread
duke@0 3296 __ get_thread();
duke@0 3297
duke@0 3298 #ifdef ASSERT
duke@0 3299 {
duke@0 3300 // verify that there is really an exception oop in exception_oop
duke@0 3301 Label has_exception;
twisti@720 3302 __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
duke@0 3303 __ br_notnull(Oexception, false, Assembler::pt, has_exception);
duke@0 3304 __ delayed()-> nop();
duke@0 3305 __ stop("no exception in thread");
duke@0 3306 __ bind(has_exception);
duke@0 3307
duke@0 3308 // verify that there is no pending exception
duke@0 3309 Label no_pending_exception;
twisti@720 3310 Address exception_addr(G2_thread, Thread::pending_exception_offset());
duke@0 3311 __ ld_ptr(exception_addr, Oexception);
duke@0 3312 __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
duke@0 3313 __ delayed()->nop();
duke@0 3314 __ stop("must not have pending exception here");
duke@0 3315 __ bind(no_pending_exception);
duke@0 3316 }
duke@0 3317 #endif
duke@0 3318
duke@0 3319 __ ba(false, cont);
duke@0 3320 __ delayed()->mov(Deoptimization::Unpack_exception, I5exception_tmp);
duke@0 3321
duke@0 3322 //
duke@0 3323 // Reexecute entry, similar to c2 uncommon trap
duke@0 3324 //
duke@0 3325 int reexecute_offset = __ offset() - start;
duke@0 3326
duke@0 3327 // No need to update oop_map as each call to save_live_registers will produce identical oopmap
duke@0 3328 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
duke@0 3329
duke@0 3330 __ mov(Deoptimization::Unpack_reexecute, I5exception_tmp);
duke@0 3331
duke@0 3332 __ bind(cont);
duke@0 3333
duke@0 3334 __ set_last_Java_frame(SP, noreg);
duke@0 3335
duke@0 3336 // do the call by hand so we can get the oopmap
duke@0 3337
duke@0 3338 __ mov(G2_thread, L7_thread_cache);
duke@0 3339 __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
duke@0 3340 __ delayed()->mov(G2_thread, O0);
duke@0 3341
duke@0 3342 // Set an oopmap for the call site; this describes all our saved volatile registers
duke@0 3343
duke@0 3344 oop_maps->add_gc_map( __ offset()-start, map);
duke@0 3345
duke@0 3346 __ mov(L7_thread_cache, G2_thread);
duke@0 3347
duke@0 3348 __ reset_last_Java_frame();
duke@0 3349
duke@0 3350 // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
duke@0 3351 // so this move will survive
duke@0 3352
duke@0 3353 __ mov(I5exception_tmp, G4exception_tmp);
duke@0 3354
duke@0 3355 __ mov(O0, O2UnrollBlock->after_save());
duke@0 3356
duke@0 3357 RegisterSaver::restore_result_registers(masm);
duke@0 3358
duke@0 3359 Label noException;
duke@0 3360 __ cmp(G4exception_tmp, Deoptimization::Unpack_exception); // Was exception pending?
duke@0 3361 __ br(Assembler::notEqual, false, Assembler::pt, noException);
duke@0 3362 __ delayed()->nop();
duke@0 3363
duke@0 3364 // Move the pending exception from exception_oop to Oexception so
duke@0 3365 // the pending exception will be picked up by the interpreter.
duke@0 3366 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
duke@0 3367 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
duke@0 3368 __ bind(noException);
duke@0 3369
duke@0 3370 // deallocate the deoptimization frame taking care to preserve the return values
duke@0 3371 __ mov(Oreturn0, Oreturn0->after_save());
duke@0 3372 __ mov(Oreturn1, Oreturn1->after_save());
duke@0 3373 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
duke@0 3374 __ restore();
duke@0 3375
duke@0 3376 // Allocate new interpreter frame(s) and possible c2i adapter frame
duke@0 3377
duke@0 3378 make_new_frames(masm, true);
duke@0 3379
duke@0 3380 // push a dummy "unpack_frame" taking care of float return values and
duke@0 3381 // call Deoptimization::unpack_frames to have the unpacker layout
duke@0 3382 // information in the interpreter frames just created and then return
duke@0 3383 // to the interpreter entry point
duke@0 3384 __ save(SP, -frame_size_words*wordSize, SP);
duke@0 3385 __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
duke@0 3386 #if !defined(_LP64)
duke@0 3387 #if defined(COMPILER2)
duke@0 3388 if (!TieredCompilation) {
duke@0 3389 // 32-bit 1-register longs return longs in G1
duke@0 3390 __ stx(Greturn1, saved_Greturn1_addr);
duke@0 3391 }
duke@0 3392 #endif
duke@0 3393 __ set_last_Java_frame(SP, noreg);
duke@0 3394 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4exception_tmp);
duke@0 3395 #else
duke@0 3396 // LP64 uses g4 in set_last_Java_frame
duke@0 3397 __ mov(G4exception_tmp, O1);
duke@0 3398 __ set_last_Java_frame(SP, G0);
duke@0 3399 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
duke@0 3400 #endif
duke@0 3401 __ reset_last_Java_frame();
duke@0 3402 __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
duke@0 3403
duke@0 3404 // In tiered we never use C2 to compile methods returning longs so
duke@0 3405 // the result is where we expect it already.
duke@0 3406
duke@0 3407 #if !defined(_LP64) && defined(COMPILER2)
duke@0 3408 // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
duke@0 3409 // I0/I1 if the return value is long. In the tiered world there is
duke@0 3410 // a mismatch between how C1 and C2 return longs, so compilation
duke@0 3411 // of methods which return longs is currently disabled for C2, and
duke@0 3412 // so is this code. Eventually C1 and C2 will do the
duke@0 3413 // same thing for longs in the tiered world.
duke@0 3414 if (!TieredCompilation) {
duke@0 3415 Label not_long;
duke@0 3416 __ cmp(O0,T_LONG);
duke@0 3417 __ br(Assembler::notEqual, false, Assembler::pt, not_long);
duke@0 3418 __ delayed()->nop();
duke@0 3419 __ ldd(saved_Greturn1_addr,I0);
duke@0 3420 __ bind(not_long);
duke@0 3421 }
duke@0 3422 #endif
duke@0 3423 __ ret();
duke@0 3424 __ delayed()->restore();
duke@0 3425
duke@0 3426 masm->flush();
duke@0 3427 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
duke@0 3428 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
duke@0 3429 }
duke@0 3430
duke@0 3431 #ifdef COMPILER2
duke@0 3432
duke@0 3433 //------------------------------generate_uncommon_trap_blob--------------------
duke@0 3434 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
duke@0 3435 // instead.
duke@0 3436 void SharedRuntime::generate_uncommon_trap_blob() {
duke@0 3437 // allocate space for the code
duke@0 3438 ResourceMark rm;
duke@0 3439 // setup code generation tools
duke@0 3440 int pad = VerifyThread ? 512 : 0;
duke@0 3441 #ifdef _LP64
duke@0 3442 CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
duke@0 3443 #else
duke@0 3444 // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
duke@0 3445 // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
duke@0 3446 CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
duke@0 3447 #endif
duke@0 3448 MacroAssembler* masm = new MacroAssembler(&buffer);
duke@0 3449 Register O2UnrollBlock = O2;
duke@0 3450 Register O3tmp = O3;
duke@0 3451 Register O2klass_index = O2;
duke@0 3452
duke@0 3453 //
duke@0 3454 // This is the entry point for all traps the compiler takes when it thinks
duke@0 3455 // it cannot handle further execution of compiled code. The frame is
duke@0 3456 // deoptimized in these cases and converted into interpreter frames for
duke@0 3457 // execution.
duke@0 3458 // The steps taken by this frame are as follows:
duke@0 3459 // - push a fake "unpack_frame"
duke@0 3460 // - call the C routine Deoptimization::uncommon_trap (this function
duke@0 3461 // packs the current compiled frame into vframe arrays and returns
duke@0 3462 // information about the number and size of interpreter frames which
duke@0 3463 // are equivalent to the frame which is being deoptimized)
duke@0 3464 // - deallocate the "unpack_frame"
duke@0 3465 // - deallocate the deoptimization frame
duke@0 3466 // - in a loop using the information returned in the previous step
duke@0 3467 // push interpreter frames;
duke@0 3468 // - create a dummy "unpack_frame"
duke@0 3469 // - call the C routine: Deoptimization::unpack_frames (this function
duke@0 3470 // lays out values on the interpreter frame which was just created)
duke@0 3471 // - deallocate the dummy unpack_frame
duke@0 3472 // - return to the interpreter entry point
duke@0 3473 //
duke@0 3474 // Refer to the following methods for more information:
duke@0 3475 // - Deoptimization::uncommon_trap
duke@0 3476 // - Deoptimization::unpack_frames
duke@0 3477
duke@0 3478 // the unloaded class index is in O0 (first parameter to this blob)
duke@0 3479
duke@0 3480 // push a dummy "unpack_frame"
duke@0 3481 // and call Deoptimization::uncommon_trap to pack the compiled frame into
duke@0 3482 // vframe array and return the UnrollBlock information
duke@0 3483 __ save_frame(0);
duke@0 3484 __ set_last_Java_frame(SP, noreg);
duke@0 3485 __ mov(I0, O2klass_index);
duke@0 3486 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
duke@0 3487 __ reset_last_Java_frame();
duke@0 3488 __ mov(O0, O2UnrollBlock->after_save());
duke@0 3489 __ restore();
duke@0 3490
duke@0 3491 // deallocate the deoptimized frame taking care to preserve the return values
duke@0 3492 __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
duke@0 3493 __ restore();
duke@0 3494
duke@0 3495 // Allocate new interpreter frame(s) and possible c2i adapter frame
duke@0 3496
duke@0 3497 make_new_frames(masm, false);
duke@0 3498
duke@0 3499 // push a dummy "unpack_frame" taking care of float return values and
duke@0 3500 // call Deoptimization::unpack_frames to have the unpacker layout
duke@0 3501 // information in the interpreter frames just created and then return
duke@0 3502 // to the interpreter entry point
duke@0 3503 __ save_frame(0);
duke@0 3504 __ set_last_Java_frame(SP, noreg);
duke@0 3505 __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
duke@0 3506 __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
duke@0 3507 __ reset_last_Java_frame();
duke@0 3508 __ ret();
duke@0 3509 __ delayed()->restore();
duke@0 3510
duke@0 3511