/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"    // for MethodHandles::has_member_arg, used in unpack_to_stack
#include "runtime/handles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif


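// Report the bci for this element. _bci may hold the SynchronizationEntryBCI
// sentinel, which marks a deoptimization in the prologue of a synchronized
// method (see the raw_bci() handling in unpack_on_stack below); in that case
// report bci 0, the first bytecode.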
int vframeArrayElement::bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }

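// Release the MonitorChunk that fill_in() migrated this element's BasicLocks
// into, and detach it from the owning thread. Called from
// vframeArray::deallocate_monitor_chunks() once the frames have been unpacked.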
void vframeArrayElement::free_monitors(JavaThread* jt) {
  if (_monitors != NULL) {
    MonitorChunk* chunk = _monitors;
    _monitors = NULL;
    jt->remove_monitor_chunk(chunk);
    delete chunk;
  }
}

void vframeArrayElement::fill_in(compiledVFrame* vf) {

  // Copy the information from the compiled vframe to the
  // interpreter frame we will be creating to replace vf

  _method = vf->method();
  _bci = vf->raw_bci();
  _reexecute = vf->should_reexecute();

  int index;

  // Get the monitors off-stack

  GrowableArray<MonitorInfo*>* list = vf->monitors();
  if (list->is_empty()) {
    _monitors = NULL;
  } else {

    // Allocate monitor chunk
    _monitors = new MonitorChunk(list->length());
    vf->thread()->add_monitor_chunk(_monitors);

    // Migrate the BasicLocks from the stack to the monitor chunk
    for (index = 0; index < list->length(); index++) {
      MonitorInfo* monitor = list->at(index);
      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
      BasicObjectLock* dest = _monitors->at(index);
      dest->set_obj(monitor->owner());
      monitor->lock()->move_to(monitor->owner(), dest->lock());
    }
  }

  // Convert the vframe locals and expressions to off stack
  // values. Because we will not gc all oops can be converted to
  // intptr_t (i.e. a stack slot) and we are fine. This is
  // good since we are inside a HandleMark and the oops in our
  // collection would go away between packing them here and
  // unpacking them in unpack_on_stack.

  // First the locals go off-stack

  // FIXME this seems silly it creates a StackValueCollection
  // in order to get the size to then copy them and
  // convert the types to intptr_t size slots. Seems like it
  // could do it in place... Still uses less memory than the
  // old way though

  StackValueCollection *locs = vf->locals();
  _locals = new StackValueCollection(locs->size());
  for(index = 0; index < locs->size(); index++) {
    StackValue* value = locs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead local. Will be initialized to null/zero.
        _locals->add( new StackValue());
        break;
      case T_INT:
        _locals->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Now the expressions off-stack
  // Same silliness as above

  StackValueCollection *exprs = vf->expressions();
  _expressions = new StackValueCollection(exprs->size());
  for(index = 0; index < exprs->size(); index++) {
    StackValue* value = exprs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead stack element. Will be initialized to null/zero.
        // This can occur when the compiler emits a state in which stack
        // elements are known to be dead (because of an imminent exception).
        _expressions->add( new StackValue());
        break;
      case T_INT:
        _expressions->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

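// Counter for the interpreter frames rebuilt by unpack_on_stack; referenced by
// the TraceDeoptimization output below.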
int unpack_counter = 0;

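// Rebuild one interpreter activation from this element: choose the bytecode
// pointer and interpreter continuation pc, lay the activation out over the
// skeletal frame, then restore the monitors, expression stack and locals that
// were captured in fill_in().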
void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
                                         int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         int exec_mode) {
  JavaThread* thread = (JavaThread*) Thread::current();

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { //reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER). If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  assert(thread->deopt_nmethod() != NULL, "nmethod should be known");
  guarantee(!(thread->deopt_nmethod()->is_compiled_by_c2() &&
              *bcp == Bytecodes::_monitorenter &&
              exec_mode == Deoptimization::Unpack_exception),
            "shouldn't get exception during monitorenter");

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState *state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
#ifndef CC_INTERP
        pc = Interpreter::remove_activation_preserving_args_entry();
#else
        // Do an uncommon trap type entry. c++ interpreter will know
        // to pop frame and preserve the args
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
#endif
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
#ifndef CC_INTERP
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#else
      // TBD: Need to implement ForceEarlyReturn for CC_INTERP (ia64)
#endif
    } else {
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to. See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame

  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();

  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 caller_actual_parameters,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.

  _frame.patch_pc(thread, pc);

  assert(!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");

  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
    src->lock()->move_to(src->obj(), top->lock());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdx(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcx((intptr_t)bcp); // cannot use bcp because frame is not initialized yet
  if (ProfileInterpreter) {
    methodDataOop mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

  // Unpack expression stack
  // If this is an intermediate frame (i.e. not top frame) then this
  // only unpacks the part of the expression stack not used by callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for(i = 0; i < expressions()->size(); i++) {
    StackValue *value = expressions()->at(i);
    intptr_t* addr = iframe()->interpreter_frame_expression_stack_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead stack slot. Initialize to null in case it is an oop.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }


  // Unpack the locals
  for(i = 0; i < locals()->size(); i++) {
    StackValue *value = locals()->at(i);
    intptr_t* addr = iframe()->interpreter_frame_local_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead location. If it is an oop then we need a NULL to prevent GC from following it
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_jbytes(saved_args,
                            base,
                            popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (TraceDeoptimization && Verbose) {
    ttyLocker ttyl;
    tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();

    tty->print_cr("locals size %d", locals()->size());
    tty->print_cr("expression size %d", expressions()->size());

    method()->print_value();
    tty->cr();
    // method()->print_codes();
  } else if (TraceDeoptimization) {
    tty->print(" ");
    method()->print_value();
    Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
    int bci = method()->bci_from(bcp);
    tty->print(" - %s", Bytecodes::name(code));
    tty->print(" @ bci %d ", bci);
    tty->print_cr("sp = " PTR_FORMAT, iframe()->sp());
  }
#endif // PRODUCT

  // The expression stack and locals are in the resource area don't leave
  // a dangling pointer in the vframeArray we leave around for debug
  // purposes

  _locals = _expressions = NULL;

}

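// Size (in words) of the interpreter activation that unpack_on_stack will build
// for this element; the arguments mirror the Interpreter::layout_activation()
// call above.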
int vframeArrayElement::on_stack_size(int caller_actual_parameters,
                                      int callee_parameters,
                                      int callee_locals,
                                      bool is_top_frame,
                                      int popframe_extra_stack_expression_els) const {
  assert(method()->max_locals() == locals()->size(), "just checking");
  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
  int temps = expressions()->size();
  return Interpreter::size_activation(method(),
                                      temps + callee_parameters,
                                      popframe_extra_stack_expression_els,
                                      locks,
                                      caller_actual_parameters,
                                      callee_parameters,
                                      callee_locals,
                                      is_top_frame);
}



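// Note on the allocation below: the variable part reserves one vframeArrayElement
// per deoptimized frame in the chunk; the "- 1" reflects the element storage that
// sizeof(vframeArray) already accounts for.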
vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
                                   RegisterMap *reg_map, frame sender, frame caller, frame self) {

  // Allocate the vframeArray
  vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
                                                     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
                                                     mtCompiler);
  result->_frames = chunk->length();
  result->_owner_thread = thread;
  result->_sender = sender;
  result->_caller = caller;
  result->_original = self;
  result->set_unroll_block(NULL); // initialize it
  result->fill_in(thread, frame_size, chunk, reg_map);
  return result;
}

void vframeArray::fill_in(JavaThread* thread,
                          int frame_size,
                          GrowableArray<compiledVFrame*>* chunk,
                          const RegisterMap *reg_map) {
  // Set owner first, it is used when adding monitor chunks

  _frame_size = frame_size;
  for(int i = 0; i < chunk->length(); i++) {
    element(i)->fill_in(chunk->at(i));
  }

  // Copy registers for callee-saved registers
  if (reg_map != NULL) {
    for(int i = 0; i < RegisterMap::reg_count; i++) {
#ifdef AMD64
      // The register map has one entry for every int (32-bit value), so
      // 64-bit physical registers have two entries in the map, one for
      // each half. Ignore the high halves of 64-bit registers, just like
      // frame::oopmapreg_to_location does.
      //
      // [phh] FIXME: this is a temporary hack! This code *should* work
      // correctly w/o this hack, possibly by changing RegisterMap::pd_location
      // in frame_amd64.cpp and the values of the phantom high half registers
      // in amd64.ad.
      //      if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
      intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
      //      } else {
      //        jint* src = (jint*) reg_map->location(VMReg::Name(i));
      //        _callee_registers[i] = src != NULL ? *src : NULL_WORD;
      //      }
#else
      jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
#endif
      if (src == NULL) {
        set_location_valid(i, false);
      } else {
        set_location_valid(i, true);
        jint* dst = (jint*) register_location(i);
        *dst = *src;
      }
    }
  }
}

void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
  // stack picture
  //   unpack_frame
  //   [new interpreter frames ] (frames are skeletal but walkable)
  //   caller_frame
  //
  // This routine fills in the missing data for the skeletal interpreter frames
  // in the above picture.

  // Find the skeletal interpreter frames to unpack into
  JavaThread* THREAD = JavaThread::current();
  RegisterMap map(THREAD, false);
  // Get the youngest frame we will unpack (last to be unpacked)
  frame me = unpack_frame.sender(&map);
  int index;
  for (index = 0; index < frames(); index++ ) {
    *element(index)->iframe() = me;
    // Get the caller frame (possibly skeletal)
    me = me.sender(&map);
  }

  // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
  // Unpack the frames from the oldest (frames() -1) to the youngest (0)
  frame caller_frame = me;
  for (index = frames() - 1; index >= 0 ; index--) {
    vframeArrayElement* elem = element(index);  // caller
    int callee_parameters, callee_locals;
    if (index == 0) {
      callee_parameters = callee_locals = 0;
    } else {
      methodHandle caller = elem->method();
      methodHandle callee = element(index - 1)->method();
      Bytecode_invoke inv(caller, elem->bci());
      // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
      // NOTE: Use machinery here that avoids resolving of any kind.
      const bool has_member_arg =
        !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
      callee_parameters = callee->size_of_parameters() + (has_member_arg ? 1 : 0);
      callee_locals = callee->max_locals();
    }
    elem->unpack_on_stack(caller_actual_parameters,
                          callee_parameters,
                          callee_locals,
                          &caller_frame,
                          index == 0,
                          exec_mode);
    if (index == frames() - 1) {
      Deoptimization::unwind_callee_save_values(elem->iframe(), this);
    }
    caller_frame = *elem->iframe();
    caller_actual_parameters = callee_parameters;
  }
  deallocate_monitor_chunks();
}

void vframeArray::deallocate_monitor_chunks() {
  JavaThread* jt = JavaThread::current();
  for (int index = 0; index < frames(); index++ ) {
    element(index)->free_monitors(jt);
  }
}

#ifndef PRODUCT

bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
  if (owner_thread() != thread) return false;
  int index = 0;
#if 0 // FIXME can't do this comparison

  // Compare only within vframe array.
  for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
    if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
    index++;
  }
  if (index != chunk->length()) return false;
#endif

  return true;
}

#endif

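// Address of the saved slot for callee-saved register i. fill_in() copies the
// register map into these slots; they are consulted later when the callee-saved
// state is restored (see Deoptimization::unwind_callee_save_values).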
address vframeArray::register_location(int i) const {
  assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
  return (address) & _callee_registers[i];
}


#ifndef PRODUCT

// Printing

// Note: we cannot have print_on as const, as we allocate inside the method
void vframeArray::print_on_2(outputStream* st) {
  st->print_cr(" - sp: " INTPTR_FORMAT, sp());
  st->print(" - thread: ");
  Thread::current()->print();
  st->print_cr(" - frame size: %d", frame_size());
  for (int index = 0; index < frames() ; index++ ) {
    element(index)->print(st);
  }
}

void vframeArrayElement::print(outputStream* st) {
  st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, iframe()->sp());
}

void vframeArray::print_value_on(outputStream* st) const {
  st->print_cr("vframeArray [%d] ", frames());
}


#endif