comparison src/share/vm/oops/cpCacheOop.cpp @ 0:a61af66fc99e

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children ba764ed4b6f2
comparison
equal deleted inserted replaced
-1:000000000000 0:fe4e22986fd5
1 /*
2 * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_cpCacheOop.cpp.incl"
27
28
// Implementation of ConstantPoolCacheEntry
30
31 void ConstantPoolCacheEntry::set_initial_state(int index) {
32 assert(0 <= index && index < 0x10000, "sanity check");
33 _indices = index;
34 }
35
36
37 int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
38 bool is_vfinal, bool is_volatile,
39 bool is_method_interface, bool is_method) {
40 int f = state;
41
42 assert( state < number_of_states, "Invalid state in as_flags");
43
44 f <<= 1;
45 if (is_final) f |= 1;
46 f <<= 1;
47 if (is_vfinal) f |= 1;
48 f <<= 1;
49 if (is_volatile) f |= 1;
50 f <<= 1;
51 if (is_method_interface) f |= 1;
52 f <<= 1;
53 if (is_method) f |= 1;
54 f <<= ConstantPoolCacheEntry::hotSwapBit;
55 // Preserve existing flag bit values
56 #ifdef ASSERT
57 int old_state = ((_flags >> tosBits) & 0x0F);
58 assert(old_state == 0 || old_state == state,
59 "inconsistent cpCache flags state");
60 #endif
61 return (_flags | f) ;
62 }
63
// Publish the resolved bytecode in bits 16..23 of _indices. This is the
// store that marks the entry as resolved for byte_no 1 call sites, so it
// must be ordered after the stores to f1/f2/flags.
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  // A bytecode may only go from unresolved (0) to resolved, be re-stored
  // with the same value, or be cleared back to 0; anything else indicates
  // an inconsistent concurrent update.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}
73
// Publish the resolved bytecode in bits 24..31 of _indices. Counterpart of
// set_bytecode_1 for byte_no 2 call sites; same ordering requirements.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  // Only 0 -> code, code -> code, or code -> 0 transitions are legal.
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
83
84 #ifdef ASSERT
85 // It is possible to have two different dummy methodOops created
86 // when the resolve code for invoke interface executes concurrently
87 // Hence the assertion below is weakened a bit for the invokeinterface
88 // case.
// Weakened identity check used only from ASSERT code (see comment above).
// NOTE(review): this accepts a match on pointer identity OR name alone OR
// signature alone (||, not &&), which is weaker than full name-and-signature
// equality; presumably deliberate for the concurrent invokeinterface case --
// confirm before tightening.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
94 #endif
95
96 // Note that concurrent update of both bytecodes can leave one of them
97 // reset to zero. This is harmless; the interpreter will simply re-resolve
98 // the damaged entry. More seriously, the memory synchronization is needed
99 // to flush other fields (f1, f2) completely to memory before the bytecodes
100 // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
// Resolve this entry for a field access site. f1 receives the holder klass,
// f2 the field offset, and the flags carry the field's TosState, attribute
// bits, and (in the low bits) the descaled jvm/ti field index. The bytecode
// stores come last: they perform the release store that publishes the entry
// as resolved (see the comment above about memory synchronization).
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into fields() array
  // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
123
124 int ConstantPoolCacheEntry::field_index() const {
125 return (_flags & field_index_mask) * instanceKlass::next_offset;
126 }
127
// Resolve this entry for a method call site. Depending on invoke_code,
// either f1 (the resolved method) or f2 (method pointer for statically
// bindable calls, otherwise the vtable index) is filled in, then the flags,
// and finally the bytecode is published with a release store.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {

  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  // True when the site was invokeinterface but is dispatched virtually
  // (see the HACK comment below for why this can happen).
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // Statically bindable: f2 holds the method pointer itself and
          // the entry is marked vfinal.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          // Normal virtual dispatch: f2 holds the vtable index.
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }
    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      // Non-virtual dispatch: f1 holds the resolved method itself.
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Flags carry the result TosState, the attribute bits, and (in the low
  // bits) the callee's parameter size.
  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
202
203
// Resolve this entry for an invokeinterface call site: f1 holds the
// interface klass, f2 the method index supplied by the caller (presumably
// the itable index -- confirm against the resolver). The bytecode store is
// last so it publishes the fully-initialized entry.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  // As in set_method, the low flag bits carry the callee's parameter size.
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}
212
213
214 class LocalOopClosure: public OopClosure {
215 private:
216 void (*_f)(oop*);
217
218 public:
219 LocalOopClosure(void f(oop*)) { _f = f; }
220 virtual void do_oop(oop* o) { _f(o); }
221 };
222
223
224 void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
225 LocalOopClosure blk(f);
226 oop_iterate(&blk);
227 }
228
229
// Visit the oop fields of this entry with the given closure.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  // _f2 holds a methodOop only for vfinal entries; otherwise it is a
  // vtable index or field offset and must not be visited as an oop.
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}
238
239
240 void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
241 assert(in_words(size()) == 4, "check code below - may need adjustment");
242 // field[1] is always oop or NULL
243 if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
244 if (is_vfinal()) {
245 if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
246 }
247 }
248
249
// Serial mark-sweep: mark and push the oop fields of this entry.
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  // _f2 is an oop (a methodOop) only when the entry is vfinal.
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}
258
259 #ifndef SERIALGC
// Parallel compact: mark and push the oop fields of this entry.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  // _f2 is an oop (a methodOop) only when the entry is vfinal.
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
268 #endif // SERIALGC
269
270 void ConstantPoolCacheEntry::adjust_pointers() {
271 assert(in_words(size()) == 4, "check code below - may need adjustment");
272 // field[1] is always oop or NULL
273 MarkSweep::adjust_pointer((oop*)&_f1);
274 if (is_vfinal()) {
275 MarkSweep::adjust_pointer((oop*)&_f2);
276 }
277 }
278
279 #ifndef SERIALGC
// Parallel compact: update the oop fields of this entry to the objects'
// new post-compaction locations.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  // _f2 is an oop (a methodOop) only when the entry is vfinal.
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}
288
// Parallel compact: as update_pointers(), but restricted to oop locations
// within [beg_addr, end_addr).
void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
                                             HeapWord* end_addr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
  // _f2 is an oop (a methodOop) only when the entry is vfinal.
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
  }
}
298 #endif // SERIALGC
299
300 // RedefineClasses() API support:
301 // If this constantPoolCacheEntry refers to old_method then update it
302 // to refer to new_method.
// If this entry refers to old_method, replace the reference with new_method
// and return true; otherwise return false. *trace_name_printed tracks
// whether the holder class name has already been logged, so it is printed
// at most once per class across all entries.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    // non-vfinal entry: f1 holds the resolved method directly
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
357
358 bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
359 if (!is_method_entry()) {
360 // not a method entry so not interesting by default
361 return false;
362 }
363
364 methodOop m = NULL;
365 if (is_vfinal()) {
366 // virtual and final so _f2 contains method ptr instead of vtable index
367 m = (methodOop)_f2;
368 } else if ((oop)_f1 == NULL) {
369 // NULL _f1 means this is a virtual entry so also not interesting
370 return false;
371 } else {
372 if (!((oop)_f1)->is_method()) {
373 // _f1 can also contain a klassOop for an interface
374 return false;
375 }
376 m = (methodOop)_f1;
377 }
378
379 assert(m != NULL && m->is_method(), "sanity check");
380 if (m == NULL || !m->is_method() || m->method_holder() != k) {
381 // robustness for above sanity checks or method is not in
382 // the interesting class
383 return false;
384 }
385
386 // the method is in the interesting class so the entry is interesting
387 return true;
388 }
389
390 void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
391 // print separator
392 if (index == 0) tty->print_cr(" -------------");
393 // print entry
394 tty->print_cr("%3d (%08x) [%02x|%02x|%5d]", index, this, bytecode_2(), bytecode_1(), constant_pool_index());
395 tty->print_cr(" [ %08x]", (address)(oop)_f1);
396 tty->print_cr(" [ %08x]", _f2);
397 tty->print_cr(" [ %08x]", _flags);
398 tty->print_cr(" -------------");
399 }
400
// Verify the integrity of this entry; currently a no-op placeholder
// (invoked via NOT_PRODUCT(verify(tty)) from the setters above).
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
404
405 // Implementation of ConstantPoolCache
406
407 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
408 assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
409 for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]);
410 }
411
412 // RedefineClasses() API support:
413 // If any entry of this constantPoolCache points to any of
414 // old_methods, replace it with the corresponding new_method.
415 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
416 int methods_length, bool * trace_name_printed) {
417
418 if (methods_length == 0) {
419 // nothing to do if there are no methods
420 return;
421 }
422
423 // get shorthand for the interesting class
424 klassOop old_holder = old_methods[0]->method_holder();
425
426 for (int i = 0; i < length(); i++) {
427 if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
428 // skip uninteresting methods
429 continue;
430 }
431
432 // The constantPoolCache contains entries for several different
433 // things, but we only care about methods. In fact, we only care
434 // about methods in the same class as the one that contains the
435 // old_methods. At this point, we have an interesting entry.
436
437 for (int j = 0; j < methods_length; j++) {
438 methodOop old_method = old_methods[j];
439 methodOop new_method = new_methods[j];
440
441 if (entry_at(i)->adjust_method_entry(old_method, new_method,
442 trace_name_printed)) {
443 // current old_method matched this entry and we updated it so
444 // break out and get to the next interesting entry if there one
445 break;
446 }
447 }
448 }
449 }