changeset 5646:41cb10cbfb3c

8025937: assert(existing_f1 == NULL || existing_f1 == f1) failed: illegal field change
Summary: Create extra constant pool cache entries for invokespecial/InterfaceMethodref to hold the alternate resolution.
Reviewed-by: jrose, lfoltan, hseigel
author coleenp
date Wed, 13 Nov 2013 16:42:24 -0500
parents fce21ac5968d
children 755c423791ab df0df745224c 3edddbff4865
files src/share/vm/interpreter/rewriter.cpp src/share/vm/interpreter/rewriter.hpp src/share/vm/oops/cpCache.cpp src/share/vm/oops/cpCache.hpp
diffstat 4 files changed, 203 insertions(+), 54 deletions(-)
--- a/src/share/vm/interpreter/rewriter.cpp	Wed Nov 13 07:31:26 2013 -0800
+++ b/src/share/vm/interpreter/rewriter.cpp	Wed Nov 13 16:42:24 2013 -0500
@@ -70,21 +70,21 @@
 }
 
 // Unrewrite the bytecodes if an error occurs.
-void Rewriter::restore_bytecodes() {
+void Rewriter::restore_bytecodes(TRAPS) {
   int len = _methods->length();
 
   for (int i = len-1; i >= 0; i--) {
     Method* method = _methods->at(i);
-    scan_method(method, true);
+    scan_method(method, true, CHECK);
   }
 }
 
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
-  const int length = _cp_cache_map.length();
   ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data();
   ConstantPoolCache* cache =
-      ConstantPoolCache::allocate(loader_data, length, _cp_cache_map,
+      ConstantPoolCache::allocate(loader_data, _cp_cache_map,
+                                  _invokedynamic_cp_cache_map,
                                   _invokedynamic_references_map, CHECK);
 
   // initialize object cache in constant pool
@@ -154,6 +154,30 @@
   }
 }
 
+// If the constant pool entry for invokespecial is InterfaceMethodref,
+// we need to add a separate cpCache entry for its resolution, because it is
+// different from the resolution for invokeinterface with InterfaceMethodref.
+// These cannot share cpCache entries.  It's unclear whether all invokespecial
+// bytecodes targeting InterfaceMethodrefs would resolve to the same thing, so
+// a new cpCache entry is created for each one.  This case was added with lambda.
+void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS) {
+  address p = bcp + offset;
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cache_index = add_invokespecial_cp_cache_entry(cp_index);
+    if (cache_index != (int)(jushort) cache_index) {
+      THROW_MSG(vmSymbols::java_lang_InternalError(),
+                "This classfile overflows invokespecial for interfaces "
+                "and cannot be loaded");
+    }
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int cp_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, cp_index);
+  }
+}
+
 
 // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
 void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
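
The overflow guard in rewrite_invokespecial above hinges on a 16-bit round trip: the rewritten bytecode stores its cpCache index in a two-byte (u2) operand, so any index that does not survive truncation to jushort must abort loading. A minimal standalone sketch of the same check, with uint16_t standing in for HotSpot's jushort (not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for HotSpot's jushort; the rewritten invokespecial stores
    // its cpCache index in a two-byte (u2) operand.
    typedef uint16_t u2_t;

    // True if cache_index survives a round trip through the 16-bit operand,
    // mirroring the (int)(jushort)cache_index comparison above.
    static bool fits_in_u2(int cache_index) {
      return cache_index == (int)(u2_t)cache_index;
    }

    int main() {
      printf("65535: %s\n", fits_in_u2(65535) ? "ok" : "overflow");  // ok
      printf("65536: %s\n", fits_in_u2(65536) ? "ok" : "overflow");  // overflow
      return 0;
    }
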
@@ -203,7 +228,7 @@
   if (!reverse) {
     int cp_index = Bytes::get_Java_u2(p);
     int cache_index = add_invokedynamic_cp_cache_entry(cp_index);
-    add_invokedynamic_resolved_references_entries(cp_index, cache_index);
+    int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index);
     // Replace the trailing four bytes with a CPC index for the dynamic
     // call site.  Unlike other CPC entries, there is one per bytecode,
     // not just one per distinct CP entry.  In other words, the
@@ -212,13 +237,20 @@
     // all these entries.  That is the main reason invokedynamic
     // must have a five-byte instruction format.  (Of course, other JVM
     // implementations can use the bytes for other purposes.)
+    // Note: We use native_u4 format exclusively for 4-byte indexes.
     Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index));
-    // Note: We use native_u4 format exclusively for 4-byte indexes.
+    // add the bcp in case we need to patch this bytecode if we also find an
+    // invokespecial/InterfaceMethodref in the bytecode stream
+    _patch_invokedynamic_bcps->push(p);
+    _patch_invokedynamic_refs->push(resolved_index);
   } else {
-    // callsite index
     int cache_index = ConstantPool::decode_invokedynamic_index(
                         Bytes::get_native_u4(p));
-    int cp_index = cp_cache_entry_pool_index(cache_index);
+    // We will reverse the bytecode rewriting _after_ adjusting them.
+    // Adjust the cache index by the offset to the invokedynamic entries in
+    // the cpCache plus the delta if the invokedynamic bytecodes were adjusted.
+    int adjustment = cp_cache_delta() + _first_iteration_cp_cache_limit;
+    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index - adjustment);
     assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
     // zero out 4 bytes
     Bytes::put_Java_u4(p, 0);
@@ -226,6 +258,34 @@
   }
 }
 
+void Rewriter::patch_invokedynamic_bytecodes() {
+  // If the end of the cp_cache is the same as after initializing with the
+  // cpool, nothing needs to be done.  Invokedynamic bytecodes are at the
+  // correct offsets, i.e., no invokespecial entries were added.
+  int delta = cp_cache_delta();
+  if (delta > 0) {
+    int length = _patch_invokedynamic_bcps->length();
+    assert(length == _patch_invokedynamic_refs->length(),
+           "lengths should match");
+    for (int i = 0; i < length; i++) {
+      address p = _patch_invokedynamic_bcps->at(i);
+      int cache_index = ConstantPool::decode_invokedynamic_index(
+                          Bytes::get_native_u4(p));
+      Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta));
+
+      // invokedynamic resolved references map also points to cp cache and must
+      // add delta to each.
+      int resolved_index = _patch_invokedynamic_refs->at(i);
+      for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
+        assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
+             "should be the same index");
+        _invokedynamic_references_map.at_put(resolved_index+entry,
+                                             cache_index + delta);
+      }
+    }
+  }
+}
+
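
Since every invokespecial/InterfaceMethodref entry is appended in front of the invokedynamic region, all invokedynamic cache indices shift by the same delta, and the patch is a uniform slide. An illustrative model of that slide, using std::vector in place of the rewriter's growable arrays (names are made up):

    #include <cassert>
    #include <vector>

    // Illustrative model: cache indices recorded for invokedynamic call
    // sites before any invokespecial entries were appended; 'delta' is how
    // many such entries were added afterwards.
    static void slide_indy_indices(std::vector<int>& indices, int delta) {
      if (delta == 0) return;            // layout unchanged; nothing to patch
      for (size_t i = 0; i < indices.size(); i++) {
        indices[i] += delta;             // slide past the appended entries
      }
    }

    int main() {
      std::vector<int> indices = {10, 11, 12};  // hypothetical indy entries
      slide_indy_indices(indices, 2);           // two invokespecial entries added
      assert(indices[0] == 12 && indices[2] == 14);
      return 0;
    }
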
 
 // Rewrite some ldc bytecodes to _fast_aldc
 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
@@ -269,7 +329,7 @@
 
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(Method* method, bool reverse) {
+void Rewriter::scan_method(Method* method, bool reverse, TRAPS) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -329,12 +389,25 @@
 #endif
           break;
         }
+
+        case Bytecodes::_invokespecial  : {
+          int offset = prefix_length + 1;
+          address p = bcp + offset;
+          int cp_index = Bytes::get_Java_u2(p);
+          // InterfaceMethodref
+          if (_pool->tag_at(cp_index).is_interface_method()) {
+            rewrite_invokespecial(bcp, offset, reverse, CHECK);
+          } else {
+            rewrite_member_reference(bcp, offset, reverse);
+          }
+          break;
+        }
+
         case Bytecodes::_getstatic      : // fall through
         case Bytecodes::_putstatic      : // fall through
         case Bytecodes::_getfield       : // fall through
         case Bytecodes::_putfield       : // fall through
         case Bytecodes::_invokevirtual  : // fall through
-        case Bytecodes::_invokespecial  : // fall through
         case Bytecodes::_invokestatic   :
         case Bytecodes::_invokeinterface:
         case Bytecodes::_invokehandle   : // if reverse=true
@@ -426,16 +499,21 @@
 
   for (int i = len-1; i >= 0; i--) {
     Method* method = _methods->at(i);
-    scan_method(method);
+    scan_method(method, false, CHECK);  // If you get an error here,
+                                        // there are no rewritten bytecodes to reverse.
   }
 
+  // May have to fix invokedynamic bytecodes if invokespecial/InterfaceMethodref
+  // entries had to be added.
+  patch_invokedynamic_bytecodes();
+
   // allocate constant pool cache, now that we've seen all the bytecodes
   make_constant_pool_cache(THREAD);
 
   // Restore bytecodes to their unrewritten state if there are exceptions
   // rewriting bytecodes or allocating the cpCache
   if (HAS_PENDING_EXCEPTION) {
-    restore_bytecodes();
+    restore_bytecodes(CATCH);
     return;
   }
 
@@ -452,7 +530,7 @@
       // relocating bytecodes.  If some are relocated, that is ok because that
       // doesn't affect constant pool to cpCache rewriting.
       if (HAS_PENDING_EXCEPTION) {
-        restore_bytecodes();
+        restore_bytecodes(CATCH);
         return;
       }
       // Method might have gotten rewritten.
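
Taken together, the rewriter now lays the cpCache out in three regions: the entries discovered while walking the constant pool, then any invokespecial/InterfaceMethodref entries, then the invokedynamic entries. A small model of the resulting index arithmetic (illustrative only; the field names echo, but are not, the rewriter's):

    // Illustrative model of the final cpCache layout (not HotSpot code):
    //   [0, first_limit)                    entries from the constant pool walk
    //   [first_limit, first_limit + delta)  invokespecial/InterfaceMethodref
    //   [first_limit + delta, length)       invokedynamic entries
    struct CacheLayout {
      int first_limit;  // plays the role of _first_iteration_cp_cache_limit
      int delta;        // invokespecial entries appended after the first pass

      // Bytecode-visible index for the i-th invokedynamic map entry.
      int indy_to_cache_index(int i) const { return first_limit + delta + i; }

      // Inverse mapping, as used when reversing the rewriting.
      int cache_to_indy_index(int cache_index) const {
        return cache_index - (first_limit + delta);
      }
    };
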
--- a/src/share/vm/interpreter/rewriter.hpp	Wed Nov 13 07:31:26 2013 -0800
+++ b/src/share/vm/interpreter/rewriter.hpp	Wed Nov 13 16:42:24 2013 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,55 +46,102 @@
   intArray            _method_handle_invokers;
   int                 _resolved_reference_limit;
 
+  // For mapping invokedynamic bytecodes, which are discovered during method
+  // scanning.  The invokedynamic entries are added at the end of the cpCache.
+  // If there are any invokespecial/InterfaceMethodref special case bytecodes,
+  // these entries are added before invokedynamic entries so that the
+  // invokespecial bytecode's 16-bit index doesn't overflow.
+  intStack            _invokedynamic_cp_cache_map;
+
+  // For patching.
+  GrowableArray<address>* _patch_invokedynamic_bcps;
+  GrowableArray<int>*     _patch_invokedynamic_refs;
+
   void init_maps(int length) {
     _cp_map.initialize(length, -1);
     // Choose an initial value large enough that we don't get frequent
     // calls to grow().
-    _cp_cache_map.initialize(length / 2);
+    _cp_cache_map.initialize(length/2);
     // Also cache resolved objects, in another different cache.
     _reference_map.initialize(length, -1);
-    _resolved_references_map.initialize(length / 2);
-    _invokedynamic_references_map.initialize(length / 2);
+    _resolved_references_map.initialize(length/2);
+    _invokedynamic_references_map.initialize(length/2);
     _resolved_reference_limit = -1;
-    DEBUG_ONLY(_cp_cache_index_limit = -1);
+    _first_iteration_cp_cache_limit = -1;
+
+    // invokedynamic specific fields
+    _invokedynamic_cp_cache_map.initialize(length/4);
+    _patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
+    _patch_invokedynamic_refs = new GrowableArray<int>(length/4);
   }
 
-  int _cp_cache_index_limit;
+  int _first_iteration_cp_cache_limit;
   void record_map_limits() {
-#ifdef ASSERT
-    // Record initial size of the two arrays generated for the CP cache:
-    _cp_cache_index_limit = _cp_cache_map.length();
-#endif //ASSERT
+    // Record initial size of the two arrays generated for the CP cache
+    // relative to walking the constant pool.
+    _first_iteration_cp_cache_limit = _cp_cache_map.length();
     _resolved_reference_limit = _resolved_references_map.length();
   }
 
+  int cp_cache_delta() {
+    // How many cp cache entries were added since recording map limits after
+    // cp cache initialization?
+    assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
+    return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
+  }
+
   int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
   bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
 
+  int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
+    assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
+    int cache_index = cp_cache_map->append(cp_index);
+    cp_map->at_put(cp_index, cache_index);
+    return cache_index;
+  }
+
   int add_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
-    assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration");
-    int cache_index = _cp_cache_map.append(cp_index);
-    _cp_map.at_put(cp_index, cache_index);
+    assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
+    int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
     assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
     assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
     return cache_index;
   }
 
-  // add a new CP cache entry beyond the normal cache (for invokedynamic only)
   int add_invokedynamic_cp_cache_entry(int cp_index) {
     assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
-    assert(_cp_map[cp_index] == -1, "do not map from cp_index");
-    assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration");
+    assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
+    // add to the invokedynamic index map.
+    int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
+    // do not update _cp_map, since the mapping is one-to-many
+    assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
+    // this index starts at zero within the invokedynamic map; in the bytecode
+    // it is biased past the cp cache entries from the first iteration.
+    return cache_index + _first_iteration_cp_cache_limit;
+  }
+
+  int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
+    int cp_index = _invokedynamic_cp_cache_map[cache_index];
+    return cp_index;
+  }
+
+  // add a new CP cache entry beyond the normal cache for the special case of
+  // invokespecial with InterfaceMethodref as cpool operand.
+  int add_invokespecial_cp_cache_entry(int cp_index) {
+    assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
+    // Don't add InterfaceMethodref if it already exists at the end.
+    for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
+      if (cp_cache_entry_pool_index(i) == cp_index) {
+        return i;
+      }
+    }
     int cache_index = _cp_cache_map.append(cp_index);
-    assert(cache_index >= _cp_cache_index_limit, "");
+    assert(cache_index >= _first_iteration_cp_cache_limit, "");
     // do not update _cp_map, since the mapping is one-to-many
     assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
     return cache_index;
   }
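
Note that the dedup scan above starts at _first_iteration_cp_cache_limit, so only entries appended for this special case are candidates for reuse; a first-pass entry with the same cp_index is never shared. A compact model of that tail-only dedup append, with std::vector standing in for intStack:

    #include <vector>

    // Illustrative tail-only dedup append: entries before tail_start are the
    // first-pass cache entries and are never reused.
    static int append_dedup(std::vector<int>& cache_map, int tail_start,
                            int cp_index) {
      for (int i = tail_start; i < (int)cache_map.size(); i++) {
        if (cache_map[i] == cp_index) return i;  // reuse the tail entry
      }
      cache_map.push_back(cp_index);             // otherwise append a new one
      return (int)cache_map.size() - 1;
    }
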
 
-  // fix duplicated code later
   int  cp_entry_to_resolved_references(int cp_index) const {
     assert(has_entry_in_resolved_references(cp_index), "oob");
     return _reference_map[cp_index];
@@ -105,10 +152,7 @@
 
   // add a new entry to the resolved_references map
   int add_resolved_references_entry(int cp_index) {
-    assert(_reference_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration");
-    int ref_index = _resolved_references_map.append(cp_index);
-    _reference_map.at_put(cp_index, ref_index);
+    int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
     assert(cp_entry_to_resolved_references(cp_index) == ref_index, "");
     return ref_index;
   }
@@ -137,7 +181,7 @@
   // Access the contents of _cp_cache_map to determine CP cache layout.
   int cp_cache_entry_pool_index(int cache_index) {
     int cp_index = _cp_cache_map[cache_index];
-      return cp_index;
+    return cp_index;
   }
 
   // All the work goes in here:
@@ -145,14 +189,18 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(Method* m, bool reverse = false);
+  void scan_method(Method* m, bool reverse, TRAPS);
   void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false);
-  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  void rewrite_member_reference(address bcp, int offset, bool reverse);
+  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse);
+  void rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS);
+
+  void patch_invokedynamic_bytecodes();
+
   // Revert bytecodes in case of an exception.
-  void restore_bytecodes();
+  void restore_bytecodes(TRAPS);
 
   static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
  public:
--- a/src/share/vm/oops/cpCache.cpp	Wed Nov 13 07:31:26 2013 -0800
+++ b/src/share/vm/oops/cpCache.cpp	Wed Nov 13 16:42:24 2013 -0500
@@ -554,24 +554,37 @@
 // Implementation of ConstantPoolCache
 
 ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
-                                     int length,
                                      const intStack& index_map,
+                                     const intStack& invokedynamic_index_map,
                                      const intStack& invokedynamic_map, TRAPS) {
+
+  const int length = index_map.length() + invokedynamic_index_map.length();
   int size = ConstantPoolCache::size(length);
 
   return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
-    ConstantPoolCache(length, index_map, invokedynamic_map);
+    ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
 }
 
 void ConstantPoolCache::initialize(const intArray& inverse_index_map,
+                                   const intArray& invokedynamic_inverse_index_map,
                                    const intArray& invokedynamic_references_map) {
-  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
-  for (int i = 0; i < length(); i++) {
+  for (int i = 0; i < inverse_index_map.length(); i++) {
     ConstantPoolCacheEntry* e = entry_at(i);
     int original_index = inverse_index_map[i];
     e->initialize_entry(original_index);
     assert(entry_at(i) == e, "sanity");
   }
+
+  // Append invokedynamic entries at the end
+  int invokedynamic_offset = inverse_index_map.length();
+  for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
+    int offset = i + invokedynamic_offset;
+    ConstantPoolCacheEntry* e = entry_at(offset);
+    int original_index = invokedynamic_inverse_index_map[i];
+    e->initialize_entry(original_index);
+    assert(entry_at(offset) == e, "sanity");
+  }
+
   for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
     const int cpci = invokedynamic_references_map[ref];
     if (cpci >= 0) {
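
ConstantPoolCache::initialize now fills the entry table from two inverse maps, ordinary entries first and invokedynamic entries appended after them, matching the layout the rewriter computed. A sketch of the same concatenation with illustrative types (std::vector rather than intArray):

    #include <vector>

    // Sketch: build a flat table from two inverse maps, with the
    // invokedynamic entries appended after the ordinary ones.
    static std::vector<int> build_table(const std::vector<int>& inverse,
                                        const std::vector<int>& indy_inverse) {
      std::vector<int> table;
      table.reserve(inverse.size() + indy_inverse.size());
      table.insert(table.end(), inverse.begin(), inverse.end());
      table.insert(table.end(), indy_inverse.begin(), indy_inverse.end());
      return table;  // table[i] = original cp index for cache entry i
    }
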
--- a/src/share/vm/oops/cpCache.hpp	Wed Nov 13 07:31:26 2013 -0800
+++ b/src/share/vm/oops/cpCache.hpp	Wed Nov 13 16:42:24 2013 -0500
@@ -31,6 +31,10 @@
 
 class PSPromotionManager;
 
+// The ConstantPoolCache is not a cache! It is the resolution table that the
+// interpreter uses to avoid going into the runtime, and a way to access
+// resolved values.
+
 // A ConstantPoolCacheEntry describes an individual entry of the constant
 // pool cache. There's 2 principal kinds of entries: field entries for in-
 // stance & static field access, and method entries for invokes. Some of
@@ -392,26 +396,33 @@
   friend class MetadataFactory;
  private:
   int             _length;
-  ConstantPool* _constant_pool;                // the corresponding constant pool
+  ConstantPool*   _constant_pool;          // the corresponding constant pool
 
   // Sizing
   debug_only(friend class ClassVerifier;)
 
   // Constructor
-  ConstantPoolCache(int length, const intStack& inverse_index_map,
+  ConstantPoolCache(int length,
+                    const intStack& inverse_index_map,
+                    const intStack& invokedynamic_inverse_index_map,
                     const intStack& invokedynamic_references_map) :
-                                        _length(length), _constant_pool(NULL) {
-    initialize(inverse_index_map, invokedynamic_references_map);
+                          _length(length),
+                          _constant_pool(NULL) {
+    initialize(inverse_index_map, invokedynamic_inverse_index_map,
+               invokedynamic_references_map);
     for (int i = 0; i < length; i++) {
       assert(entry_at(i)->is_f1_null(), "Failed to clear?");
     }
   }
 
   // Initialization
-  void initialize(const intArray& inverse_index_map, const intArray& invokedynamic_references_map);
+  void initialize(const intArray& inverse_index_map,
+                  const intArray& invokedynamic_inverse_index_map,
+                  const intArray& invokedynamic_references_map);
  public:
-  static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length,
-                                     const intStack& inverse_index_map,
+  static ConstantPoolCache* allocate(ClassLoaderData* loader_data,
+                                     const intStack& cp_cache_map,
+                                     const intStack& invokedynamic_cp_cache_map,
                                      const intStack& invokedynamic_references_map, TRAPS);
   bool is_constantPoolCache() const { return true; }