changeset 508:b6f0babd7cf1

assign some bug numbers
author jrose
date Wed, 17 Oct 2012 21:46:25 -0700
parents f3b15e2870c5
children 37735140b62a
files anno-stable-8001107.patch anno-stable.patch series value-obj-8001111.patch value-obj-8001111.txt value-obj.patch value-obj.txt
diffstat 7 files changed, 2138 insertions(+), 2137 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/anno-stable-8001107.patch	Wed Oct 17 21:46:25 2012 -0700
@@ -0,0 +1,1037 @@
+8001107: @Stable annotation for constant folding of lazily evaluated variables
+
+diff --git a/src/share/vm/ci/ciArray.cpp b/src/share/vm/ci/ciArray.cpp
+--- a/src/share/vm/ci/ciArray.cpp
++++ b/src/share/vm/ci/ciArray.cpp
+@@ -26,12 +26,102 @@
+ #include "ci/ciArray.hpp"
+ #include "ci/ciKlass.hpp"
+ #include "ci/ciUtilities.hpp"
++#include "oops/objArrayOop.hpp"
++#include "oops/typeArrayOop.hpp"
+ 
+ // ciArray
+ //
+ // This class represents an arrayOop in the HotSpot virtual
+ // machine.
+ 
++ciArrayKlass* ciArray::array_type() {
++  return klass()->as_array_klass();
++}
++
++ciType* ciArray::element_type() {
++  return array_type()->element_type();
++}
++
++BasicType ciArray::element_basic_type() {
++  return element_type()->basic_type();
++}
++
++static BasicType fixup_element_type(BasicType bt) {
++  if (bt == T_ARRAY)    return T_OBJECT;
++  if (bt == T_BOOLEAN)  return T_BYTE;
++  return bt;
++}
++
++ciConstant ciArray::element_value_impl(BasicType elembt,
++                                       arrayOop ary,
++                                       int index) {
++  if (ary == NULL)
++    return ciConstant();
++  assert(ary->is_array(), "");
++  if (index < 0 || index >= ary->length())
++    return ciConstant();
++  arrayKlass* ak = (arrayKlass*) ary->klass();
++  BasicType abt = ak->element_type();
++  if (fixup_element_type(elembt) !=
++      fixup_element_type(abt))
++    return ciConstant();
++  switch (elembt) {
++  case T_ARRAY:
++  case T_OBJECT:
++    {
++      assert(ary->is_objArray(), "");
++      objArrayOop objary = (objArrayOop) ary;
++      oop elem = objary->obj_at(index);
++      ciEnv* env = CURRENT_ENV;
++      ciObject* box = env->get_object(elem);
++      return ciConstant(T_OBJECT, box);
++    }
++  }
++  assert(ary->is_typeArray(), "");
++  typeArrayOop tary = (typeArrayOop) ary;
++  jint value = 0;
++  switch (elembt) {
++  case T_LONG:          return ciConstant(tary->long_at(index));
++  case T_FLOAT:         return ciConstant(tary->float_at(index));
++  case T_DOUBLE:        return ciConstant(tary->double_at(index));
++  default:              return ciConstant();
++  case T_BYTE:          value = tary->byte_at(index);           break;
++  case T_BOOLEAN:       value = tary->byte_at(index) & 1;       break;
++  case T_SHORT:         value = tary->short_at(index);          break;
++  case T_CHAR:          value = tary->char_at(index);           break;
++  case T_INT:           value = tary->int_at(index);            break;
++  }
++  return ciConstant(elembt, value);
++}
++
++// ------------------------------------------------------------------
++// ciArray::element_value
++//
++// Current value of an element.
++// Returns T_ILLEGAL if there is no element at the given index.
++ciConstant ciArray::element_value(int index) {
++  BasicType elembt = element_basic_type();
++  GUARDED_VM_ENTRY(
++    return element_value_impl(elembt, get_arrayOop(), index);
++  )
++}
++
++// ------------------------------------------------------------------
++// ciArray::element_value_by_offset
++//
++// Current value of an element at the specified offset.
++// Returns T_ILLEGAL if there is no element at the given offset.
++ciConstant ciArray::element_value_by_offset(intptr_t element_offset) {
++  BasicType elembt = element_basic_type();
++  intptr_t shift  = exact_log2(type2aelembytes(elembt));
++  intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt);
++  intptr_t index = (element_offset - header) >> shift;
++  intptr_t offset = header + ((intptr_t)index << shift);
++  if (offset != element_offset || index != (jint)index)
++    return ciConstant();
++  return element_value((jint) index);
++}
++
+ // ------------------------------------------------------------------
+ // ciArray::print_impl
+ //
+diff --git a/src/share/vm/ci/ciArray.hpp b/src/share/vm/ci/ciArray.hpp
+--- a/src/share/vm/ci/ciArray.hpp
++++ b/src/share/vm/ci/ciArray.hpp
+@@ -51,9 +51,24 @@
+ 
+   void print_impl(outputStream* st);
+ 
++  ciConstant element_value_impl(BasicType elembt, arrayOop ary, int index);
++
+ public:
+   int length() { return _length; }
+ 
++  // Convenience routines.
++  ciArrayKlass* array_type();       // klass()->as_array_klass()
++  ciType* element_type();           // array_type()->element_type()
++  BasicType element_basic_type();   // element_type()->basic_type()
++
++  // Current value of an element.
++  // Returns T_ILLEGAL if there is no element at the given index.
++  ciConstant element_value(int index);
++
++  // Current value of an element at the specified offset.
++  // Returns T_ILLEGAL if there is no element at the given offset.
++  ciConstant element_value_by_offset(intptr_t element_offset);
++
+   // What kind of ciObject is this?
+   bool is_array()        { return true; }
+   bool is_java_object()  { return true; }
+diff --git a/src/share/vm/ci/ciConstant.hpp b/src/share/vm/ci/ciConstant.hpp
+--- a/src/share/vm/ci/ciConstant.hpp
++++ b/src/share/vm/ci/ciConstant.hpp
+@@ -41,7 +41,6 @@
+   union {
+     jint      _int;
+     jlong     _long;
+-    jint      _long_half[2];
+     jfloat    _float;
+     jdouble   _double;
+     ciObject* _object;
+@@ -111,6 +110,19 @@
+     return _value._object;
+   }
+ 
++  bool     is_null_or_zero() const {
++    if (!is_java_primitive(basic_type()))
++      return as_object()->is_null_object();
++    else if (type2size[basic_type()] == 1)
++      // treat float bits as int, to avoid comparison with -0 and NaN
++      return (_value._int == 0);
++    else if (type2size[basic_type()] == 2)
++      // treat double bits as long, to avoid comparison with -0 and NaN
++      return (_value._long == 0);
++    else
++      return false;
++  }
++
+   // Debugging output
+   void print();
+ };
+diff --git a/src/share/vm/ci/ciField.cpp b/src/share/vm/ci/ciField.cpp
+--- a/src/share/vm/ci/ciField.cpp
++++ b/src/share/vm/ci/ciField.cpp
+@@ -189,12 +189,14 @@
+   _holder = CURRENT_ENV->get_instance_klass(fd->field_holder());
+ 
+   // Check to see if the field is constant.
+-  if (_holder->is_initialized() && this->is_final()) {
++  bool is_final = this->is_final();
++  bool is_stable = this->is_stable();
++  if (_holder->is_initialized() && (is_final || is_stable)) {
+     if (!this->is_static()) {
+       // A field can be constant if it's a final static field or if
+       // it's a final non-static field of a trusted class (classes in
+       // java.lang.invoke and sun.invoke packages and subpackages).
+-      if (trust_final_non_static_fields(_holder)) {
++      if (is_stable || trust_final_non_static_fields(_holder)) {
+         _is_constant = true;
+         return;
+       }
+@@ -227,7 +229,6 @@
+ 
+     Handle mirror = k->java_mirror();
+ 
+-    _is_constant = true;
+     switch(type()->basic_type()) {
+     case T_BYTE:
+       _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
+@@ -273,6 +274,12 @@
+         }
+       }
+     }
++    if (is_stable && _constant_value.is_null_or_zero()) {
++      // It is not a constant after all; treat it as uninitialized.
++      _is_constant = false;
++    } else {
++      _is_constant = true;
++    }
+   } else {
+     _is_constant = false;
+   }
+@@ -373,6 +380,7 @@
+   tty->print(" offset=%d type=", _offset);
+   if (_type != NULL) _type->print_name();
+   else               tty->print("(reference)");
++  tty->print(" flags=%04x", flags().as_int());
+   tty->print(" is_constant=%s", bool_to_str(_is_constant));
+   if (_is_constant && is_static()) {
+     tty->print(" constant_value=");
+diff --git a/src/share/vm/ci/ciField.hpp b/src/share/vm/ci/ciField.hpp
+--- a/src/share/vm/ci/ciField.hpp
++++ b/src/share/vm/ci/ciField.hpp
+@@ -139,7 +139,10 @@
+   //      non-constant fields.  These are java.lang.System.in
+   //      and java.lang.System.out.  Abomination.
+   //
+-  // Note: the check for case 4 is not yet implemented.
++  // A field is also considered constant if it is marked @Stable
++  // and is non-null (or non-zero, if a primitive).
++  // For non-static fields, the null/zero check must be
++  // arranged by the user, as constant_value().is_null_or_zero().
+   bool is_constant() { return _is_constant; }
+ 
+   // Get the constant value of this field.
+@@ -173,6 +176,7 @@
+   bool is_protected   () { return flags().is_protected(); }
+   bool is_static      () { return flags().is_static(); }
+   bool is_final       () { return flags().is_final(); }
++  bool is_stable      () { return flags().is_stable(); }
+   bool is_volatile    () { return flags().is_volatile(); }
+   bool is_transient   () { return flags().is_transient(); }
+ 
+diff --git a/src/share/vm/ci/ciFlags.hpp b/src/share/vm/ci/ciFlags.hpp
+--- a/src/share/vm/ci/ciFlags.hpp
++++ b/src/share/vm/ci/ciFlags.hpp
+@@ -59,6 +59,7 @@
+   bool is_interface   () const         { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
+   bool is_abstract    () const         { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
+   bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }
++  bool is_stable      () const         { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
+ 
+   // Conversion
+   jint   as_int()                      { return _flags; }
+diff --git a/src/share/vm/ci/ciInstance.cpp b/src/share/vm/ci/ciInstance.cpp
+--- a/src/share/vm/ci/ciInstance.cpp
++++ b/src/share/vm/ci/ciInstance.cpp
+@@ -127,6 +127,7 @@
+ ciConstant ciInstance::field_value_by_offset(int field_offset) {
+   ciInstanceKlass* ik = klass()->as_instance_klass();
+   ciField* field = ik->get_field_by_offset(field_offset, false);
++  if (field == NULL)  return ciConstant();  // T_ILLEGAL
+   return field_value(field);
+ }
+ 
+diff --git a/src/share/vm/ci/ciTypeArray.cpp b/src/share/vm/ci/ciTypeArray.cpp
+--- a/src/share/vm/ci/ciTypeArray.cpp
++++ b/src/share/vm/ci/ciTypeArray.cpp
+@@ -39,5 +39,10 @@
+ jchar ciTypeArray::char_at(int index) {
+   VM_ENTRY_MARK;
+   assert(index >= 0 && index < length(), "out of range");
+-  return get_typeArrayOop()->char_at(index);
++  jchar c = element_value(index).as_char();
++#ifdef ASSERT
++  jchar d = get_typeArrayOop()->char_at(index);
++  assert(c == d, "");
++#endif //ASSERT
++  return c;
+ }
+diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
+--- a/src/share/vm/classfile/classFileParser.cpp
++++ b/src/share/vm/classfile/classFileParser.cpp
+@@ -959,6 +959,7 @@
+         runtime_visible_annotations_length = attribute_length;
+         runtime_visible_annotations = cfs->get_u1_buffer();
+         assert(runtime_visible_annotations != NULL, "null visible annotations");
++        parse_annotations(loader_data, runtime_visible_annotations, runtime_visible_annotations_length, cp, parsed_annotations, CHECK);
+         cfs->skip_u1(runtime_visible_annotations_length, CHECK);
+       } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
+         runtime_invisible_annotations_length = attribute_length;
+@@ -1695,7 +1696,8 @@
+ }
+ 
+ // Sift through annotations, looking for those significant to the VM:
+-void ClassFileParser::parse_annotations(u1* buffer, int limit,
++void ClassFileParser::parse_annotations(ClassLoaderData* loader_data,
++                                        u1* buffer, int limit,
+                                         constantPoolHandle cp,
+                                         ClassFileParser::AnnotationCollector* coll,
+                                         TRAPS) {
+@@ -1733,7 +1735,7 @@
+     }
+ 
+     // Here is where parsing particular annotations will take place.
+-    AnnotationCollector::ID id = coll->annotation_index(aname);
++    AnnotationCollector::ID id = coll->annotation_index(loader_data, aname);
+     if (id == AnnotationCollector::_unknown)  continue;
+     coll->set_annotation(id);
+     // If there are no values, just set the bit and move on:
+@@ -1762,28 +1764,44 @@
+   }
+ }
+ 
+-ClassFileParser::AnnotationCollector::ID ClassFileParser::AnnotationCollector::annotation_index(Symbol* name) {
++ClassFileParser::AnnotationCollector::ID
++ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_data,
++                                                       Symbol* name) {
+   vmSymbols::SID sid = vmSymbols::find_sid(name);
++  bool privileged = false;
++  if (loader_data->is_the_null_class_loader_data()) {
++    // Privileged code can use all annotations.  Other code silently drops some.
++    privileged = true;
++  }
+   switch (sid) {
+   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
+     if (_location != _in_method)  break;  // only allow for methods
++    if (!privileged)              break;  // only allow in privileged code
+     return _method_ForceInline;
+   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_DontInline_signature):
+     if (_location != _in_method)  break;  // only allow for methods
++    if (!privileged)              break;  // only allow in privileged code
+     return _method_DontInline;
+   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature):
+     if (_location != _in_method)  break;  // only allow for methods
++    if (!privileged)              break;  // only allow in privileged code
+     return _method_LambdaForm_Compiled;
+   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Hidden_signature):
+     if (_location != _in_method)  break;  // only allow for methods
++    if (!privileged)              break;  // only allow in privileged code
+     return _method_LambdaForm_Hidden;
++  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
++    if (_location != _in_field)   break;  // only allow for fields
++    if (!privileged)              break;  // only allow in privileged code
++    return _field_Stable;
+   default: break;
+   }
+   return AnnotationCollector::_unknown;
+ }
+ 
+ void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
+-  fatal("no field annotations yet");
++  if (has_annotation(_field_Stable))
++    f->set_stable(true);
+ }
+ 
+ void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
+@@ -2125,7 +2143,7 @@
+         runtime_visible_annotations_length = method_attribute_length;
+         runtime_visible_annotations = cfs->get_u1_buffer();
+         assert(runtime_visible_annotations != NULL, "null visible annotations");
+-        parse_annotations(runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle));
++        parse_annotations(loader_data, runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle));
+         cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
+       } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
+         runtime_invisible_annotations_length = method_attribute_length;
+@@ -2785,7 +2803,8 @@
+         runtime_visible_annotations_length = attribute_length;
+         runtime_visible_annotations = cfs->get_u1_buffer();
+         assert(runtime_visible_annotations != NULL, "null visible annotations");
+-        parse_annotations(runtime_visible_annotations,
++        parse_annotations(loader_data,
++                          runtime_visible_annotations,
+                           runtime_visible_annotations_length,
+                           cp,
+                           parsed_annotations,
+diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp
+--- a/src/share/vm/classfile/classFileParser.hpp
++++ b/src/share/vm/classfile/classFileParser.hpp
+@@ -92,6 +92,7 @@
+       _method_DontInline,
+       _method_LambdaForm_Compiled,
+       _method_LambdaForm_Hidden,
++      _field_Stable,
+       _annotation_LIMIT
+     };
+     const Location _location;
+@@ -102,7 +103,7 @@
+       assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
+     }
+     // If this annotation name has an ID, report it (or _none).
+-    ID annotation_index(Symbol* name);
++    ID annotation_index(ClassLoaderData* loader_data, Symbol* name);
+     // Set the annotation name:
+     void set_annotation(ID id) {
+       assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
+@@ -237,7 +238,8 @@
+                                        int runtime_invisible_annotations_length, TRAPS);
+   int skip_annotation(u1* buffer, int limit, int index);
+   int skip_annotation_value(u1* buffer, int limit, int index);
+-  void parse_annotations(u1* buffer, int limit, constantPoolHandle cp,
++  void parse_annotations(ClassLoaderData* loader_data,
++                         u1* buffer, int limit, constantPoolHandle cp,
+                          /* Results (currently, only one result is supported): */
+                          AnnotationCollector* result,
+                          TRAPS);
+diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
+--- a/src/share/vm/classfile/vmSymbols.hpp
++++ b/src/share/vm/classfile/vmSymbols.hpp
+@@ -255,6 +255,7 @@
+   template(java_lang_invoke_LambdaForm,               "java/lang/invoke/LambdaForm")              \
+   template(java_lang_invoke_ForceInline_signature,    "Ljava/lang/invoke/ForceInline;")           \
+   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
++  template(sun_invoke_Stable_signature,               "Lsun/invoke/Stable;")                      \
+   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
+   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
+   /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */         \
+diff --git a/src/share/vm/oops/fieldInfo.hpp b/src/share/vm/oops/fieldInfo.hpp
+--- a/src/share/vm/oops/fieldInfo.hpp
++++ b/src/share/vm/oops/fieldInfo.hpp
+@@ -114,6 +114,14 @@
+     return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0;
+   }
+ 
++  bool is_stable() const {
++    return (access_flags() & JVM_ACC_FIELD_STABLE) != 0;
++  }
++  void set_stable(bool z) {
++    if (z) _shorts[access_flags_offset] |=  JVM_ACC_FIELD_STABLE;
++    else   _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE;
++  }
++
+   Symbol* lookup_symbol(int symbol_index) const {
+     assert(is_internal(), "only internal fields");
+     return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
+diff --git a/src/share/vm/opto/c2_globals.hpp b/src/share/vm/opto/c2_globals.hpp
+--- a/src/share/vm/opto/c2_globals.hpp
++++ b/src/share/vm/opto/c2_globals.hpp
+@@ -433,6 +433,9 @@
+   diagnostic(bool, EliminateAutoBox, false,                                 \
+           "Private flag to control optimizations for autobox elimination")  \
+                                                                             \
++  diagnostic(bool, FoldStableValues, false,                                 \
++          "Private flag to control optimizations for stable variables")     \
++                                                                            \
+   product(intx, AutoBoxCacheMax, 128,                                       \
+           "Sets max value cached by the java.lang.Integer autobox cache")   \
+                                                                             \
+diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
+--- a/src/share/vm/opto/compile.cpp
++++ b/src/share/vm/opto/compile.cpp
+@@ -1162,6 +1162,10 @@
+ 
+   // Array pointers need some flattening
+   const TypeAryPtr *ta = tj->isa_aryptr();
++  if (ta && ta->stable()) {
++    // Erase stability property for alias analysis.
++    tj = ta = ta->cast_to_stable(false);
++  }
+   if( ta && is_known_inst ) {
+     if ( offset != Type::OffsetBot &&
+          offset > arrayOopDesc::length_offset_in_bytes() ) {
+@@ -1362,6 +1366,7 @@
+   _index = i;
+   _adr_type = at;
+   _field = NULL;
++  _element = NULL;
+   _is_rewritable = true; // default
+   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
+   if (atoop != NULL && atoop->is_known_instance()) {
+@@ -1480,6 +1485,15 @@
+           && flat->is_instptr()->klass() == env()->Class_klass())
+         alias_type(idx)->set_rewritable(false);
+     }
++    if (flat->isa_aryptr()) {
++#ifdef ASSERT
++      const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
++      // (T_BYTE has the weakest alignment and size restrictions...)
++      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
++#endif
++      if (flat->offset() == TypePtr::OffsetBot)
++        alias_type(idx)->set_element(flat->is_aryptr()->elem());
++    }
+     if (flat->isa_klassptr()) {
+       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
+         alias_type(idx)->set_rewritable(false);
+@@ -1542,7 +1556,7 @@
+   else
+     t = TypeOopPtr::make_from_klass_raw(field->holder());
+   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
+-  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
++  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
+   return atp;
+ }
+ 
+diff --git a/src/share/vm/opto/compile.hpp b/src/share/vm/opto/compile.hpp
+--- a/src/share/vm/opto/compile.hpp
++++ b/src/share/vm/opto/compile.hpp
+@@ -69,6 +69,7 @@
+ class StartNode;
+ class SafePointNode;
+ class JVMState;
++class Type;
+ class TypeData;
+ class TypePtr;
+ class TypeFunc;
+@@ -111,6 +112,7 @@
+     int             _index;         // unique index, used with MergeMemNode
+     const TypePtr*  _adr_type;      // normalized address type
+     ciField*        _field;         // relevant instance field, or null if none
++    const Type*     _element;       // relevant array element type, or null if none
+     bool            _is_rewritable; // false if the memory is write-once only
+     int             _general_index; // if this is type is an instance, the general
+                                     // type that this is an instance of
+@@ -121,6 +123,7 @@
+     int             index()         const { return _index; }
+     const TypePtr*  adr_type()      const { return _adr_type; }
+     ciField*        field()         const { return _field; }
++    const Type*     element()       const { return _element; }
+     bool            is_rewritable() const { return _is_rewritable; }
+     bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
+     int             general_index() const { return (_general_index != 0) ? _general_index : _index; }
+@@ -129,7 +132,12 @@
+     void set_field(ciField* f) {
+       assert(!_field,"");
+       _field = f;
+-      if (f->is_final())  _is_rewritable = false;
++      if (f->is_final() || f->is_stable())  _is_rewritable = false;
++      // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
++    }
++    void set_element(const Type* e) {
++      assert(!_element,"");
++      _element = e;
+     }
+ 
+     void print_on(outputStream* st) PRODUCT_RETURN;
+diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
+--- a/src/share/vm/opto/graphKit.cpp
++++ b/src/share/vm/opto/graphKit.cpp
+@@ -3788,7 +3788,7 @@
+                                                      false, NULL, 0);
+   const TypePtr* value_field_type = string_type->add_offset(value_offset);
+   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
+-                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
++                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS,/*stable=*/true),
+                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
+   int value_field_idx = C->get_alias_index(value_field_type);
+   return make_load(ctrl, basic_plus_adr(str, str, value_offset),
+@@ -3811,7 +3811,7 @@
+                                                      false, NULL, 0);
+   const TypePtr* value_field_type = string_type->add_offset(value_offset);
+   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
+-                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
++                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS,/*stable=*/true),
+                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
+   int value_field_idx = C->get_alias_index(value_field_type);
+   store_to_memory(ctrl, basic_plus_adr(str, value_offset),
+diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
+--- a/src/share/vm/opto/library_call.cpp
++++ b/src/share/vm/opto/library_call.cpp
+@@ -1230,7 +1230,7 @@
+ 
+   Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) );
+   jint target_length = target_array->length();
+-  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
++  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin), /*stable=*/true);
+   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
+ 
+   IdealKit kit(this, false, true);
+diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
+--- a/src/share/vm/opto/memnode.cpp
++++ b/src/share/vm/opto/memnode.cpp
+@@ -932,12 +932,13 @@
+ Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
+   Node* ld_adr = in(MemNode::Address);
+ 
+-  const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
++  const TypeOopPtr* tp = phase->type(ld_adr)->isa_oopptr();
+   Compile::AliasType* atp = tp != NULL ? phase->C->alias_type(tp) : NULL;
+-  if (EliminateAutoBox && atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
+-      atp->field() != NULL && !atp->field()->is_volatile()) {
++  if (atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
++      ((EliminateAutoBox && atp->field() != NULL && !atp->field()->is_volatile())
++       || (FoldStableValues && tp->isa_aryptr() && (tp->is_aryptr()->stable())))) {
+     uint alias_idx = atp->index();
+-    bool final = atp->field()->is_final();
++    bool final = !atp->is_rewritable();
+     Node* result = NULL;
+     Node* current = st;
+     // Skip through chains of MemBarNodes checking the MergeMems for
+@@ -972,7 +973,6 @@
+     }
+   }
+ 
+-
+   // Loop around twice in the case Load -> Initialize -> Store.
+   // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
+   for (int trip = 0; trip <= 1; trip++) {
+@@ -1527,6 +1527,48 @@
+   // Try to guess loaded type from pointer type
+   if (tp->base() == Type::AryPtr) {
+     const Type *t = tp->is_aryptr()->elem();
++
++    // Make sure the reference is not into the header, by comparing
++    // the offset against the offset of the start of the array's data.
++    // Different array types begin at slightly different offsets (12 vs. 16).
++    // We choose T_BYTE as an example base type that is least restrictive
++    // as to alignment, which will therefore produce the smallest
++    // possible base offset.
++    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
++    const bool off_in_header = ((uint)off < (uint)min_base_off);
++
++    // Try to constant-fold a stable array element.
++    if (FoldStableValues && !off_in_header && off != Type::OffsetBot &&
++        adr->is_AddP() && adr->in(AddPNode::Base)->is_Con() &&
++        tp->is_aryptr()->stable()) {
++      // Decode the results of GraphKit::array_element_address.
++      BasicType loadbt = memory_type();
++      BasicType elembt = t->array_element_basic_type();
++      if (elembt == T_BOOLEAN)  elembt = T_BYTE;  // oddity about boolean[]
++      ciArray* aobj = tp->is_aryptr()->const_oop()->as_array();
++      ciConstant con = aobj->element_value_by_offset(off);
++      if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
++        const Type* con_type = Type::make_from_constant(con);
++        if (con_type != NULL) {
++          if (con_type->isa_aryptr()) {
++            // Join with the array element type, in case it is also stable.
++            int dim = tp->is_aryptr()->stable_dimension();
++            con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
++          }
++          if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
++            con_type = con_type->make_narrowoop();
++          }
++#ifndef PRODUCT
++          if (TraceIterativeGVN) {
++            tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
++            con_type->dump(); tty->cr();
++          }
++#endif //PRODUCT
++          return con_type;
++        }
++      }
++    }
++
+     // Don't do this for integer types. There is only potential profit if
+     // the element type t is lower than _type; that is, for int types, if _type is
+     // more restrictive than t.  This only happens here if one is short and the other
+@@ -1547,14 +1589,7 @@
+         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
+       // t might actually be lower than _type, if _type is a unique
+       // concrete subclass of abstract class t.
+-      // Make sure the reference is not into the header, by comparing
+-      // the offset against the offset of the start of the array's data.
+-      // Different array types begin at slightly different offsets (12 vs. 16).
+-      // We choose T_BYTE as an example base type that is least restrictive
+-      // as to alignment, which will therefore produce the smallest
+-      // possible base offset.
+-      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+-      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
++      if (!off_in_header) {
+         const Type* jt = t->join(_type);
+         // In any case, do not allow the join, per se, to empty out the type.
+         if (jt->empty() && !t->empty()) {
+diff --git a/src/share/vm/opto/parse.hpp b/src/share/vm/opto/parse.hpp
+--- a/src/share/vm/opto/parse.hpp
++++ b/src/share/vm/opto/parse.hpp
+@@ -503,7 +503,7 @@
+ 
+   // loading from a constant field or the constant pool
+   // returns false if push failed (non-perm field constants only, not ldcs)
+-  bool push_constant(ciConstant con, bool require_constant = false);
++  bool push_constant(ciConstant con, bool require_constant = false, const Type* basic_type = NULL);
+ 
+   // implementation of object creation bytecodes
+   void emit_guard_for_new(ciInstanceKlass* klass);
+diff --git a/src/share/vm/opto/parse1.cpp b/src/share/vm/opto/parse1.cpp
+--- a/src/share/vm/opto/parse1.cpp
++++ b/src/share/vm/opto/parse1.cpp
+@@ -917,6 +917,7 @@
+     // such unusual early publications.  But no barrier is needed on
+     // exceptional returns, since they cannot publish normally.
+     //
++    // Any method can write a @Stable field, and we give those the same treatment.
+     _exits.insert_mem_bar(Op_MemBarRelease);
+ #ifndef PRODUCT
+     if (PrintOpto && (Verbose || WizardMode)) {
+diff --git a/src/share/vm/opto/parse3.cpp b/src/share/vm/opto/parse3.cpp
+--- a/src/share/vm/opto/parse3.cpp
++++ b/src/share/vm/opto/parse3.cpp
+@@ -147,14 +147,21 @@
+ void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
+   // Does this field have a constant value?  If so, just push the value.
+   if (field->is_constant()) {
+-    // final field
++    // final or stable field
++    const Type* stable_type = NULL;
++    if (FoldStableValues && field->is_stable()) {
++      stable_type = Type::get_const_type(field->type());
++      if (field->type()->is_array_klass()) {
++        int stable_dimension = field->type()->as_array_klass()->dimension();
++        stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
++      }
++    }
+     if (field->is_static()) {
+       // final static field
+-      if (push_constant(field->constant_value()))
++      if (push_constant(field->constant_value(), false, stable_type))
+         return;
+-    }
+-    else {
+-      // final non-static field
++    } else {
++      // final or stable non-static field
+       // Treat final non-static fields of trusted classes (classes in
+       // java.lang.invoke and sun.invoke packages and subpackages) as
+       // compile time constants.
+@@ -162,8 +169,12 @@
+         const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
+         ciObject* constant_oop = oop_ptr->const_oop();
+         ciConstant constant = field->constant_value_of(constant_oop);
+-        if (push_constant(constant, true))
+-          return;
++        if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
++          // fall through to field load; the field is not yet initialized
++        } else {
++          if (push_constant(constant, true, stable_type))
++            return;
++        }
+       }
+     }
+   }
+@@ -302,41 +313,28 @@
+   // Note the presence of writes to final non-static fields, so that we
+   // can insert a memory barrier later on to keep the writes from floating
+   // out of the constructor.
+-  if (is_field && field->is_final()) {
++  // Any method can write a @Stable field; insert memory barriers after those also.
++  if (is_field && field->is_final()
++      || field->is_stable()) {
+     set_wrote_final(true);
+   }
+ }
+ 
+ 
+-bool Parse::push_constant(ciConstant constant, bool require_constant) {
++bool Parse::push_constant(ciConstant constant, bool require_constant, const Type* stable_type) {
++  const Type* con_type = Type::make_from_constant(constant, require_constant);
+   switch (constant.basic_type()) {
+-  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
+-  case T_INT:      push( intcon(constant.as_int())     ); break;
+-  case T_CHAR:     push( intcon(constant.as_char())    ); break;
+-  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
+-  case T_SHORT:    push( intcon(constant.as_short())   ); break;
+-  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) );  break;
+-  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) );  break;
+-  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
+   case T_ARRAY:
+-  case T_OBJECT: {
++  case T_OBJECT:
+     // cases:
+     //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
+     //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
+     // An oop is not scavengable if it is in the perm gen.
+-    ciObject* oop_constant = constant.as_object();
+-    if (oop_constant->is_null_object()) {
+-      push( zerocon(T_OBJECT) );
+-      break;
+-    } else if (require_constant || oop_constant->should_be_constant()) {
+-      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
+-      break;
+-    } else {
+-      // we cannot inline the oop, but we can use it later to narrow a type
+-      return false;
+-    }
+-  }
+-  case T_ILLEGAL: {
++    if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
++      con_type = con_type->join(stable_type);
++    break;
++
++  case T_ILLEGAL:
+     // Invalid ciConstant returned due to OutOfMemoryError in the CI
+     assert(C->env()->failing(), "otherwise should not see this");
+     // These always occur because of object types; we are going to
+@@ -344,17 +342,16 @@
+     push( zerocon(T_OBJECT) );
+     return false;
+   }
+-  default:
+-    ShouldNotReachHere();
++
++  if (con_type == NULL)
++    // we cannot inline the oop, but we can use it later to narrow a type
+     return false;
+-  }
+ 
+-  // success
++  push_node(constant.basic_type(), makecon(con_type));
+   return true;
+ }
+ 
+ 
+-
+ //=============================================================================
+ void Parse::do_anewarray() {
+   bool will_link;
+diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
+--- a/src/share/vm/opto/type.cpp
++++ b/src/share/vm/opto/type.cpp
+@@ -188,6 +188,38 @@
+ }
+ 
+ 
++//-----------------------make_from_constant------------------------------------
++const Type* Type::make_from_constant(ciConstant constant,
++                                     bool require_constant) {
++  switch (constant.basic_type()) {
++  case T_BOOLEAN:  return TypeInt::make(constant.as_boolean());
++  case T_CHAR:     return TypeInt::make(constant.as_char());
++  case T_BYTE:     return TypeInt::make(constant.as_byte());
++  case T_SHORT:    return TypeInt::make(constant.as_short());
++  case T_INT:      return TypeInt::make(constant.as_int());
++  case T_LONG:     return TypeLong::make(constant.as_long());
++  case T_FLOAT:    return TypeF::make(constant.as_float());
++  case T_DOUBLE:   return TypeD::make(constant.as_double());
++  case T_ARRAY:
++  case T_OBJECT:
++    {
++      // cases:
++      //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
++      //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
++      // An oop is not scavengable if it is in the perm gen.
++      ciObject* oop_constant = constant.as_object();
++      if (oop_constant->is_null_object()) {
++        return Type::get_zero_type(T_OBJECT);
++      } else if (require_constant || oop_constant->should_be_constant()) {
++        return TypeOopPtr::make_from_constant(oop_constant, require_constant);
++      }
++    }
++  }
++  // Fall through to failure
++  return NULL;
++}
++
++
+ //------------------------------make-------------------------------------------
+ // Create a simple Type, with default empty symbol sets.  Then hashcons it
+ // and look for an existing copy in the type dictionary.
+@@ -1804,12 +1836,13 @@
+ }
+ 
+ //------------------------------make-------------------------------------------
+-const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) {
++const TypeAry* TypeAry::make(const Type* elem, const TypeInt* size, bool stable) {
+   if (UseCompressedOops && elem->isa_oopptr()) {
+     elem = elem->make_narrowoop();
+   }
++  assert(stable == true || stable == false, "");
+   size = normalize_array_size(size);
+-  return (TypeAry*)(new TypeAry(elem,size))->hashcons();
++  return (TypeAry*)(new TypeAry(elem,size,stable))->hashcons();
+ }
+ 
+ //------------------------------meet-------------------------------------------
+@@ -1830,7 +1863,8 @@
+   case Array: {                 // Meeting 2 arrays?
+     const TypeAry *a = t->is_ary();
+     return TypeAry::make(_elem->meet(a->_elem),
+-                         _size->xmeet(a->_size)->is_int());
++                         _size->xmeet(a->_size)->is_int(),
++                         _stable & a->_stable);
+   }
+   case Top:
+     break;
+@@ -1843,7 +1877,7 @@
+ const Type *TypeAry::xdual() const {
+   const TypeInt* size_dual = _size->dual()->is_int();
+   size_dual = normalize_array_size(size_dual);
+-  return new TypeAry( _elem->dual(), size_dual);
++  return new TypeAry(_elem->dual(), size_dual, !_stable);
+ }
+ 
+ //------------------------------eq---------------------------------------------
+@@ -1851,13 +1885,14 @@
+ bool TypeAry::eq( const Type *t ) const {
+   const TypeAry *a = (const TypeAry*)t;
+   return _elem == a->_elem &&
++    _stable == a->_stable &&
+     _size == a->_size;
+ }
+ 
+ //------------------------------hash-------------------------------------------
+ // Type-specific hashing function.
+ int TypeAry::hash(void) const {
+-  return (intptr_t)_elem + (intptr_t)_size;
++  return (intptr_t)_elem + (intptr_t)_size + (_stable ? 42 : 0);
+ }
+ 
+ //----------------------interface_vs_oop---------------------------------------
+@@ -1874,6 +1909,7 @@
+ //------------------------------dump2------------------------------------------
+ #ifndef PRODUCT
+ void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const {
++  if (_stable)  st->print("stable:");
+   _elem->dump2(d, depth, st);
+   st->print("[");
+   _size->dump2(d, depth, st);
+@@ -3387,11 +3423,34 @@
+   assert(new_size != NULL, "");
+   new_size = narrow_size_type(new_size);
+   if (new_size == size())  return this;
+-  const TypeAry* new_ary = TypeAry::make(elem(), new_size);
++  const TypeAry* new_ary = TypeAry::make(elem(), new_size, stable());
+   return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
+ }
+ 
+ 
++//------------------------------cast_to_stable---------------------------------
++const TypeAryPtr* TypeAryPtr::cast_to_stable(bool stable, int stable_dimension) const {
++  assert(stable == true || stable == false, "");
++  if (stable_dimension <= 0 || stable_dimension == 1 && stable == this->stable())  return this;
++  const Type* elem = this->elem();
++  const TypePtr* elem_ptr = elem->make_ptr();
++  if (stable_dimension > 1 && elem_ptr != NULL && elem_ptr->base() == Type::AryPtr)
++    // If this is widened from a narrow oop, TypeAry::make will re-narrow it.
++    elem = elem_ptr = elem_ptr->is_aryptr()->cast_to_stable(stable, stable_dimension - 1);
++  const TypeAry* new_ary = TypeAry::make(elem, size(), stable);
++  return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
++}
++
++//-----------------------------stable_dimension--------------------------------
++int TypeAryPtr::stable_dimension() const {
++  if (!stable())  return 0;
++  int dim = 1;
++  const TypePtr* elem_ptr = elem()->make_ptr();
++  if (elem_ptr != NULL && elem_ptr->isa_aryptr())
++    dim += elem_ptr->is_aryptr()->stable_dimension();
++  return dim;
++}
++
+ //------------------------------eq---------------------------------------------
+ // Structural equality check for Type representations
+ bool TypeAryPtr::eq( const Type *t ) const {
+@@ -3499,7 +3558,7 @@
+         // Something like byte[int+] meets char[int+].
+         // This must fall to bottom, not (int[-128..65535])[int+].
+         instance_id = InstanceBot;
+-        tary = TypeAry::make(Type::BOTTOM, tary->_size);
++        tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
+       }
+     } else // Non integral arrays.
+     // Must fall to bottom if exact klasses in upper lattice
+@@ -3513,7 +3572,7 @@
+          (tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
+          // 'this' is exact and super or unrelated:
+          (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
+-      tary = TypeAry::make(Type::BOTTOM, tary->_size);
++      tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
+       return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot );
+     }
+ 
+diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
+--- a/src/share/vm/opto/type.hpp
++++ b/src/share/vm/opto/type.hpp
+@@ -357,6 +357,9 @@
+   // Mapping from CI type system to compiler type:
+   static const Type* get_typeflow_type(ciType* type);
+ 
++  static const Type* make_from_constant(ciConstant constant,
++                                        bool require_constant = false);
++
+ private:
+   // support arrays
+   static const BasicType _basic_type[];
+@@ -573,8 +576,8 @@
+ //------------------------------TypeAry----------------------------------------
+ // Class of Array Types
+ class TypeAry : public Type {
+-  TypeAry( const Type *elem, const TypeInt *size) : Type(Array),
+-    _elem(elem), _size(size) {}
++  TypeAry(const Type* elem, const TypeInt* size, bool stable) : Type(Array),
++      _elem(elem), _size(size), _stable(stable) {}
+ public:
+   virtual bool eq( const Type *t ) const;
+   virtual int  hash() const;             // Type specific hashing
+@@ -584,10 +587,11 @@
+ private:
+   const Type *_elem;            // Element type of array
+   const TypeInt *_size;         // Elements in array
++  const bool _stable;           // Are elements @Stable?
+   friend class TypeAryPtr;
+ 
+ public:
+-  static const TypeAry *make(  const Type *elem, const TypeInt *size);
++  static const TypeAry* make(const Type* elem, const TypeInt* size, bool stable = false);
+ 
+   virtual const Type *xmeet( const Type *t ) const;
+   virtual const Type *xdual() const;    // Compute dual right now.
+@@ -959,6 +963,7 @@
+   const TypeAry* ary() const  { return _ary; }
+   const Type*    elem() const { return _ary->_elem; }
+   const TypeInt* size() const { return _ary->_size; }
++  bool         stable() const { return _ary->_stable; }
+ 
+   static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
+   // Constant pointer to array
+@@ -980,6 +985,9 @@
+   virtual const Type *xmeet( const Type *t ) const;
+   virtual const Type *xdual() const;    // Compute dual right now.
+ 
++  const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const;
++  int stable_dimension() const;
++
+   // Convenience common pre-built types.
+   static const TypeAryPtr *RANGE;
+   static const TypeAryPtr *OOPS;
+diff --git a/src/share/vm/utilities/accessFlags.hpp b/src/share/vm/utilities/accessFlags.hpp
+--- a/src/share/vm/utilities/accessFlags.hpp
++++ b/src/share/vm/utilities/accessFlags.hpp
+@@ -78,11 +78,13 @@
+   JVM_ACC_FIELD_ACCESS_WATCHED       = 0x00002000,  // field access is watched by JVMTI
+   JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000,  // field modification is watched by JVMTI
+   JVM_ACC_FIELD_INTERNAL             = 0x00000400,  // internal field, same as JVM_ACC_ABSTRACT
++  JVM_ACC_FIELD_STABLE               = 0x00000020,  // @Stable field, same as JVM_ACC_SYNCHRONIZED
+   JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature
+ 
+   JVM_ACC_FIELD_INTERNAL_FLAGS       = JVM_ACC_FIELD_ACCESS_WATCHED |
+                                        JVM_ACC_FIELD_MODIFICATION_WATCHED |
+                                        JVM_ACC_FIELD_INTERNAL |
++                                       JVM_ACC_FIELD_STABLE |
+                                        JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE,
+ 
+                                                     // flags accepted by set_field_flags()
+@@ -148,6 +150,7 @@
+                                         { return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; }
+   bool on_stack() const                 { return (_flags & JVM_ACC_ON_STACK) != 0; }
+   bool is_internal() const              { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; }
++  bool is_stable() const                { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
+   bool field_has_generic_signature() const
+                                         { return (_flags & JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE) != 0; }
+ 
--- a/anno-stable.patch	Mon Oct 15 17:45:20 2012 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1037 +0,0 @@
-Summary: @Stable annotation for constant folding of lazily evaluated variables.
-
-diff --git a/src/share/vm/ci/ciArray.cpp b/src/share/vm/ci/ciArray.cpp
---- a/src/share/vm/ci/ciArray.cpp
-+++ b/src/share/vm/ci/ciArray.cpp
-@@ -26,12 +26,102 @@
- #include "ci/ciArray.hpp"
- #include "ci/ciKlass.hpp"
- #include "ci/ciUtilities.hpp"
-+#include "oops/objArrayOop.hpp"
-+#include "oops/typeArrayOop.hpp"
- 
- // ciArray
- //
- // This class represents an arrayOop in the HotSpot virtual
- // machine.
- 
-+ciArrayKlass* ciArray::array_type() {
-+  return klass()->as_array_klass();
-+}
-+
-+ciType* ciArray::element_type() {
-+  return array_type()->element_type();
-+}
-+
-+BasicType ciArray::element_basic_type() {
-+  return element_type()->basic_type();
-+}
-+
-+static BasicType fixup_element_type(BasicType bt) {
-+  if (bt == T_ARRAY)    return T_OBJECT;
-+  if (bt == T_BOOLEAN)  return T_BYTE;
-+  return bt;
-+}
-+
-+ciConstant ciArray::element_value_impl(BasicType elembt,
-+                                       arrayOop ary,
-+                                       int index) {
-+  if (ary == NULL)
-+    return ciConstant();
-+  assert(ary->is_array(), "");
-+  if (index < 0 || index >= ary->length())
-+    return ciConstant();
-+  arrayKlass* ak = (arrayKlass*) ary->klass();
-+  BasicType abt = ak->element_type();
-+  if (fixup_element_type(elembt) !=
-+      fixup_element_type(abt))
-+    return ciConstant();
-+  switch (elembt) {
-+  case T_ARRAY:
-+  case T_OBJECT:
-+    {
-+      assert(ary->is_objArray(), "");
-+      objArrayOop objary = (objArrayOop) ary;
-+      oop elem = objary->obj_at(index);
-+      ciEnv* env = CURRENT_ENV;
-+      ciObject* box = env->get_object(elem);
-+      return ciConstant(T_OBJECT, box);
-+    }
-+  }
-+  assert(ary->is_typeArray(), "");
-+  typeArrayOop tary = (typeArrayOop) ary;
-+  jint value = 0;
-+  switch (elembt) {
-+  case T_LONG:          return ciConstant(tary->long_at(index));
-+  case T_FLOAT:         return ciConstant(tary->float_at(index));
-+  case T_DOUBLE:        return ciConstant(tary->double_at(index));
-+  default:              return ciConstant();
-+  case T_BYTE:          value = tary->byte_at(index);           break;
-+  case T_BOOLEAN:       value = tary->byte_at(index) & 1;       break;
-+  case T_SHORT:         value = tary->short_at(index);          break;
-+  case T_CHAR:          value = tary->char_at(index);           break;
-+  case T_INT:           value = tary->int_at(index);            break;
-+  }
-+  return ciConstant(elembt, value);
-+}
-+
-+// ------------------------------------------------------------------
-+// ciArray::element_value
-+//
-+// Current value of an element.
-+// Returns T_ILLEGAL if there is no element at the given index.
-+ciConstant ciArray::element_value(int index) {
-+  BasicType elembt = element_basic_type();
-+  GUARDED_VM_ENTRY(
-+    return element_value_impl(elembt, get_arrayOop(), index);
-+  )
-+}
-+
-+// ------------------------------------------------------------------
-+// ciArray::element_value_by_offset
-+//
-+// Current value of an element at the specified offset.
-+// Returns T_ILLEGAL if there is no element at the given offset.
-+ciConstant ciArray::element_value_by_offset(intptr_t element_offset) {
-+  BasicType elembt = element_basic_type();
-+  intptr_t shift  = exact_log2(type2aelembytes(elembt));
-+  intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt);
-+  intptr_t index = (element_offset - header) >> shift;
-+  intptr_t offset = header + ((intptr_t)index << shift);
-+  if (offset != element_offset || index != (jint)index)
-+    return ciConstant();
-+  return element_value((jint) index);
-+}
-+
- // ------------------------------------------------------------------
- // ciArray::print_impl
- //
-diff --git a/src/share/vm/ci/ciArray.hpp b/src/share/vm/ci/ciArray.hpp
---- a/src/share/vm/ci/ciArray.hpp
-+++ b/src/share/vm/ci/ciArray.hpp
-@@ -51,9 +51,24 @@
- 
-   void print_impl(outputStream* st);
- 
-+  ciConstant element_value_impl(BasicType elembt, arrayOop ary, int index);
-+
- public:
-   int length() { return _length; }
- 
-+  // Convenience routines.
-+  ciArrayKlass* array_type();       // klass()->as_array_klass()
-+  ciType* element_type();           // array_type()->element_type()
-+  BasicType element_basic_type();   // element_type()->basic_type()
-+
-+  // Current value of an element.
-+  // Returns T_ILLEGAL if there is no element at the given index.
-+  ciConstant element_value(int index);
-+
-+  // Current value of an element at the specified offset.
-+  // Returns T_ILLEGAL if there is no element at the given offset.
-+  ciConstant element_value_by_offset(intptr_t element_offset);
-+
-   // What kind of ciObject is this?
-   bool is_array()        { return true; }
-   bool is_java_object()  { return true; }
-diff --git a/src/share/vm/ci/ciConstant.hpp b/src/share/vm/ci/ciConstant.hpp
---- a/src/share/vm/ci/ciConstant.hpp
-+++ b/src/share/vm/ci/ciConstant.hpp
-@@ -41,7 +41,6 @@
-   union {
-     jint      _int;
-     jlong     _long;
--    jint      _long_half[2];
-     jfloat    _float;
-     jdouble   _double;
-     ciObject* _object;
-@@ -111,6 +110,19 @@
-     return _value._object;
-   }
- 
-+  bool     is_null_or_zero() const {
-+    if (!is_java_primitive(basic_type()))
-+      return as_object()->is_null_object();
-+    else if (type2size[basic_type()] == 1)
-+      // treat float bits as int, to avoid comparison with -0 and NaN
-+      return (_value._int == 0);
-+    else if (type2size[basic_type()] == 2)
-+      // treat double bits as long, to avoid comparison with -0 and NaN
-+      return (_value._long == 0);
-+    else
-+      return false;
-+  }
-+
-   // Debugging output
-   void print();
- };
-diff --git a/src/share/vm/ci/ciField.cpp b/src/share/vm/ci/ciField.cpp
---- a/src/share/vm/ci/ciField.cpp
-+++ b/src/share/vm/ci/ciField.cpp
-@@ -189,12 +189,14 @@
-   _holder = CURRENT_ENV->get_instance_klass(fd->field_holder());
- 
-   // Check to see if the field is constant.
--  if (_holder->is_initialized() && this->is_final()) {
-+  bool is_final = this->is_final();
-+  bool is_stable = this->is_stable();
-+  if (_holder->is_initialized() && (is_final || is_stable)) {
-     if (!this->is_static()) {
-       // A field can be constant if it's a final static field or if
-       // it's a final non-static field of a trusted class (classes in
-       // java.lang.invoke and sun.invoke packages and subpackages).
--      if (trust_final_non_static_fields(_holder)) {
-+      if (is_stable || trust_final_non_static_fields(_holder)) {
-         _is_constant = true;
-         return;
-       }
-@@ -227,7 +229,6 @@
- 
-     Handle mirror = k->java_mirror();
- 
--    _is_constant = true;
-     switch(type()->basic_type()) {
-     case T_BYTE:
-       _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
-@@ -273,6 +274,12 @@
-         }
-       }
-     }
-+    if (is_stable && _constant_value.is_null_or_zero()) {
-+      // It is not a constant after all; treat it as uninitialized.
-+      _is_constant = false;
-+    } else {
-+      _is_constant = true;
-+    }
-   } else {
-     _is_constant = false;
-   }
-@@ -373,6 +380,7 @@
-   tty->print(" offset=%d type=", _offset);
-   if (_type != NULL) _type->print_name();
-   else               tty->print("(reference)");
-+  tty->print(" flags=%04x", flags().as_int());
-   tty->print(" is_constant=%s", bool_to_str(_is_constant));
-   if (_is_constant && is_static()) {
-     tty->print(" constant_value=");
-diff --git a/src/share/vm/ci/ciField.hpp b/src/share/vm/ci/ciField.hpp
---- a/src/share/vm/ci/ciField.hpp
-+++ b/src/share/vm/ci/ciField.hpp
-@@ -139,7 +139,10 @@
-   //      non-constant fields.  These are java.lang.System.in
-   //      and java.lang.System.out.  Abomination.
-   //
--  // Note: the check for case 4 is not yet implemented.
-+  // A field is also considered constant if it is marked @Stable
-+  // and is non-null (or non-zero, if a primitive).
-+  // For non-static fields, the null/zero check must be
-+  // arranged by the user, as constant_value().is_null_or_zero().
-   bool is_constant() { return _is_constant; }
- 
-   // Get the constant value of this field.
-@@ -173,6 +176,7 @@
-   bool is_protected   () { return flags().is_protected(); }
-   bool is_static      () { return flags().is_static(); }
-   bool is_final       () { return flags().is_final(); }
-+  bool is_stable      () { return flags().is_stable(); }
-   bool is_volatile    () { return flags().is_volatile(); }
-   bool is_transient   () { return flags().is_transient(); }
- 
-diff --git a/src/share/vm/ci/ciFlags.hpp b/src/share/vm/ci/ciFlags.hpp
---- a/src/share/vm/ci/ciFlags.hpp
-+++ b/src/share/vm/ci/ciFlags.hpp
-@@ -59,6 +59,7 @@
-   bool is_interface   () const         { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
-   bool is_abstract    () const         { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
-   bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }
-+  bool is_stable      () const         { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
- 
-   // Conversion
-   jint   as_int()                      { return _flags; }
-diff --git a/src/share/vm/ci/ciInstance.cpp b/src/share/vm/ci/ciInstance.cpp
---- a/src/share/vm/ci/ciInstance.cpp
-+++ b/src/share/vm/ci/ciInstance.cpp
-@@ -127,6 +127,7 @@
- ciConstant ciInstance::field_value_by_offset(int field_offset) {
-   ciInstanceKlass* ik = klass()->as_instance_klass();
-   ciField* field = ik->get_field_by_offset(field_offset, false);
-+  if (field == NULL)  return ciConstant();  // T_ILLEGAL
-   return field_value(field);
- }
- 
-diff --git a/src/share/vm/ci/ciTypeArray.cpp b/src/share/vm/ci/ciTypeArray.cpp
---- a/src/share/vm/ci/ciTypeArray.cpp
-+++ b/src/share/vm/ci/ciTypeArray.cpp
-@@ -39,5 +39,10 @@
- jchar ciTypeArray::char_at(int index) {
-   VM_ENTRY_MARK;
-   assert(index >= 0 && index < length(), "out of range");
--  return get_typeArrayOop()->char_at(index);
-+  jchar c = element_value(index).as_char();
-+#ifdef ASSERT
-+  jchar d = get_typeArrayOop()->char_at(index);
-+  assert(c == d, "");
-+#endif //ASSERT
-+  return c;
- }
-diff --git a/src/share/vm/classfile/classFileParser.cpp b/src/share/vm/classfile/classFileParser.cpp
---- a/src/share/vm/classfile/classFileParser.cpp
-+++ b/src/share/vm/classfile/classFileParser.cpp
-@@ -959,6 +959,7 @@
-         runtime_visible_annotations_length = attribute_length;
-         runtime_visible_annotations = cfs->get_u1_buffer();
-         assert(runtime_visible_annotations != NULL, "null visible annotations");
-+        parse_annotations(loader_data, runtime_visible_annotations, runtime_visible_annotations_length, cp, parsed_annotations, CHECK);
-         cfs->skip_u1(runtime_visible_annotations_length, CHECK);
-       } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
-         runtime_invisible_annotations_length = attribute_length;
-@@ -1695,7 +1696,8 @@
- }
- 
- // Sift through annotations, looking for those significant to the VM:
--void ClassFileParser::parse_annotations(u1* buffer, int limit,
-+void ClassFileParser::parse_annotations(ClassLoaderData* loader_data,
-+                                        u1* buffer, int limit,
-                                         constantPoolHandle cp,
-                                         ClassFileParser::AnnotationCollector* coll,
-                                         TRAPS) {
-@@ -1733,7 +1735,7 @@
-     }
- 
-     // Here is where parsing particular annotations will take place.
--    AnnotationCollector::ID id = coll->annotation_index(aname);
-+    AnnotationCollector::ID id = coll->annotation_index(loader_data, aname);
-     if (id == AnnotationCollector::_unknown)  continue;
-     coll->set_annotation(id);
-     // If there are no values, just set the bit and move on:
-@@ -1762,28 +1764,44 @@
-   }
- }
- 
--ClassFileParser::AnnotationCollector::ID ClassFileParser::AnnotationCollector::annotation_index(Symbol* name) {
-+ClassFileParser::AnnotationCollector::ID
-+ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_data,
-+                                                       Symbol* name) {
-   vmSymbols::SID sid = vmSymbols::find_sid(name);
-+  bool privileged = false;
-+  if (loader_data->is_the_null_class_loader_data()) {
-+    // Privileged code can use all annotations.  Other code silently drops some.
-+    privileged = true;
-+  }
-   switch (sid) {
-   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
-     if (_location != _in_method)  break;  // only allow for methods
-+    if (!privileged)              break;  // only allow in privileged code
-     return _method_ForceInline;
-   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_DontInline_signature):
-     if (_location != _in_method)  break;  // only allow for methods
-+    if (!privileged)              break;  // only allow in privileged code
-     return _method_DontInline;
-   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature):
-     if (_location != _in_method)  break;  // only allow for methods
-+    if (!privileged)              break;  // only allow in privileged code
-     return _method_LambdaForm_Compiled;
-   case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Hidden_signature):
-     if (_location != _in_method)  break;  // only allow for methods
-+    if (!privileged)              break;  // only allow in privileged code
-     return _method_LambdaForm_Hidden;
-+  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
-+    if (_location != _in_field)   break;  // only allow for fields
-+    if (!privileged)              break;  // only allow in privileged code
-+    return _field_Stable;
-   default: break;
-   }
-   return AnnotationCollector::_unknown;
- }
- 
- void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
--  fatal("no field annotations yet");
-+  if (has_annotation(_field_Stable))
-+    f->set_stable(true);
- }
- 
- void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
-@@ -2125,7 +2143,7 @@
-         runtime_visible_annotations_length = method_attribute_length;
-         runtime_visible_annotations = cfs->get_u1_buffer();
-         assert(runtime_visible_annotations != NULL, "null visible annotations");
--        parse_annotations(runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle));
-+        parse_annotations(loader_data, runtime_visible_annotations, runtime_visible_annotations_length, cp, &parsed_annotations, CHECK_(nullHandle));
-         cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
-       } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
-         runtime_invisible_annotations_length = method_attribute_length;
-@@ -2785,7 +2803,8 @@
-         runtime_visible_annotations_length = attribute_length;
-         runtime_visible_annotations = cfs->get_u1_buffer();
-         assert(runtime_visible_annotations != NULL, "null visible annotations");
--        parse_annotations(runtime_visible_annotations,
-+        parse_annotations(loader_data,
-+                          runtime_visible_annotations,
-                           runtime_visible_annotations_length,
-                           cp,
-                           parsed_annotations,
-diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp
---- a/src/share/vm/classfile/classFileParser.hpp
-+++ b/src/share/vm/classfile/classFileParser.hpp
-@@ -92,6 +92,7 @@
-       _method_DontInline,
-       _method_LambdaForm_Compiled,
-       _method_LambdaForm_Hidden,
-+      _field_Stable,
-       _annotation_LIMIT
-     };
-     const Location _location;
-@@ -102,7 +103,7 @@
-       assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
-     }
-     // If this annotation name has an ID, report it (or _none).
--    ID annotation_index(Symbol* name);
-+    ID annotation_index(ClassLoaderData* loader_data, Symbol* name);
-     // Set the annotation name:
-     void set_annotation(ID id) {
-       assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
-@@ -237,7 +238,8 @@
-                                        int runtime_invisible_annotations_length, TRAPS);
-   int skip_annotation(u1* buffer, int limit, int index);
-   int skip_annotation_value(u1* buffer, int limit, int index);
--  void parse_annotations(u1* buffer, int limit, constantPoolHandle cp,
-+  void parse_annotations(ClassLoaderData* loader_data,
-+                         u1* buffer, int limit, constantPoolHandle cp,
-                          /* Results (currently, only one result is supported): */
-                          AnnotationCollector* result,
-                          TRAPS);
-diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
---- a/src/share/vm/classfile/vmSymbols.hpp
-+++ b/src/share/vm/classfile/vmSymbols.hpp
-@@ -255,6 +255,7 @@
-   template(java_lang_invoke_LambdaForm,               "java/lang/invoke/LambdaForm")              \
-   template(java_lang_invoke_ForceInline_signature,    "Ljava/lang/invoke/ForceInline;")           \
-   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
-+  template(sun_invoke_Stable_signature,               "Lsun/invoke/Stable;")                      \
-   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
-   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
-   /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */         \
-diff --git a/src/share/vm/oops/fieldInfo.hpp b/src/share/vm/oops/fieldInfo.hpp
---- a/src/share/vm/oops/fieldInfo.hpp
-+++ b/src/share/vm/oops/fieldInfo.hpp
-@@ -114,6 +114,14 @@
-     return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0;
-   }
- 
-+  bool is_stable() const {
-+    return (access_flags() & JVM_ACC_FIELD_STABLE) != 0;
-+  }
-+  void set_stable(bool z) {
-+    if (z) _shorts[access_flags_offset] |=  JVM_ACC_FIELD_STABLE;
-+    else   _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE;
-+  }
-+
-   Symbol* lookup_symbol(int symbol_index) const {
-     assert(is_internal(), "only internal fields");
-     return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
-diff --git a/src/share/vm/opto/c2_globals.hpp b/src/share/vm/opto/c2_globals.hpp
---- a/src/share/vm/opto/c2_globals.hpp
-+++ b/src/share/vm/opto/c2_globals.hpp
-@@ -433,6 +433,9 @@
-   diagnostic(bool, EliminateAutoBox, false,                                 \
-           "Private flag to control optimizations for autobox elimination")  \
-                                                                             \
-+  diagnostic(bool, FoldStableValues, false,                                 \
-+          "Private flag to control optimizations for stable variables")     \
-+                                                                            \
-   product(intx, AutoBoxCacheMax, 128,                                       \
-           "Sets max value cached by the java.lang.Integer autobox cache")   \
-                                                                             \
-diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
---- a/src/share/vm/opto/compile.cpp
-+++ b/src/share/vm/opto/compile.cpp
-@@ -1162,6 +1162,10 @@
- 
-   // Array pointers need some flattening
-   const TypeAryPtr *ta = tj->isa_aryptr();
-+  if (ta && ta->stable()) {
-+    // Erase stability property for alias analysis.
-+    tj = ta = ta->cast_to_stable(false);
-+  }
-   if( ta && is_known_inst ) {
-     if ( offset != Type::OffsetBot &&
-          offset > arrayOopDesc::length_offset_in_bytes() ) {
-@@ -1362,6 +1366,7 @@
-   _index = i;
-   _adr_type = at;
-   _field = NULL;
-+  _element = NULL;
-   _is_rewritable = true; // default
-   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
-   if (atoop != NULL && atoop->is_known_instance()) {
-@@ -1480,6 +1485,15 @@
-           && flat->is_instptr()->klass() == env()->Class_klass())
-         alias_type(idx)->set_rewritable(false);
-     }
-+    if (flat->isa_aryptr()) {
-+#ifdef ASSERT
-+      const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
-+      // (T_BYTE has the weakest alignment and size restrictions...)
-+      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
-+#endif
-+      if (flat->offset() == TypePtr::OffsetBot)
-+        alias_type(idx)->set_element(flat->is_aryptr()->elem());
-+    }
-     if (flat->isa_klassptr()) {
-       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
-         alias_type(idx)->set_rewritable(false);
-@@ -1542,7 +1556,7 @@
-   else
-     t = TypeOopPtr::make_from_klass_raw(field->holder());
-   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
--  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
-+  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
-   return atp;
- }
- 
-diff --git a/src/share/vm/opto/compile.hpp b/src/share/vm/opto/compile.hpp
---- a/src/share/vm/opto/compile.hpp
-+++ b/src/share/vm/opto/compile.hpp
-@@ -69,6 +69,7 @@
- class StartNode;
- class SafePointNode;
- class JVMState;
-+class Type;
- class TypeData;
- class TypePtr;
- class TypeFunc;
-@@ -111,6 +112,7 @@
-     int             _index;         // unique index, used with MergeMemNode
-     const TypePtr*  _adr_type;      // normalized address type
-     ciField*        _field;         // relevant instance field, or null if none
-+    const Type*     _element;       // relevant array element type, or null if none
-     bool            _is_rewritable; // false if the memory is write-once only
-     int             _general_index; // if this is type is an instance, the general
-                                     // type that this is an instance of
-@@ -121,6 +123,7 @@
-     int             index()         const { return _index; }
-     const TypePtr*  adr_type()      const { return _adr_type; }
-     ciField*        field()         const { return _field; }
-+    const Type*     element()       const { return _element; }
-     bool            is_rewritable() const { return _is_rewritable; }
-     bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
-     int             general_index() const { return (_general_index != 0) ? _general_index : _index; }
-@@ -129,7 +132,12 @@
-     void set_field(ciField* f) {
-       assert(!_field,"");
-       _field = f;
--      if (f->is_final())  _is_rewritable = false;
-+      if (f->is_final() || f->is_stable())  _is_rewritable = false;
-+      // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
-+    }
-+    void set_element(const Type* e) {
-+      assert(!_element,"");
-+      _element = e;
-     }
- 
-     void print_on(outputStream* st) PRODUCT_RETURN;
-diff --git a/src/share/vm/opto/graphKit.cpp b/src/share/vm/opto/graphKit.cpp
---- a/src/share/vm/opto/graphKit.cpp
-+++ b/src/share/vm/opto/graphKit.cpp
-@@ -3788,7 +3788,7 @@
-                                                      false, NULL, 0);
-   const TypePtr* value_field_type = string_type->add_offset(value_offset);
-   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
--                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
-+                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS,/*stable=*/true),
-                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
-   int value_field_idx = C->get_alias_index(value_field_type);
-   return make_load(ctrl, basic_plus_adr(str, str, value_offset),
-@@ -3811,7 +3811,7 @@
-                                                      false, NULL, 0);
-   const TypePtr* value_field_type = string_type->add_offset(value_offset);
-   const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
--                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
-+                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS,/*stable=*/true),
-                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
-   int value_field_idx = C->get_alias_index(value_field_type);
-   store_to_memory(ctrl, basic_plus_adr(str, value_offset),
-diff --git a/src/share/vm/opto/library_call.cpp b/src/share/vm/opto/library_call.cpp
---- a/src/share/vm/opto/library_call.cpp
-+++ b/src/share/vm/opto/library_call.cpp
-@@ -1230,7 +1230,7 @@
- 
-   Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) );
-   jint target_length = target_array->length();
--  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
-+  const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin), /*stable=*/true);
-   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
- 
-   IdealKit kit(this, false, true);
-diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
---- a/src/share/vm/opto/memnode.cpp
-+++ b/src/share/vm/opto/memnode.cpp
-@@ -932,12 +932,13 @@
- Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
-   Node* ld_adr = in(MemNode::Address);
- 
--  const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
-+  const TypeOopPtr* tp = phase->type(ld_adr)->isa_oopptr();
-   Compile::AliasType* atp = tp != NULL ? phase->C->alias_type(tp) : NULL;
--  if (EliminateAutoBox && atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
--      atp->field() != NULL && !atp->field()->is_volatile()) {
-+  if (atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
-+      ((EliminateAutoBox && atp->field() != NULL && !atp->field()->is_volatile())
-+       || (FoldStableValues && tp->isa_aryptr() && (tp->is_aryptr()->stable())))) {
-     uint alias_idx = atp->index();
--    bool final = atp->field()->is_final();
-+    bool final = !atp->is_rewritable();
-     Node* result = NULL;
-     Node* current = st;
-     // Skip through chains of MemBarNodes checking the MergeMems for
-@@ -972,7 +973,6 @@
-     }
-   }
- 
--
-   // Loop around twice in the case Load -> Initialize -> Store.
-   // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
-   for (int trip = 0; trip <= 1; trip++) {
-@@ -1527,6 +1527,48 @@
-   // Try to guess loaded type from pointer type
-   if (tp->base() == Type::AryPtr) {
-     const Type *t = tp->is_aryptr()->elem();
-+
-+    // Make sure the reference is not into the header, by comparing
-+    // the offset against the offset of the start of the array's data.
-+    // Different array types begin at slightly different offsets (12 vs. 16).
-+    // We choose T_BYTE as an example base type that is least restrictive
-+    // as to alignment, which will therefore produce the smallest
-+    // possible base offset.
-+    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
-+    const bool off_in_header = ((uint)off < (uint)min_base_off);
-+
-+    // Try to constant-fold a stable array element.
-+    if (FoldStableValues && !off_in_header && off != Type::OffsetBot &&
-+        adr->is_AddP() && adr->in(AddPNode::Base)->is_Con() &&
-+        tp->is_aryptr()->stable()) {
-+      // Decode the results of GraphKit::array_element_address.
-+      BasicType loadbt = memory_type();
-+      BasicType elembt = t->array_element_basic_type();
-+      if (elembt == T_BOOLEAN)  elembt = T_BYTE;  // oddity about boolean[]
-+      ciArray* aobj = tp->is_aryptr()->const_oop()->as_array();
-+      ciConstant con = aobj->element_value_by_offset(off);
-+      if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
-+        const Type* con_type = Type::make_from_constant(con);
-+        if (con_type != NULL) {
-+          if (con_type->isa_aryptr()) {
-+            // Join with the array element type, in case it is also stable.
-+            int dim = tp->is_aryptr()->stable_dimension();
-+            con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
-+          }
-+          if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
-+            con_type = con_type->make_narrowoop();
-+          }
-+#ifndef PRODUCT
-+          if (TraceIterativeGVN) {
-+            tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
-+            con_type->dump(); tty->cr();
-+          }
-+#endif //PRODUCT
-+          return con_type;
-+        }
-+      }
-+    }
-+
-     // Don't do this for integer types. There is only potential profit if
-     // the element type t is lower than _type; that is, for int types, if _type is
-     // more restrictive than t.  This only happens here if one is short and the other
-@@ -1547,14 +1589,7 @@
-         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
-       // t might actually be lower than _type, if _type is a unique
-       // concrete subclass of abstract class t.
--      // Make sure the reference is not into the header, by comparing
--      // the offset against the offset of the start of the array's data.
--      // Different array types begin at slightly different offsets (12 vs. 16).
--      // We choose T_BYTE as an example base type that is least restrictive
--      // as to alignment, which will therefore produce the smallest
--      // possible base offset.
--      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
--      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
-+      if (!off_in_header) {
-         const Type* jt = t->join(_type);
-         // In any case, do not allow the join, per se, to empty out the type.
-         if (jt->empty() && !t->empty()) {
-diff --git a/src/share/vm/opto/parse.hpp b/src/share/vm/opto/parse.hpp
---- a/src/share/vm/opto/parse.hpp
-+++ b/src/share/vm/opto/parse.hpp
-@@ -503,7 +503,7 @@
- 
-   // loading from a constant field or the constant pool
-   // returns false if push failed (non-perm field constants only, not ldcs)
--  bool push_constant(ciConstant con, bool require_constant = false);
-+  bool push_constant(ciConstant con, bool require_constant = false, const Type* basic_type = NULL);
- 
-   // implementation of object creation bytecodes
-   void emit_guard_for_new(ciInstanceKlass* klass);
-diff --git a/src/share/vm/opto/parse1.cpp b/src/share/vm/opto/parse1.cpp
---- a/src/share/vm/opto/parse1.cpp
-+++ b/src/share/vm/opto/parse1.cpp
-@@ -917,6 +917,7 @@
-     // such unusual early publications.  But no barrier is needed on
-     // exceptional returns, since they cannot publish normally.
-     //
-+    // Any method can write a @Stable field, and we give those the same treatment.
-     _exits.insert_mem_bar(Op_MemBarRelease);
- #ifndef PRODUCT
-     if (PrintOpto && (Verbose || WizardMode)) {
-diff --git a/src/share/vm/opto/parse3.cpp b/src/share/vm/opto/parse3.cpp
---- a/src/share/vm/opto/parse3.cpp
-+++ b/src/share/vm/opto/parse3.cpp
-@@ -147,14 +147,21 @@
- void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
-   // Does this field have a constant value?  If so, just push the value.
-   if (field->is_constant()) {
--    // final field
-+    // final or stable field
-+    const Type* stable_type = NULL;
-+    if (FoldStableValues && field->is_stable()) {
-+      stable_type = Type::get_const_type(field->type());
-+      if (field->type()->is_array_klass()) {
-+        int stable_dimension = field->type()->as_array_klass()->dimension();
-+        stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
-+      }
-+    }
-     if (field->is_static()) {
-       // final static field
--      if (push_constant(field->constant_value()))
-+      if (push_constant(field->constant_value(), false, stable_type))
-         return;
--    }
--    else {
--      // final non-static field
-+    } else {
-+      // final or stable non-static field
-       // Treat final non-static fields of trusted classes (classes in
-       // java.lang.invoke and sun.invoke packages and subpackages) as
-       // compile time constants.
-@@ -162,8 +169,12 @@
-         const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
-         ciObject* constant_oop = oop_ptr->const_oop();
-         ciConstant constant = field->constant_value_of(constant_oop);
--        if (push_constant(constant, true))
--          return;
-+        if (FoldStableValues && field->is_stable() && constant.is_null_or_zero()) {
-+          // fall through to field load; the field is not yet initialized
-+        } else {
-+          if (push_constant(constant, true, stable_type))
-+            return;
-+        }
-       }
-     }
-   }
-@@ -302,41 +313,28 @@
-   // Note the presence of writes to final non-static fields, so that we
-   // can insert a memory barrier later on to keep the writes from floating
-   // out of the constructor.
--  if (is_field && field->is_final()) {
-+  // Any method can write a @Stable field; insert memory barriers after those also.
-+  if (is_field && field->is_final()
-+      || field->is_stable()) {
-     set_wrote_final(true);
-   }
- }
- 
- 
--bool Parse::push_constant(ciConstant constant, bool require_constant) {
-+bool Parse::push_constant(ciConstant constant, bool require_constant, const Type* stable_type) {
-+  const Type* con_type = Type::make_from_constant(constant, require_constant);
-   switch (constant.basic_type()) {
--  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
--  case T_INT:      push( intcon(constant.as_int())     ); break;
--  case T_CHAR:     push( intcon(constant.as_char())    ); break;
--  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
--  case T_SHORT:    push( intcon(constant.as_short())   ); break;
--  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) );  break;
--  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) );  break;
--  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
-   case T_ARRAY:
--  case T_OBJECT: {
-+  case T_OBJECT:
-     // cases:
-     //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
-     //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
-     // An oop is not scavengable if it is in the perm gen.
--    ciObject* oop_constant = constant.as_object();
--    if (oop_constant->is_null_object()) {
--      push( zerocon(T_OBJECT) );
--      break;
--    } else if (require_constant || oop_constant->should_be_constant()) {
--      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
--      break;
--    } else {
--      // we cannot inline the oop, but we can use it later to narrow a type
--      return false;
--    }
--  }
--  case T_ILLEGAL: {
-+    if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
-+      con_type = con_type->join(stable_type);
-+    break;
-+
-+  case T_ILLEGAL:
-     // Invalid ciConstant returned due to OutOfMemoryError in the CI
-     assert(C->env()->failing(), "otherwise should not see this");
-     // These always occur because of object types; we are going to
-@@ -344,17 +342,16 @@
-     push( zerocon(T_OBJECT) );
-     return false;
-   }
--  default:
--    ShouldNotReachHere();
-+
-+  if (con_type == NULL)
-+    // we cannot inline the oop, but we can use it later to narrow a type
-     return false;
--  }
- 
--  // success
-+  push_node(constant.basic_type(), makecon(con_type));
-   return true;
- }
- 
- 
--
- //=============================================================================
- void Parse::do_anewarray() {
-   bool will_link;
-diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
---- a/src/share/vm/opto/type.cpp
-+++ b/src/share/vm/opto/type.cpp
-@@ -188,6 +188,38 @@
- }
- 
- 
-+//-----------------------make_from_constant------------------------------------
-+const Type* Type::make_from_constant(ciConstant constant,
-+                                     bool require_constant) {
-+  switch (constant.basic_type()) {
-+  case T_BOOLEAN:  return TypeInt::make(constant.as_boolean());
-+  case T_CHAR:     return TypeInt::make(constant.as_char());
-+  case T_BYTE:     return TypeInt::make(constant.as_byte());
-+  case T_SHORT:    return TypeInt::make(constant.as_short());
-+  case T_INT:      return TypeInt::make(constant.as_int());
-+  case T_LONG:     return TypeLong::make(constant.as_long());
-+  case T_FLOAT:    return TypeF::make(constant.as_float());
-+  case T_DOUBLE:   return TypeD::make(constant.as_double());
-+  case T_ARRAY:
-+  case T_OBJECT:
-+    {
-+      // cases:
-+      //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
-+      //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
-+      // An oop is not scavengable if it is in the perm gen.
-+      ciObject* oop_constant = constant.as_object();
-+      if (oop_constant->is_null_object()) {
-+        return Type::get_zero_type(T_OBJECT);
-+      } else if (require_constant || oop_constant->should_be_constant()) {
-+        return TypeOopPtr::make_from_constant(oop_constant, require_constant);
-+      }
-+    }
-+  }
-+  // Fall through to failure
-+  return NULL;
-+}
-+
-+
- //------------------------------make-------------------------------------------
- // Create a simple Type, with default empty symbol sets.  Then hashcons it
- // and look for an existing copy in the type dictionary.
-@@ -1804,12 +1836,13 @@
- }
- 
- //------------------------------make-------------------------------------------
--const TypeAry *TypeAry::make( const Type *elem, const TypeInt *size) {
-+const TypeAry* TypeAry::make(const Type* elem, const TypeInt* size, bool stable) {
-   if (UseCompressedOops && elem->isa_oopptr()) {
-     elem = elem->make_narrowoop();
-   }
-+  assert(stable == true || stable == false, "");
-   size = normalize_array_size(size);
--  return (TypeAry*)(new TypeAry(elem,size))->hashcons();
-+  return (TypeAry*)(new TypeAry(elem,size,stable))->hashcons();
- }
- 
- //------------------------------meet-------------------------------------------
-@@ -1830,7 +1863,8 @@
-   case Array: {                 // Meeting 2 arrays?
-     const TypeAry *a = t->is_ary();
-     return TypeAry::make(_elem->meet(a->_elem),
--                         _size->xmeet(a->_size)->is_int());
-+                         _size->xmeet(a->_size)->is_int(),
-+                         _stable & a->_stable);
-   }
-   case Top:
-     break;
-@@ -1843,7 +1877,7 @@
- const Type *TypeAry::xdual() const {
-   const TypeInt* size_dual = _size->dual()->is_int();
-   size_dual = normalize_array_size(size_dual);
--  return new TypeAry( _elem->dual(), size_dual);
-+  return new TypeAry(_elem->dual(), size_dual, !_stable);
- }
- 
- //------------------------------eq---------------------------------------------
-@@ -1851,13 +1885,14 @@
- bool TypeAry::eq( const Type *t ) const {
-   const TypeAry *a = (const TypeAry*)t;
-   return _elem == a->_elem &&
-+    _stable == a->_stable &&
-     _size == a->_size;
- }
- 
- //------------------------------hash-------------------------------------------
- // Type-specific hashing function.
- int TypeAry::hash(void) const {
--  return (intptr_t)_elem + (intptr_t)_size;
-+  return (intptr_t)_elem + (intptr_t)_size + (_stable ? 42 : 0);
- }
- 
- //----------------------interface_vs_oop---------------------------------------
-@@ -1874,6 +1909,7 @@
- //------------------------------dump2------------------------------------------
- #ifndef PRODUCT
- void TypeAry::dump2( Dict &d, uint depth, outputStream *st ) const {
-+  if (_stable)  st->print("stable:");
-   _elem->dump2(d, depth, st);
-   st->print("[");
-   _size->dump2(d, depth, st);
-@@ -3387,11 +3423,34 @@
-   assert(new_size != NULL, "");
-   new_size = narrow_size_type(new_size);
-   if (new_size == size())  return this;
--  const TypeAry* new_ary = TypeAry::make(elem(), new_size);
-+  const TypeAry* new_ary = TypeAry::make(elem(), new_size, stable());
-   return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
- }
- 
- 
-+//------------------------------cast_to_stable---------------------------------
-+const TypeAryPtr* TypeAryPtr::cast_to_stable(bool stable, int stable_dimension) const {
-+  assert(stable == true || stable == false, "");
-+  if (stable_dimension <= 0 || stable_dimension == 1 && stable == this->stable())  return this;
-+  const Type* elem = this->elem();
-+  const TypePtr* elem_ptr = elem->make_ptr();
-+  if (stable_dimension > 1 && elem_ptr != NULL && elem_ptr->base() == Type::AryPtr)
-+    // If this is widened from a narrow oop, TypeAry::make will re-narrow it.
-+    elem = elem_ptr = elem_ptr->is_aryptr()->cast_to_stable(stable, stable_dimension - 1);
-+  const TypeAry* new_ary = TypeAry::make(elem, size(), stable);
-+  return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id);
-+}
-+
-+//-----------------------------stable_dimension--------------------------------
-+int TypeAryPtr::stable_dimension() const {
-+  if (!stable())  return 0;
-+  int dim = 1;
-+  const TypePtr* elem_ptr = elem()->make_ptr();
-+  if (elem_ptr != NULL && elem_ptr->isa_aryptr())
-+    dim += elem_ptr->is_aryptr()->stable_dimension();
-+  return dim;
-+}
-+
- //------------------------------eq---------------------------------------------
- // Structural equality check for Type representations
- bool TypeAryPtr::eq( const Type *t ) const {
-@@ -3499,7 +3558,7 @@
-         // Something like byte[int+] meets char[int+].
-         // This must fall to bottom, not (int[-128..65535])[int+].
-         instance_id = InstanceBot;
--        tary = TypeAry::make(Type::BOTTOM, tary->_size);
-+        tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
-       }
-     } else // Non integral arrays.
-     // Must fall to bottom if exact klasses in upper lattice
-@@ -3513,7 +3572,7 @@
-          (tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
-          // 'this' is exact and super or unrelated:
-          (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
--      tary = TypeAry::make(Type::BOTTOM, tary->_size);
-+      tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
-       return make( NotNull, NULL, tary, lazy_klass, false, off, InstanceBot );
-     }
- 
-diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
---- a/src/share/vm/opto/type.hpp
-+++ b/src/share/vm/opto/type.hpp
-@@ -357,6 +357,9 @@
-   // Mapping from CI type system to compiler type:
-   static const Type* get_typeflow_type(ciType* type);
- 
-+  static const Type* make_from_constant(ciConstant constant,
-+                                        bool require_constant = false);
-+
- private:
-   // support arrays
-   static const BasicType _basic_type[];
-@@ -573,8 +576,8 @@
- //------------------------------TypeAry----------------------------------------
- // Class of Array Types
- class TypeAry : public Type {
--  TypeAry( const Type *elem, const TypeInt *size) : Type(Array),
--    _elem(elem), _size(size) {}
-+  TypeAry(const Type* elem, const TypeInt* size, bool stable) : Type(Array),
-+      _elem(elem), _size(size), _stable(stable) {}
- public:
-   virtual bool eq( const Type *t ) const;
-   virtual int  hash() const;             // Type specific hashing
-@@ -584,10 +587,11 @@
- private:
-   const Type *_elem;            // Element type of array
-   const TypeInt *_size;         // Elements in array
-+  const bool _stable;           // Are elements @Stable?
-   friend class TypeAryPtr;
- 
- public:
--  static const TypeAry *make(  const Type *elem, const TypeInt *size);
-+  static const TypeAry* make(const Type* elem, const TypeInt* size, bool stable = false);
- 
-   virtual const Type *xmeet( const Type *t ) const;
-   virtual const Type *xdual() const;    // Compute dual right now.
-@@ -959,6 +963,7 @@
-   const TypeAry* ary() const  { return _ary; }
-   const Type*    elem() const { return _ary->_elem; }
-   const TypeInt* size() const { return _ary->_size; }
-+  bool         stable() const { return _ary->_stable; }
- 
-   static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot);
-   // Constant pointer to array
-@@ -980,6 +985,9 @@
-   virtual const Type *xmeet( const Type *t ) const;
-   virtual const Type *xdual() const;    // Compute dual right now.
- 
-+  const TypeAryPtr* cast_to_stable(bool stable, int stable_dimension = 1) const;
-+  int stable_dimension() const;
-+
-   // Convenience common pre-built types.
-   static const TypeAryPtr *RANGE;
-   static const TypeAryPtr *OOPS;
-diff --git a/src/share/vm/utilities/accessFlags.hpp b/src/share/vm/utilities/accessFlags.hpp
---- a/src/share/vm/utilities/accessFlags.hpp
-+++ b/src/share/vm/utilities/accessFlags.hpp
-@@ -78,11 +78,13 @@
-   JVM_ACC_FIELD_ACCESS_WATCHED       = 0x00002000,  // field access is watched by JVMTI
-   JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000,  // field modification is watched by JVMTI
-   JVM_ACC_FIELD_INTERNAL             = 0x00000400,  // internal field, same as JVM_ACC_ABSTRACT
-+  JVM_ACC_FIELD_STABLE               = 0x00000020,  // @Stable field, same as JVM_ACC_SYNCHRONIZED
-   JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature
- 
-   JVM_ACC_FIELD_INTERNAL_FLAGS       = JVM_ACC_FIELD_ACCESS_WATCHED |
-                                        JVM_ACC_FIELD_MODIFICATION_WATCHED |
-                                        JVM_ACC_FIELD_INTERNAL |
-+                                       JVM_ACC_FIELD_STABLE |
-                                        JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE,
- 
-                                                     // flags accepted by set_field_flags()
-@@ -148,6 +150,7 @@
-                                         { return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; }
-   bool on_stack() const                 { return (_flags & JVM_ACC_ON_STACK) != 0; }
-   bool is_internal() const              { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; }
-+  bool is_stable() const                { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
-   bool field_has_generic_signature() const
-                                         { return (_flags & JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE) != 0; }
- 
--- a/series	Mon Oct 15 17:45:20 2012 -0700
+++ b/series	Wed Oct 17 21:46:25 2012 -0700
@@ -3,8 +3,8 @@
 # review pending before push to hotspot-comp:
 
 # non-pushed files are under review or development, or merely experimental:
-anno-stable.patch               #-/meth #+bf2edd3c9b0f
-value-obj.patch                 #-/meth #+bf2edd3c9b0f #-testable
+anno-stable-8001107.patch       #-/meth #+bf2edd3c9b0f
+value-obj-8001111.patch         #-/meth #+bf2edd3c9b0f #-testable
 meth.patch                      #-/meth #+bf2edd3c9b0f
 
 meth.proj.patch                 #-/meth #+projects
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/value-obj-8001111.patch	Wed Oct 17 21:46:25 2012 -0700
@@ -0,0 +1,1055 @@
+8001111: the JVM should support immutable, freely copyable objects
+Infrastructure for immutable objects, in support of value types.
+DONE:
+- implement object header representation (variation of biased lock)
+- enforce immutability in interpreter
+TO DO:
+- enforce immutability in JNI, Unsafe, and Core Reflection
+- enforce immutability in compiled code (C1, C2)
+- merge redundant checks (like null checks) before multiple xastore and putfield instructions (C1, C2)
+- constant-fold field and element values in locked objects (C1, C2), cf. stable arrays experiment
+- experiment with limiting effect of locking to parts of the class hierarchy (to allow type-based optimizations)
+- experiment with lifting acmp and identityHashCode to equals and hashCode (i.e., method calls)
+- make the wrapper types (`Integer`, etc.) be permanently lockable, and have `valueOf` calls produce locked objects
+- make (selected) String instances be permanently locked?  (perhaps not; notion of interning may be too deeply embedded)
+- as a layer on top of this: value-oriented calling sequences (allowing arbitrary box/unbox ops at compiler discretion)
+- as a layer on top of this: customized species of `ArrayList<Integer>`, etc.
+
+diff --git a/src/cpu/x86/vm/templateTable_x86_64.cpp b/src/cpu/x86/vm/templateTable_x86_64.cpp
+--- a/src/cpu/x86/vm/templateTable_x86_64.cpp
++++ b/src/cpu/x86/vm/templateTable_x86_64.cpp
+@@ -601,6 +601,11 @@
+   // destroys rbx
+   // check array
+   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
++  const Bytecodes::Code code = bytecode();
++  if (Bytecodes::is_memory_write(code)) {
++    assert_different_registers(array, index, rscratch1);
++    check_for_permanent_lock(array);  // uses rscratch1
++  }
+   // sign extend index for use by indexed load
+   __ movl2ptr(index, index);
+   // check index
+@@ -2237,6 +2242,24 @@
+   __ pop_ptr(r);
+   __ null_check(r);  // for field access must check obj.
+   __ verify_oop(r);
++  const Bytecodes::Code code = bytecode();
++  if (Bytecodes::is_memory_write(code)) {
++    check_for_permanent_lock(r);
++  }
++}
++
++void TemplateTable::check_for_permanent_lock(Register r) {
++  if (EnableFinalObjects && CheckFinalObjects) {
++    Label L;
++    Address mark(r, oopDesc::mark_offset_in_bytes());
++    __ movptr(mark, rscratch1);
++    __ andptr(rscratch1, markOopDesc::all_biased_lock_mask_bits);
++    __ cmpptr(rscratch1, (intptr_t) markOopDesc::permanently_locked_prototype());
++    __ jcc(Assembler::notEqual, L);
++    address fn = CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_if_locked_permanently);
++    __ call_VM(r, fn, r);
++    __ bind(L);
++  }
+ }
+ 
+ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
+diff --git a/src/share/vm/interpreter/bytecodes.hpp b/src/share/vm/interpreter/bytecodes.hpp
+--- a/src/share/vm/interpreter/bytecodes.hpp
++++ b/src/share/vm/interpreter/bytecodes.hpp
+@@ -419,6 +419,7 @@
+                                                                              || code == _aload_2  || code == _aload_3); }
+   static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
+                                                                              || code == _astore_2 || code == _astore_3); }
++  static bool        is_memory_write(Code code)    { return (code == _putfield || (code >= _iastore && code <= _sastore)); }
+ 
+   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
+                                                            || code == _fconst_0 || code == _dconst_0); }
+diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
+--- a/src/share/vm/interpreter/interpreterRuntime.cpp
++++ b/src/share/vm/interpreter/interpreterRuntime.cpp
+@@ -489,6 +489,14 @@
+ IRT_END
+ 
+ 
++IRT_ENTRY(void, InterpreterRuntime::throw_if_locked_permanently(JavaThread* thread, oopDesc* obj)) {
++  Handle h_obj(thread, obj);
++  ObjectSynchronizer::throw_if_locked_permanently(obj->mark(), obj, CHECK);
++  thread->set_vm_result(h_obj());
++}
++IRT_END
++
++
+ //------------------------------------------------------------------------------------------------------------------------
+ // Fields
+ //
+diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp
+--- a/src/share/vm/interpreter/interpreterRuntime.hpp
++++ b/src/share/vm/interpreter/interpreterRuntime.hpp
+@@ -107,6 +107,7 @@
+   static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
+   static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
+   static void    throw_pending_exception(JavaThread* thread);
++  static void    throw_if_locked_permanently(JavaThread* thread, oopDesc* obj);
+ 
+   // Statics & fields
+   static void    resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
+diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
+--- a/src/share/vm/interpreter/templateTable.hpp
++++ b/src/share/vm/interpreter/templateTable.hpp
+@@ -303,6 +303,7 @@
+   static void getstatic(int byte_no);
+   static void putstatic(int byte_no);
+   static void pop_and_check_object(Register obj);
++  static void check_for_permanent_lock(Register obj);
+ 
+   static void _new();
+   static void newarray();
+diff --git a/src/share/vm/oops/markOop.cpp b/src/share/vm/oops/markOop.cpp
+--- a/src/share/vm/oops/markOop.cpp
++++ b/src/share/vm/oops/markOop.cpp
+@@ -46,6 +46,7 @@
+     assert(is_unlocked() || has_bias_pattern(), "just checking");
+     st->print("mark(");
+     if (has_bias_pattern())  st->print("biased,");
++    if (is_permanently_locked())  st->print("permanently_locked,");
+     st->print("hash %#lx,", hash());
+     st->print("age %d)", age());
+   }
+diff --git a/src/share/vm/oops/markOop.hpp b/src/share/vm/oops/markOop.hpp
+--- a/src/share/vm/oops/markOop.hpp
++++ b/src/share/vm/oops/markOop.hpp
+@@ -38,6 +38,7 @@
+ //  --------
+ //             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
+ //             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
++//             (bias flag):23 epoch:2 age:4    biased_lock:1 lock:2 (specially biased object)
+ //             size:32 ------------------------------------------>| (CMS free block)
+ //             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
+ //
+@@ -45,13 +46,17 @@
+ //  --------
+ //  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
+ //  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
++//  (bias flag):54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (specially biased object)
+ //  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
+ //  size:64 ----------------------------------------------------->| (CMS free block)
+ //
+-//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
+-//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
+-//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
+-//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
++//  COOPS (64-bit word, 32-bit pointer):
++//  ------------------------------------
++//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (normal object)
++//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (biased object)
++//  (bias flag):54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (specially biased object)
++//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (CMS promoted object)
++//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (CMS free block)
+ //
+ //  - hash contains the identity hash value: largest value is
+ //    31 bits, see os::random().  Also, 64-bit vm's require
+@@ -61,7 +66,9 @@
+ //
+ //  - the biased lock pattern is used to bias a lock toward a given
+ //    thread. When this pattern is set in the low three bits, the lock
+-//    is either biased toward a given thread or "anonymously" biased,
++//    is either biased toward a given thread or "specially" biased.
++//    Special biasing states are (1) permanent, meaning that the object
++//    can never be unlocked or rebiased, and (2) anonymous,
+ //    indicating that it is possible for it to be biased. When the
+ //    lock is biased toward a given thread, locking and unlocking can
+ //    be performed by that thread without using atomic operations.
+@@ -80,12 +87,13 @@
+ //    significant fraction of the eden semispaces and were not
+ //    promoted promptly, causing an increase in the amount of copying
+ //    performed. The runtime system aligns all JavaThread* pointers to
+-//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
++//    a very large value (currently 2^9 bytes (32bVM) or 2^10 bytes (64bVM))
+ //    to make room for the age bits & the epoch bits (used in support of
+ //    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
+ //
+ //    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
+ //    [0           | epoch | age | 1 | 01]       lock is anonymously biased
++//    [1           | epoch | age | 1 | 01]       lock is permanently biased
+ //
+ //  - the two lock bits are used to describe three states: locked/unlocked and monitor.
+ //
+@@ -145,9 +153,16 @@
+   };
+ 
+   // Alignment of JavaThread pointers encoded in object header required by biased locking
+-  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
++  // Also, special values for the bias value field
++  enum {
++    biased_lock_alignment    = 2 << (epoch_shift + epoch_bits),
++    anonymous_bias_value     = (0 * biased_lock_alignment), // must be zero (to allow bitwise OR)
++    permanent_lock_value     = (1 * biased_lock_alignment),
++    min_thread_bias_value    = (2 * biased_lock_alignment),
++    all_biased_lock_mask_bits =   (-biased_lock_alignment) | biased_lock_mask_in_place  // 0x...FFFE07
+   };
+ 
++
+ #ifdef _WIN64
+     // These values are too big for Win64
+     const static uintptr_t hash_mask = right_n_bits(hash_bits);
+@@ -179,16 +194,44 @@
+   // fixes up biased locks to be compatible with it when a bias is
+   // revoked.
+   bool has_bias_pattern() const {
+-    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
++    return (mask_bits_match(value(), biased_lock_mask_in_place, biased_lock_pattern));
+   }
+-  JavaThread* biased_locker() const {
++  uintptr_t biased_locker_value() const {
+     assert(has_bias_pattern(), "should not call this otherwise");
+-    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
++    return (intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place)));
++  }
++  JavaThread* biased_locker_thread() const {
++    assert(has_bias_pattern() && !is_biased_specially(), "should not call this otherwise");
++    return (JavaThread*) biased_locker_value();
++  }
++  bool is_biased_to(Thread* thread) const {
++    return biased_locker_value() == (uintptr_t) thread;
++  }
++  // Indicates that the bias bit is set but no JavaThread is assigned yet.
++  bool is_biased_specially() const {
++    return (has_bias_pattern() && (value() < min_thread_bias_value)); 
+   }
+   // Indicates that the mark has the bias bit set but that it has not
+   // yet been biased toward a particular thread
+   bool is_biased_anonymously() const {
+-    return (has_bias_pattern() && (biased_locker() == NULL));
++    bool z = mask_bits_match(value(), all_biased_lock_mask_bits, biased_locking_prototype()->value());
++    DEBUG_ONLY(bool z2 = (has_bias_pattern() && (biased_locker_value() == anonymous_bias_value)));
++    assert(z == z2, "methods must agree");
++    return z;
++  }
++  // Indicates that the mark has the bias bit set but is marked
++  // as not biasable toward any particular thread.
++  // When an object is in this state, it never leaves it,
++  // except temporarily during the GC.
++  // This state is in fact used to represent immutable ('final') objects.
++  bool is_permanently_locked() const {
++    bool z = mask_bits_match(value(), all_biased_lock_mask_bits, permanently_locked_prototype()->value());
++    DEBUG_ONLY(bool z2 = (has_bias_pattern() && (biased_locker_value() == permanent_lock_value)));
++    assert(z == z2, "methods must agree");
++    return z;
++  }
++  bool has_revocable_bias_pattern() const {
++    return (has_bias_pattern() && !is_permanently_locked());
+   }
+   // Indicates epoch in which this bias was acquired. If the epoch
+   // changes due to too many bias revocations occurring, the biases
+@@ -207,20 +250,30 @@
+   }
+   // Prototype mark for initialization
+   static markOop biased_locking_prototype() {
+-    return markOop( biased_lock_pattern );
++    return markOop( anonymous_bias_value | biased_lock_pattern );
++  }
++  static markOop permanently_locked_prototype() {
++    return markOop( permanent_lock_value | biased_lock_pattern );
+   }
+ 
+   // lock accessors (note that these assume lock_shift == 0)
+   bool is_locked()   const {
+-    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
++    return !mask_bits_match(value(), lock_mask_in_place, unlocked_value);
+   }
+   bool is_unlocked() const {
+-    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
++    return mask_bits_match(value(), biased_lock_mask_in_place, unlocked_value);
+   }
+   bool is_marked()   const {
+-    return (mask_bits(value(), lock_mask_in_place) == marked_value);
++    return mask_bits_match(value(), lock_mask_in_place, marked_value);
+   }
+-  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
++  bool is_neutral()  const {
++    return mask_bits_match(value(), biased_lock_mask_in_place, unlocked_value);
++  }
++  bool is_unlocked_unhashed() const {   // is_unlocked() && !has_hash()
++    return mask_bits_match(value(),
++                           hash_mask_in_place | lock_mask_in_place,
++                           no_hash_in_place   | unlocked_value);
++  }
+ 
+   // Special temporary state of the markOop while being inflated.
+   // Code that looks at mark outside a lock need to take this into account.
+@@ -345,8 +398,8 @@
+     return mask_bits(value() >> hash_shift, hash_mask);
+   }
+ 
+-  bool has_no_hash() const {
+-    return hash() == no_hash;
++  bool has_hash() const {
++    return hash() != no_hash;
+   }
+ 
+   // Prototype mark for initialization
+diff --git a/src/share/vm/oops/markOop.inline.hpp b/src/share/vm/oops/markOop.inline.hpp
+--- a/src/share/vm/oops/markOop.inline.hpp
++++ b/src/share/vm/oops/markOop.inline.hpp
+@@ -35,6 +35,7 @@
+   if (has_bias_pattern()) {
+     // Will reset bias at end of collection
+     // Mark words of biased and currently locked objects are preserved separately
++    assert(!is_permanently_locked(), "caller resp.");
+     return false;
+   }
+   markOop prototype_header = prototype_for_object(obj_containing_mark);
+@@ -43,13 +44,18 @@
+     // true for correctness
+     return true;
+   }
+-  return (!is_unlocked() || !has_no_hash());
++  return !is_unlocked_unhashed();
+ }
+ 
+ // Should this header be preserved during GC?
+ inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
++  if (EnableFinalObjects &&
++      is_permanently_locked() &&
++      prototype_for_object(obj_containing_mark)->is_permanently_locked())
++    // The entire class is immutable.  GC will restore biasing via init_mark.
++    return false;
+   if (!UseBiasedLocking)
+-    return (!is_unlocked() || !has_no_hash());
++    return !is_unlocked_unhashed();
+   return must_be_preserved_with_bias(obj_containing_mark);
+ }
+ 
+@@ -70,14 +76,19 @@
+       prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
+     return true;
+   }
+-  return (!is_unlocked() || !has_no_hash());
++  return !is_unlocked_unhashed();
+ }
+ 
+ // Should this header be preserved in the case of a promotion failure
+ // during scavenge?
+ inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
++  if (EnableFinalObjects &&
++      is_permanently_locked() &&
++      prototype_for_object(obj_containing_mark)->is_permanently_locked())
++    // The entire class is immutable.  GC will restore biasing via init_mark.
++    return false;
+   if (!UseBiasedLocking)
+-    return (!is_unlocked() || !has_no_hash());
++    return !is_unlocked_unhashed();
+   return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
+ }
+ 
+@@ -91,14 +102,19 @@
+       klass_of_obj_containing_mark->prototype_header()->has_bias_pattern()) {
+     return true;
+   }
+-  return (!is_unlocked() || !has_no_hash());
++  return !is_unlocked_unhashed();
+ }
+ 
+ // Same as must_be_preserved_for_promotion_failure() except that
+ // it takes a Klass* argument, instead of the object of which this is the mark word.
+ inline bool markOopDesc::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
++  if (EnableFinalObjects &&
++      is_permanently_locked() &&
++      klass_of_obj_containing_mark->prototype_header()->is_permanently_locked())
++    // The entire class is immutable.  GC will restore biasing via init_mark.
++    return false;
+   if (!UseBiasedLocking)
+-    return (!is_unlocked() || !has_no_hash());
++    return !is_unlocked_unhashed();
+   return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
+ }
+ 
+diff --git a/src/share/vm/oops/oop.cpp b/src/share/vm/oops/oop.cpp
+--- a/src/share/vm/oops/oop.cpp
++++ b/src/share/vm/oops/oop.cpp
+@@ -106,10 +106,10 @@
+ 
+ intptr_t oopDesc::slow_identity_hash() {
+   // slow case; we have to acquire the micro lock in order to locate the header
++  Thread* THREAD = Thread::current();
+   ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
+-  HandleMark hm;
+-  Handle object(this);
+-  return ObjectSynchronizer::identity_hash_value_for(object);
++  HandleMark hm(THREAD);
++  return ObjectSynchronizer::fast_hash_code(THREAD, this);
+ }
+ 
+ // When String table needs to rehash
+diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
+--- a/src/share/vm/oops/oop.hpp
++++ b/src/share/vm/oops/oop.hpp
+@@ -284,6 +284,7 @@
+   bool is_locked()   const;
+   bool is_unlocked() const;
+   bool has_bias_pattern() const;
++  bool is_permanently_locked() const;
+ 
+   // asserts
+   bool is_oop(bool ignore_mark_word = false) const;
+diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
+--- a/src/share/vm/oops/oop.inline.hpp
++++ b/src/share/vm/oops/oop.inline.hpp
+@@ -602,6 +602,10 @@
+   return mark()->has_bias_pattern();
+ }
+ 
++inline bool oopDesc::is_permanently_locked() const {
++  return mark()->is_permanently_locked();
++}
++
+ 
+ // used only for asserts
+ inline bool oopDesc::is_oop(bool ignore_mark_word) const {
+@@ -716,7 +720,7 @@
+   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
+   // Note: The mark must be read into local variable to avoid concurrent updates.
+   markOop mrk = mark();
+-  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
++  if (mrk->is_unlocked() && mrk->has_hash()) {
+     return mrk->hash();
+   } else if (mrk->is_marked()) {
+     return mrk->hash();
+diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
+--- a/src/share/vm/prims/jvm.cpp
++++ b/src/share/vm/prims/jvm.cpp
+@@ -512,10 +512,17 @@
+ // java.lang.Object ///////////////////////////////////////////////
+ 
+ 
+-JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle))
++JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle)) {
+   JVMWrapper("JVM_IHashCode");
+   // as implemented in the classic virtual machine; return 0 if object is NULL
+-  return handle == NULL ? 0 : ObjectSynchronizer::FastHashCode (THREAD, JNIHandles::resolve_non_null(handle)) ;
++  if (handle == NULL)  return 0;
++  oop obj = JNIHandles::resolve_non_null(handle);
++  jint hc = ObjectSynchronizer::fast_hash_code(THREAD, obj);
++  if (hc == markOopDesc::no_hash && CheckFinalObjects) {
++    ObjectSynchronizer::throw_if_locked_permanently(obj->mark(), obj, CHECK_0);
++  }
++  return hc;
++}
+ JVM_END
+ 
+ 
+diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
+--- a/src/share/vm/prims/unsafe.cpp
++++ b/src/share/vm/prims/unsafe.cpp
+@@ -1140,6 +1140,33 @@
+ UNSAFE_END
+ 
+ 
++UNSAFE_ENTRY(jobject, Unsafe_LockPermanently(JNIEnv *env, jobject unsafe, jobject jobj))
++  UnsafeWrapper("Unsafe_LockPermanently");
++  {
++    if (jobj == NULL) {
++      THROW_0(vmSymbols::java_lang_NullPointerException());
++    }
++    oop obj = JNIHandles::resolve_non_null(jobj);
++    obj = ObjectSynchronizer::lock_permanently(obj, CHECK_0);
++    assert(obj->is_permanently_locked(), "must be now");
++    if (obj != JNIHandles::resolve_non_null(jobj))
++      jobj = JNIHandles::make_local(env, obj);
++    return jobj;
++  }
++UNSAFE_END
++
++
++UNSAFE_ENTRY(bool, Unsafe_IsPermanentlyLocked(JNIEnv *env, jobject unsafe, jobject jobj))
++  UnsafeWrapper("Unsafe_IsPermanentlyLocked");
++  {
++    if (jobj == NULL) {
++      THROW_0(vmSymbols::java_lang_NullPointerException());
++    }
++    return JNIHandles::resolve_non_null(jobj)->is_permanently_locked();
++  }
++UNSAFE_END
++
++
+ UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr))
+   UnsafeWrapper("Unsafe_ThrowException");
+   {
+@@ -1582,6 +1609,11 @@
+     {CC"shouldBeInitialized",CC"("CLS")Z",               FN_PTR(Unsafe_ShouldBeInitialized)},
+ };
+ 
++JNINativeMethod lockperm_methods[] = {
++    {CC"lockPermanently",    CC"("OBJ")"OBJ,             FN_PTR(Unsafe_LockPermanently)},
++    {CC"isPermanentlyLocked",CC"("OBJ")Z",               FN_PTR(Unsafe_IsPermanentlyLocked)},
++};
++
+ #undef CC
+ #undef FN_PTR
+ 
+@@ -1661,6 +1693,15 @@
+         env->ExceptionClear();
+       }
+     }
++    if (EnableFinalObjects) {
++      env->RegisterNatives(unsafecls, lockperm_methods, sizeof(lockperm_methods)/sizeof(JNINativeMethod));
++      if (env->ExceptionOccurred()) {
++        if (PrintMiscellaneous && (Verbose || WizardMode)) {
++          tty->print_cr("Warning:  support for EnableFinalObjects in Unsafe not found.");
++        }
++        env->ExceptionClear();
++      }
++    }
+     int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod));
+     if (env->ExceptionOccurred()) {
+       if (PrintMiscellaneous && (Verbose || WizardMode)) {
+diff --git a/src/share/vm/runtime/biasedLocking.cpp b/src/share/vm/runtime/biasedLocking.cpp
+--- a/src/share/vm/runtime/biasedLocking.cpp
++++ b/src/share/vm/runtime/biasedLocking.cpp
+@@ -145,7 +145,7 @@
+ 
+ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
+   markOop mark = obj->mark();
+-  if (!mark->has_bias_pattern()) {
++  if (!mark->has_revocable_bias_pattern()) {
+     if (TraceBiasedLocking) {
+       ResourceMark rm;
+       tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
+@@ -164,8 +164,9 @@
+                   (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
+   }
+ 
+-  JavaThread* biased_thread = mark->biased_locker();
+-  if (biased_thread == NULL) {
++  uintptr_t biased_thread_bits = mark->biased_locker_value();
++  assert((JavaThread*) markOopDesc::anonymous_bias_value == NULL, "anonymous bias encoding must be ptr NULL");
++  if (biased_thread_bits == markOopDesc::anonymous_bias_value) {
+     // Object is anonymously biased. We can get here if, for
+     // example, we revoke the bias due to an identity hash code
+     // being computed for an object.
+@@ -176,9 +177,16 @@
+       tty->print_cr("  Revoked bias of anonymously-biased object");
+     }
+     return BiasedLocking::BIAS_REVOKED;
++  } else if (biased_thread_bits == markOopDesc::permanent_lock_value) {
++    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
++      tty->print_cr("  Cannot revoke bias of permanently-biased object");
++    }
++    assert(EnableFinalObjects, "this bit pattern is possible only if enabled");
++    return BiasedLocking::PERMANENTLY_LOCKED;
+   }
+ 
+   // Handle case where the thread toward which the object was biased has exited
++  JavaThread* biased_thread = (JavaThread*) biased_thread_bits;
+   bool thread_is_alive = false;
+   if (requesting_thread == biased_thread) {
+     thread_is_alive = true;
+@@ -265,7 +273,7 @@
+ 
+ static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
+   markOop mark = o->mark();
+-  if (!mark->has_bias_pattern()) {
++  if (!mark->has_revocable_bias_pattern()) {
+     return HR_NOT_BIASED;
+   }
+ 
+@@ -329,6 +337,9 @@
+                   (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
+   }
+ 
++  assert(!o->mark()->is_permanently_locked(),
++         "should not revoke or rebias permanently biased object");
++
+   jlong cur_time = os::javaTimeMillis();
+   o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
+ 
+@@ -347,7 +358,7 @@
+     // try to update the epoch -- assume another VM operation came in
+     // and reset the header to the unbiased state, which will
+     // implicitly cause all existing biases to be revoked
+-    if (klass->prototype_header()->has_bias_pattern()) {
++    if (klass->prototype_header()->has_revocable_bias_pattern()) {
+       int prev_epoch = klass->prototype_header()->bias_epoch();
+       klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
+       int cur_epoch = klass->prototype_header()->bias_epoch();
+@@ -360,7 +371,7 @@
+           MonitorInfo* mon_info = cached_monitor_info->at(i);
+           oop owner = mon_info->owner();
+           markOop mark = owner->mark();
+-          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
++          if ((owner->klass() == k_o) && mark->has_revocable_bias_pattern()) {
+             // We might have encountered this object already in the case of recursive locking
+             assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+             owner->set_mark(mark->set_bias_epoch(cur_epoch));
+@@ -371,7 +382,7 @@
+ 
+     // At this point we're done. All we have to do is potentially
+     // adjust the header of the given object to revoke its bias.
+-    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
++    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_revocable_bias_pattern(), true, requesting_thread);
+   } else {
+     if (TraceBiasedLocking) {
+       ResourceMark rm;
+@@ -392,7 +403,7 @@
+         MonitorInfo* mon_info = cached_monitor_info->at(i);
+         oop owner = mon_info->owner();
+         markOop mark = owner->mark();
+-        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
++        if ((owner->klass() == k_o) && mark->has_revocable_bias_pattern()) {
+           revoke_bias(owner, false, true, requesting_thread);
+         }
+       }
+@@ -410,8 +421,8 @@
+   BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
+ 
+   if (attempt_rebias_of_object &&
+-      o->mark()->has_bias_pattern() &&
+-      klass->prototype_header()->has_bias_pattern()) {
++      o->mark()->has_revocable_bias_pattern() &&
++      klass->prototype_header()->has_revocable_bias_pattern()) {
+     markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
+                                            klass->prototype_header()->bias_epoch());
+     o->set_mark(new_mark);
+@@ -421,8 +432,8 @@
+     }
+   }
+ 
+-  assert(!o->mark()->has_bias_pattern() ||
+-         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
++  assert(!o->mark()->has_revocable_bias_pattern() ||
++         (attempt_rebias_of_object && (o->mark()->is_biased_to(requesting_thread))),
+          "bug in bulk bias revocation");
+ 
+   return status_code;
+@@ -465,13 +476,13 @@
+     // there is nothing to do and we avoid a safepoint.
+     if (_obj != NULL) {
+       markOop mark = (*_obj)()->mark();
+-      if (mark->has_bias_pattern()) {
++      if (mark->has_revocable_bias_pattern()) {
+         return true;
+       }
+     } else {
+       for ( int i = 0 ; i < _objs->length(); i++ ) {
+         markOop mark = (_objs->at(i))()->mark();
+-        if (mark->has_bias_pattern()) {
++        if (mark->has_revocable_bias_pattern()) {
+           return true;
+         }
+       }
+@@ -545,9 +556,10 @@
+     if (res_mark == biased_value) {
+       return BIAS_REVOKED;
+     }
+-  } else if (mark->has_bias_pattern()) {
++  } else if (mark->has_revocable_bias_pattern()) {
+     Klass* k = Klass::cast(obj->klass());
+     markOop prototype_header = k->prototype_header();
++    assert(!prototype_header->is_permanently_locked(), "object cannot be normal if klass is permanently biased");
+     if (!prototype_header->has_bias_pattern()) {
+       // This object has a stale bias from before the bulk revocation
+       // for this data type occurred. It's pointless to update the
+@@ -592,7 +604,7 @@
+   } else if (heuristics == HR_SINGLE_REVOKE) {
+     Klass *k = Klass::cast(obj->klass());
+     markOop prototype_header = k->prototype_header();
+-    if (mark->biased_locker() == THREAD &&
++    if (mark->is_biased_to(THREAD) &&
+         prototype_header->bias_epoch() == mark->bias_epoch()) {
+       // A thread is trying to revoke the bias of an object biased
+       // toward it, again likely due to an identity hash code
+diff --git a/src/share/vm/runtime/biasedLocking.hpp b/src/share/vm/runtime/biasedLocking.hpp
+--- a/src/share/vm/runtime/biasedLocking.hpp
++++ b/src/share/vm/runtime/biasedLocking.hpp
+@@ -161,7 +161,8 @@
+   enum Condition {
+     NOT_BIASED = 1,
+     BIAS_REVOKED = 2,
+-    BIAS_REVOKED_AND_REBIASED = 3
++    BIAS_REVOKED_AND_REBIASED = 3,
++    PERMANENTLY_LOCKED = 4
+   };
+ 
+   // This initialization routine should only be called once and
+diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
+--- a/src/share/vm/runtime/deoptimization.cpp
++++ b/src/share/vm/runtime/deoptimization.cpp
+@@ -940,12 +940,12 @@
+       assert(mon_info->owner() != NULL, "reallocation was missed");
+       Handle obj = Handle(mon_info->owner());
+       markOop mark = obj->mark();
+-      if (UseBiasedLocking && mark->has_bias_pattern()) {
++      if (UseBiasedLocking && mark->has_revocable_bias_pattern()) {
+         // New allocated objects may have the mark set to anonymously biased.
+         // Also the deoptimized method may called methods with synchronization
+         // where the thread-local object is bias locked to the current thread.
+         assert(mark->is_biased_anonymously() ||
+-               mark->biased_locker() == thread, "should be locked to current thread");
++               mark->is_biased_to(thread), "should be locked to current thread");
+         // Reset mark word to unbiased prototype.
+         markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+         obj->set_mark(unbiased_prototype);
+diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
+--- a/src/share/vm/runtime/globals.hpp
++++ b/src/share/vm/runtime/globals.hpp
+@@ -3565,6 +3565,12 @@
+   experimental(bool, TrustFinalNonStaticFields, false,                      \
+           "trust final non-static declarations for constant folding")       \
+                                                                             \
++  experimental(bool, EnableFinalObjects, false,                             \
++          "support objects which are fully immutable")                      \
++                                                                            \
++  experimental(bool, CheckFinalObjects, true,                               \
++          "throw exceptions on illegal operations on immutable objects")    \
++                                                                            \
+   develop(bool, TraceInvokeDynamic, false,                                  \
+           "trace internal invoke dynamic operations")                       \
+                                                                             \
+diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
+--- a/src/share/vm/runtime/synchronizer.cpp
++++ b/src/share/vm/runtime/synchronizer.cpp
+@@ -165,9 +165,20 @@
+ // some assembly copies of this code. Make sure update those code
+ // if the following function is changed. The implementation is
+ // extremely sensitive to race condition. Be careful.
++//
++// In the interpreter, InterpreterGenerator::lock_method and
++// TemplateTable::monitorenter both call masm->lock_object.
++// The interpreter slow path calls InterpreterRuntime::monitorenter.
++// In C1, inline copies are enabled by UseFastLocking.  LIR_Assembler::emit_lock
++// calls masm->lock_object, and Runtime1::monitorenter provides the slow path.
++// In C2, the Fast_Lock encoding calls masm->compiler_lock_object.
++// (It may also inline the code straight into the AD file; yuck.)
++// Some optimized instances of this code (in C2) refer to EmitSync.
++// Some optimized instances in C2 vary depending on UseOptoBiasInlining.
++// Some compiled slow paths go through SharedRuntime::complete_monitor_locking.
+ 
+ void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
+- if (UseBiasedLocking) {
++  if (UseBiasedLocking) {
+     if (!SafepointSynchronize::is_at_safepoint()) {
+       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
+       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
+@@ -177,7 +188,7 @@
+       assert(!attempt_rebias, "can not rebias toward VM thread");
+       BiasedLocking::revoke_at_safepoint(obj);
+     }
+-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
++    assert(!obj->mark()->has_revocable_bias_pattern(), "biases should be revoked by now");
+  }
+ 
+  slow_enter (obj, lock, THREAD) ;
+@@ -226,6 +237,9 @@
+ // failed in the interpreter/compiler code.
+ void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
+   markOop mark = obj->mark();
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(mark, obj(), CHECK);
++  }
+   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+ 
+   if (mark->is_neutral()) {
+@@ -311,6 +325,9 @@
+ void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
+   // the current locking is from JNI instead of Java code
+   TEVENT (jni_enter) ;
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -322,6 +339,9 @@
+ 
+ // NOTE: must use heavy weight monitor to handle jni monitor enter
+ bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK_(false));
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -375,6 +395,9 @@
+ //  Wait/Notify/NotifyAll
+ // NOTE: must use heavy weight monitor to handle wait()
+ void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -394,6 +417,9 @@
+ }
+ 
+ void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -406,7 +432,10 @@
+ }
+ 
+ void ObjectSynchronizer::notify(Handle obj, TRAPS) {
+- if (UseBiasedLocking) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
++  if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+   }
+@@ -420,6 +449,9 @@
+ 
+ // NOTE: see comment of notify()
+ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -601,8 +633,8 @@
+   return value;
+ }
+ //
+-intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
+-  if (UseBiasedLocking) {
++intptr_t ObjectSynchronizer::fast_hash_code(Thread* THREAD, oop obj) {
++  if (UseBiasedLocking || EnableFinalObjects) {
+     // NOTE: many places throughout the JVM do not expect a safepoint
+     // to be taken here, in particular most operations on perm gen
+     // objects. However, we only ever bias Java instances and all of
+@@ -610,14 +642,18 @@
+     // been checked to make sure they can handle a safepoint. The
+     // added check of the bias pattern is to avoid useless calls to
+     // thread-local storage.
+-    if (obj->mark()->has_bias_pattern()) {
++    markOop mark = obj->mark();
++    //throw_if_locked_permanently(mark, obj, CATCH);  // cannot throw here
++    if (mark->is_permanently_locked())
++      return markOopDesc::no_hash;  // return null value to caller
++    if (mark->has_bias_pattern()) {
+       // Box and unbox the raw reference just in case we cause a STW safepoint.
+-      Handle hobj (Self, obj) ;
++      Handle hobj(THREAD, obj);
+       // Relaxing assertion for bug 6320749.
+       assert (Universe::verify_in_progress() ||
+               !SafepointSynchronize::is_at_safepoint(),
+              "biases should not be seen by VM thread here");
+-      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
++      BiasedLocking::revoke_and_rebias(hobj, false, THREAD);
+       obj = hobj() ;
+       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+     }
+@@ -628,9 +664,9 @@
+   assert (Universe::verify_in_progress() ||
+           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+   assert (Universe::verify_in_progress() ||
+-          Self->is_Java_thread() , "invariant") ;
++          THREAD->is_Java_thread() , "invariant") ;
+   assert (Universe::verify_in_progress() ||
+-         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
++         ((JavaThread *)THREAD)->thread_state() != _thread_blocked, "invariant") ;
+ 
+   ObjectMonitor* monitor = NULL;
+   markOop temp, test;
+@@ -645,7 +681,7 @@
+     if (hash) {                       // if it has hash, just return it
+       return hash;
+     }
+-    hash = get_next_hash(Self, obj);  // allocate a new hash code
++    hash = get_next_hash(THREAD, obj);  // allocate a new hash code
+     temp = mark->copy_set_hash(hash); // merge the hash code into header
+     // use (machine word version) atomic operation to install the hash
+     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
+@@ -664,7 +700,7 @@
+       return hash;
+     }
+     // Skip to the following code to reduce code size
+-  } else if (Self->is_lock_owned((address)mark->locker())) {
++  } else if (THREAD->is_lock_owned((address)mark->locker())) {
+     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
+     assert (temp->is_neutral(), "invariant") ;
+     hash = temp->hash();              // by current thread, check if the displaced
+@@ -683,13 +719,13 @@
+   }
+ 
+   // Inflate the monitor to set hash code
+-  monitor = ObjectSynchronizer::inflate(Self, obj);
++  monitor = ObjectSynchronizer::inflate(THREAD, obj);
+   // Load displaced header and check it has hash code
+   mark = monitor->header();
+   assert (mark->is_neutral(), "invariant") ;
+   hash = mark->hash();
+   if (hash == 0) {
+-    hash = get_next_hash(Self, obj);
++    hash = get_next_hash(THREAD, obj);
+     temp = mark->copy_set_hash(hash); // merge hash code into header
+     assert (temp->is_neutral(), "invariant") ;
+     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
+@@ -706,15 +742,83 @@
+   return hash;
+ }
+ 
+-// Deprecated -- use FastHashCode() instead.
++// -----------------------------------------------------------------------------
++// Permanently lock an object, and mark it immutable.
++// This operation includes a releasing store to memory, to flush all final field values.
++oop ObjectSynchronizer::lock_permanently(oop obj, TRAPS) {
++  if (!EnableFinalObjects) {
++    ResourceMark rm(THREAD);
++    THROW_MSG_0(vmSymbols::java_lang_InternalError(), "EnableFinalObjects is false");
++  }
++  // Lock the object permanently.  This makes it immutable.
++  markOop mark = obj->mark();
++  if (mark->is_unlocked() ||
++      mark->is_biased_anonymously()) {
++    markOop perm_lock_mark = markOopDesc::permanently_locked_prototype()->set_age(mark->age());
++    if ((markOop) Atomic::cmpxchg_ptr(perm_lock_mark, obj->mark_addr(), mark) == mark) {
++      // cmpxchg_ptr includes store-release fence
++      TEVENT (lock_permanently: fast path) ;
++      return obj;
++    }
++  }
+ 
+-intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
+-  return FastHashCode (Thread::current(), obj()) ;
++  if (true) {
++    // FIXME: Need to inflate and mess around some more.
++    ResourceMark rm(THREAD);
++    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), "object is locked, cannot be permanently locked");
++  }
++
++#if 0 //@@
++  if (UseBiasedLocking) {
++    if (mark->has_bias_pattern()) {
++      // Box and unbox the raw reference just in case we cause a STW safepoint.
++      Handle hobj(THREAD, obj);
++      assert (Universe::verify_in_progress() ||
++              !SafepointSynchronize::is_at_safepoint(),
++             "biases should not be seen by VM thread here");
++      BiasedLocking::revoke_and_rebias(hobj, false, THREAD);
++      obj = hobj() ;
++      mark = obj->mark();
++    }
++  }
++
++  assert (Universe::verify_in_progress() ||
++          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
++  assert (Universe::verify_in_progress() ||
++          THREAD->is_Java_thread() , "invariant") ;
++  assert (Universe::verify_in_progress() ||
++         ((JavaThread *)THREAD)->thread_state() != _thread_blocked, "invariant") ;
++
++  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
++  mark = monitor->header();
++  assert(mark->is_neutral(), "invariant") ;
++  markOop perm_lock_mark = markOopDesc::permanently_locked_prototype()->set_age(mark->age());
++  markOop test = (markOop) Atomic::cmpxchg_ptr(perm_lock_mark, monitor, mark);
++  //@@ FIXME: the only updates to monitor header are (at present) hash code updates
++  //@@ FIXME: must transition the inflated monitor to a permanently-locked state
++  // When we call deflate_monitor at a safepoint, this must put the object into its proper state
++#endif //@@
++
++  return obj;
++}
++
++
++// Throw an appropriate error if the object cannot be synchronized.
++void ObjectSynchronizer::throw_if_locked_permanently(markOop mark, oop obj, TRAPS) {
++  assert(EnableFinalObjects, "");
++  if (mark->is_permanently_locked()) {
++    ResourceMark rm(THREAD);
++    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "immutable object is permanently locked");
++  }
+ }
+ 
+ 
+ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
+                                                    Handle h_obj) {
++  if (EnableFinalObjects) {
++    if (h_obj->mark()->is_permanently_locked())
++      return false;
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
+     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -753,6 +857,9 @@
+ 
+   // Possible mark states: neutral, biased, stack-locked, inflated
+ 
++  if (EnableFinalObjects && h_obj()->mark()->is_permanently_locked()) {
++    return owner_none;
++  }
+   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+     // CASE: biased
+     BiasedLocking::revoke_and_rebias(h_obj, false, self);
+@@ -787,6 +894,10 @@
+ 
+ // FIXME: jvmti should call this
+ JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
++  if (EnableFinalObjects) {
++    if (h_obj->mark()->is_permanently_locked())
++      return NULL;
++  }
+   if (UseBiasedLocking) {
+     if (SafepointSynchronize::is_at_safepoint()) {
+       BiasedLocking::revoke_at_safepoint(h_obj);
+diff --git a/src/share/vm/runtime/synchronizer.hpp b/src/share/vm/runtime/synchronizer.hpp
+--- a/src/share/vm/runtime/synchronizer.hpp
++++ b/src/share/vm/runtime/synchronizer.hpp
+@@ -55,6 +55,9 @@
+   static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
+   static void fast_exit   (oop obj,    BasicLock* lock, Thread* THREAD);
+ 
++  // Lock the object permanently.  This makes it immutable.
++  static oop lock_permanently(oop obj, TRAPS);
++
+   // WARNING: They are ONLY used to handle the slow cases. They should
+   // only be used when the fast cases failed. Use of these functions
+   // without previous fast case check may cause fatal error.
+@@ -84,6 +87,7 @@
+   static void reenter            (Handle obj, intptr_t recursion, TRAPS);
+ 
+   // thread-specific and global objectMonitor free list accessors
++  // Self is the current thread, declared Thread* THREAD or TRAPS elsewhere.
+ //  static void verifyInUse (Thread * Self) ; too slow for general assert/debug
+   static ObjectMonitor * omAlloc (Thread * Self) ;
+   static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
+@@ -96,12 +100,14 @@
+ 
+   // Returns the identity hash value for an oop
+   // NOTE: It may cause monitor inflation
+-  static intptr_t identity_hash_value_for(Handle obj);
+-  static intptr_t FastHashCode (Thread * Self, oop obj) ;
++  static intptr_t fast_hash_code(Thread* THREAD, oop obj);
++
++  // Throw an appropriate error if the object cannot be synchronized.
++  static void throw_if_locked_permanently(markOop mark, oop obj, TRAPS);
+ 
+   // java.lang.Thread support
+-  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
+-  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);
++  static bool current_thread_holds_lock(JavaThread* THREAD, Handle h_obj);
++  static LockOwnership query_lock_ownership(JavaThread* THREAD, Handle h_obj);
+ 
+   static JavaThread* get_lock_owner(Handle h_obj, bool doLock);
+ 
+diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
+--- a/src/share/vm/utilities/globalDefinitions.hpp
++++ b/src/share/vm/utilities/globalDefinitions.hpp
+@@ -959,7 +959,8 @@
+ inline void clear_bits    (intptr_t& x, intptr_t m) { x &= ~m; }
+ inline intptr_t mask_bits      (intptr_t  x, intptr_t m) { return x & m; }
+ inline jlong    mask_long_bits (jlong     x, jlong    m) { return x & m; }
+-inline bool mask_bits_are_true (intptr_t flags, intptr_t mask) { return (flags & mask) == mask; }
++inline bool mask_bits_are_true (intptr_t flags, intptr_t mask)                 { return (flags & mask) == mask; }
++inline bool mask_bits_match    (intptr_t flags, intptr_t mask, intptr_t value) { return (flags & mask) == value; }
+ 
+ // bit-operations using the n.th bit
+ inline void    set_nth_bit(intptr_t& x, int n) { set_bits  (x, nth_bit(n)); }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/value-obj-8001111.txt	Wed Oct 17 21:46:25 2012 -0700
@@ -0,0 +1,44 @@
+Infrastructure for immutable objects, in support of value types.
+Such objects are created privately mutable, and then locked for publication.
+See <http://blogs.oracle.com/jrose/entry/value_types_in_the_vm>
+and <http://blogs.oracle.com/jrose/entry/larval_objects_in_the_vm>.
+For more general background, see [Rich Hickey's talk on Values](http://www.infoq.com/presentations/Value-Values).
+
+The term _immutable_ is a general term for certain classes of data structures.
+Inside the JVM, we need a specific, positive term for an object which has been made immutable.
+We could say it has been made _final_ or _frozen_, but we will instead repurpose the term _locked_.
+This is not a perfect choice, since immutability is only partially related to synchronization.
+The term allows intuitive API names like `Arrays.lockedCopyOf` or `Objects.cloneAsLocked`.
+An object which is immutable is called _permanently locked_, or (if there is no ambiguity) simply _locked_.
+
+Rules for permanently locked objects:
+
+- restrictions on classes of locked objects
+    - all non-static fields must be final
+    - there must be no finalizer method (no override to `Object.finalize`)
+    - these restrictions apply to any superclasses as well
+    - an array can be marked locked, but then (of course) its elements cannot be stored to
+    - if not an array, the object's class must implement the marker type `PermanentlyLockable` (is this a good idea?)
+- restricted operations on locked objects (could be enforced, or else documented as producing undefined results)
+    - do not use any astore or putfield instructions, nor their reflective equivalents, to change any field or array element
+    - do not lock (you may get a hang or a LockedObjectException)
+    - do not test for pointer equality; use Object.equals instead (there may be a test for this)
+    - do not ask for an identity hash code; use Object.hashCode instead (there may be a test for this)
+    - do not call wait, notify, or notifyAll methods in Object
+    - at the time it is marked locked, an object's monitor must not be locked (in fact, should never have been?)
+- side effects
+    - elements of locked arrays are stably available to readers just like final object fields (i.e., there is a memory fence)
+    - a locked object can be locked again, with no additional effect
+    - any attempt to mutate a permanently locked object raises java.lang.LockedObjectException
+    - any attempt to synchronize on a permanently locked object raises java.lang.LockedObjectException
+- object lifecycle
+    - all objects are initially created in a normal (unlocked) state
+    - an object marked locked cannot be "unlocked" (reverted to a normal state)
+    - an object marked locked must be unreferenced by any other thread (can we enforce this?)
+    - the reference returned from the (unsafe) marking primitive must be used for all future accesses
+    - any previous references (including the one passed to the marking primitive) must be unused
+    - in practice, this means you must mark an object locked immediately after constructing it
+- API
+    - the method `lockPermanently` is used to lock an object permanently
+    - there is a predicate `isLockedPermanently` which can test whether an object is locked or not
+    - for initial experiments, these methods are in `sun.misc.Unsafe`; perhaps they belong on `Object` (cf. `clone`)
--- a/value-obj.patch	Mon Oct 15 17:45:20 2012 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1054 +0,0 @@
-Infrastructure for immutable objects, in support of value types.
-DONE:
-- implement object header representation (variation of biased lock)
-- enforce immutability in interpreter
-TO DO:
-- enforce immutability in JNI, Unsafe, and Core Reflection
-- enforce immutability in compiled code (C1, C2)
-- merge redundant checks (like null checks) before multiple xastore and putfield instructions (C1, C2)
-- constant-fold field and element values in locked objects (C1, C2), cf. stable arrays experiment
-- experiment with limiting effect of locking to parts of the class hierarchy (to allow type-based optimizations)
-- experiment lifting acmp and identityHashCode to equals and hashCode (i.e., method calls)
-- make the wrapper types (`Integer`, etc.) be permanently lockable, and have `valueOf` calls produce locked objects
-- make (selected) String instances be permanently locked?  (perhaps not; notion of interning may be too deeply embedded)
-- as a layer on top of this: value-oriented calling sequences (allowing arbitrary box/unbox ops at compiler discretion)
-- as a layer on top of this: customized species of `ArrayList<Integer>`, etc.
-
-diff --git a/src/cpu/x86/vm/templateTable_x86_64.cpp b/src/cpu/x86/vm/templateTable_x86_64.cpp
---- a/src/cpu/x86/vm/templateTable_x86_64.cpp
-+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp
-@@ -601,6 +601,11 @@
-   // destroys rbx
-   // check array
-   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
-+  const Bytecodes::Code code = bytecode();
-+  if (Bytecodes::is_memory_write(code)) {
-+    assert_different_registers(array, index, rscratch1);
-+    check_for_permanent_lock(array);  // uses rscratch1
-+  }
-   // sign extend index for use by indexed load
-   __ movl2ptr(index, index);
-   // check index
-@@ -2237,6 +2242,24 @@
-   __ pop_ptr(r);
-   __ null_check(r);  // for field access must check obj.
-   __ verify_oop(r);
-+  const Bytecodes::Code code = bytecode();
-+  if (Bytecodes::is_memory_write(code)) {
-+    check_for_permanent_lock(r);
-+  }
-+}
-+
-+void TemplateTable::check_for_permanent_lock(Register r) {
-+  if (EnableFinalObjects && CheckFinalObjects) {
-+    Label L;
-+    Address mark(r, oopDesc::mark_offset_in_bytes());
-+    __ movptr(mark, rscratch1);
-+    __ andptr(rscratch1, markOopDesc::all_biased_lock_mask_bits);
-+    __ cmpptr(rscratch1, (intptr_t) markOopDesc::permanently_locked_prototype());
-+    __ jcc(Assembler::notEqual, L);
-+    address fn = CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_if_locked_permanently);
-+    __ call_VM(r, fn, r);
-+    __ bind(L);
-+  }
- }
- 
- void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
-diff --git a/src/share/vm/interpreter/bytecodes.hpp b/src/share/vm/interpreter/bytecodes.hpp
---- a/src/share/vm/interpreter/bytecodes.hpp
-+++ b/src/share/vm/interpreter/bytecodes.hpp
-@@ -419,6 +419,7 @@
-                                                                              || code == _aload_2  || code == _aload_3); }
-   static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
-                                                                              || code == _astore_2 || code == _astore_3); }
-+  static bool        is_memory_write(Code code)    { return (code == _putfield || (code >= _iastore && code <= _sastore)); }
- 
-   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
-                                                            || code == _fconst_0 || code == _dconst_0); }
-diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
---- a/src/share/vm/interpreter/interpreterRuntime.cpp
-+++ b/src/share/vm/interpreter/interpreterRuntime.cpp
-@@ -489,6 +489,14 @@
- IRT_END
- 
- 
-+IRT_ENTRY(void, InterpreterRuntime::throw_if_locked_permanently(JavaThread* thread, oopDesc* obj)) {
-+  Handle h_obj(thread, obj);
-+  ObjectSynchronizer::throw_if_locked_permanently(obj->mark(), obj, CHECK);
-+  thread->set_vm_result(h_obj());
-+}
-+IRT_END
-+
-+
- //------------------------------------------------------------------------------------------------------------------------
- // Fields
- //
-diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp
---- a/src/share/vm/interpreter/interpreterRuntime.hpp
-+++ b/src/share/vm/interpreter/interpreterRuntime.hpp
-@@ -107,6 +107,7 @@
-   static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
-   static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
-   static void    throw_pending_exception(JavaThread* thread);
-+  static void    throw_if_locked_permanently(JavaThread* thread, oopDesc* obj);
- 
-   // Statics & fields
-   static void    resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
-diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
---- a/src/share/vm/interpreter/templateTable.hpp
-+++ b/src/share/vm/interpreter/templateTable.hpp
-@@ -303,6 +303,7 @@
-   static void getstatic(int byte_no);
-   static void putstatic(int byte_no);
-   static void pop_and_check_object(Register obj);
-+  static void check_for_permanent_lock(Register obj);
- 
-   static void _new();
-   static void newarray();
-diff --git a/src/share/vm/oops/markOop.cpp b/src/share/vm/oops/markOop.cpp
---- a/src/share/vm/oops/markOop.cpp
-+++ b/src/share/vm/oops/markOop.cpp
-@@ -46,6 +46,7 @@
-     assert(is_unlocked() || has_bias_pattern(), "just checking");
-     st->print("mark(");
-     if (has_bias_pattern())  st->print("biased,");
-+    if (is_permanently_locked())  st->print("permanently_locked,");
-     st->print("hash %#lx,", hash());
-     st->print("age %d)", age());
-   }
-diff --git a/src/share/vm/oops/markOop.hpp b/src/share/vm/oops/markOop.hpp
---- a/src/share/vm/oops/markOop.hpp
-+++ b/src/share/vm/oops/markOop.hpp
-@@ -38,6 +38,7 @@
- //  --------
- //             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
- //             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
-+//             (bias flag):23 epoch:2 age:4    biased_lock:1 lock:2 (specially biased object)
- //             size:32 ------------------------------------------>| (CMS free block)
- //             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
- //
-@@ -45,13 +46,17 @@
- //  --------
- //  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
- //  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
-+//  (bias flag):54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (specially biased object)
- //  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
- //  size:64 ----------------------------------------------------->| (CMS free block)
- //
--//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
--//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
--//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
--//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
-+//  COOPS (64-bit word, 32-bit pointer):
-+//  ------------------------------------
-+//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (normal object)
-+//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (biased object)
-+//  (bias flag):54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (specially biased object)
-+//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (CMS promoted object)
-+//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (CMS free block)
- //
- //  - hash contains the identity hash value: largest value is
- //    31 bits, see os::random().  Also, 64-bit vm's require
-@@ -61,7 +66,9 @@
- //
- //  - the biased lock pattern is used to bias a lock toward a given
- //    thread. When this pattern is set in the low three bits, the lock
--//    is either biased toward a given thread or "anonymously" biased,
-+//    is either biased toward a given thread or "specially" biased.
-+//    Special biasing states are (1) permanent, meaning that the object
-+//    can never be unlocked or rebiased, and (2) anonymous,
- //    indicating that it is possible for it to be biased. When the
- //    lock is biased toward a given thread, locking and unlocking can
- //    be performed by that thread without using atomic operations.
-@@ -80,12 +87,13 @@
- //    significant fraction of the eden semispaces and were not
- //    promoted promptly, causing an increase in the amount of copying
- //    performed. The runtime system aligns all JavaThread* pointers to
--//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
-+//    a very large value (currently 2^9 bytes (32bVM) or 2^10 bytes (64bVM))
- //    to make room for the age bits & the epoch bits (used in support of
- //    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
- //
- //    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
- //    [0           | epoch | age | 1 | 01]       lock is anonymously biased
-+//    [1           | epoch | age | 1 | 01]       lock is permanently biased
- //
- //  - the two lock bits are used to describe three states: locked/unlocked and monitor.
- //
-@@ -145,9 +153,16 @@
-   };
- 
-   // Alignment of JavaThread pointers encoded in object header required by biased locking
--  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
-+  // Also, special values for the bias value field
-+  enum {
-+    biased_lock_alignment    = 2 << (epoch_shift + epoch_bits),
-+    anonymous_bias_value     = (0 * biased_lock_alignment), // must be zero (to allow bitwise OR)
-+    permanent_lock_value     = (1 * biased_lock_alignment),
-+    min_thread_bias_value    = (2 * biased_lock_alignment),
-+    all_biased_lock_mask_bits =   (-biased_lock_alignment) | biased_lock_mask_in_place  // 0x...FFFE07
-   };
- 
-+
- #ifdef _WIN64
-     // These values are too big for Win64
-     const static uintptr_t hash_mask = right_n_bits(hash_bits);
-@@ -179,16 +194,44 @@
-   // fixes up biased locks to be compatible with it when a bias is
-   // revoked.
-   bool has_bias_pattern() const {
--    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
-+    return (mask_bits_match(value(), biased_lock_mask_in_place, biased_lock_pattern));
-   }
--  JavaThread* biased_locker() const {
-+  uintptr_t biased_locker_value() const {
-     assert(has_bias_pattern(), "should not call this otherwise");
--    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
-+    return (intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place)));
-+  }
-+  JavaThread* biased_locker_thread() const {
-+    assert(has_bias_pattern() && !is_biased_specially(), "should not call this otherwise");
-+    return (JavaThread*) biased_locker_value();
-+  }
-+  bool is_biased_to(Thread* thread) const {
-+    return biased_locker_value() == (uintptr_t) thread;
-+  }
-+  // Indicates that the bias bit is set but no JavaThread is assigned yet.
-+  bool is_biased_specially() const {
-+    return (has_bias_pattern() && (value() < min_thread_bias_value)); 
-   }
-   // Indicates that the mark has the bias bit set but that it has not
-   // yet been biased toward a particular thread
-   bool is_biased_anonymously() const {
--    return (has_bias_pattern() && (biased_locker() == NULL));
-+    bool z = mask_bits_match(value(), all_biased_lock_mask_bits, biased_locking_prototype()->value());
-+    DEBUG_ONLY(bool z2 = (has_bias_pattern() && (biased_locker_value() == anonymous_bias_value)));
-+    assert(z == z2, "methods must agree");
-+    return z;
-+  }
-+  // Indicates that the mark has the bias bit set but is marked
-+  // as not biasable toward any particular thread.
-+  // When an object is in this state, it never leaves it,
-+  // except temporarily during the GC.
-+  // This state is in fact used to represent immutable ('final') objects.
-+  bool is_permanently_locked() const {
-+    bool z = mask_bits_match(value(), all_biased_lock_mask_bits, permanently_locked_prototype()->value());
-+    DEBUG_ONLY(bool z2 = (has_bias_pattern() && (biased_locker_value() == permanent_lock_value)));
-+    assert(z == z2, "methods must agree");
-+    return z;
-+  }
-+  bool has_revocable_bias_pattern() const {
-+    return (has_bias_pattern() && !is_permanently_locked());
-   }
-   // Indicates epoch in which this bias was acquired. If the epoch
-   // changes due to too many bias revocations occurring, the biases
-@@ -207,20 +250,30 @@
-   }
-   // Prototype mark for initialization
-   static markOop biased_locking_prototype() {
--    return markOop( biased_lock_pattern );
-+    return markOop( anonymous_bias_value | biased_lock_pattern );
-+  }
-+  static markOop permanently_locked_prototype() {
-+    return markOop( permanent_lock_value | biased_lock_pattern );
-   }
- 
-   // lock accessors (note that these assume lock_shift == 0)
-   bool is_locked()   const {
--    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
-+    return !mask_bits_match(value(), lock_mask_in_place, unlocked_value);
-   }
-   bool is_unlocked() const {
--    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
-+    return mask_bits_match(value(), biased_lock_mask_in_place, unlocked_value);
-   }
-   bool is_marked()   const {
--    return (mask_bits(value(), lock_mask_in_place) == marked_value);
-+    return mask_bits_match(value(), lock_mask_in_place, marked_value);
-   }
--  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
-+  bool is_neutral()  const {
-+    return mask_bits_match(value(), biased_lock_mask_in_place, unlocked_value);
-+  }
-+  bool is_unlocked_unhashed() const {   // is_unlocked() && !has_hash()
-+    return mask_bits_match(value(),
-+                           hash_mask_in_place | lock_mask_in_place,
-+                           no_hash_in_place   | unlocked_value);
-+  }
- 
-   // Special temporary state of the markOop while being inflated.
-   // Code that looks at mark outside a lock need to take this into account.
-@@ -345,8 +398,8 @@
-     return mask_bits(value() >> hash_shift, hash_mask);
-   }
- 
--  bool has_no_hash() const {
--    return hash() == no_hash;
-+  bool has_hash() const {
-+    return hash() != no_hash;
-   }
- 
-   // Prototype mark for initialization
-diff --git a/src/share/vm/oops/markOop.inline.hpp b/src/share/vm/oops/markOop.inline.hpp
---- a/src/share/vm/oops/markOop.inline.hpp
-+++ b/src/share/vm/oops/markOop.inline.hpp
-@@ -35,6 +35,7 @@
-   if (has_bias_pattern()) {
-     // Will reset bias at end of collection
-     // Mark words of biased and currently locked objects are preserved separately
-+    assert(!is_permanently_locked(), "caller resp.");
-     return false;
-   }
-   markOop prototype_header = prototype_for_object(obj_containing_mark);
-@@ -43,13 +44,18 @@
-     // true for correctness
-     return true;
-   }
--  return (!is_unlocked() || !has_no_hash());
-+  return !is_unlocked_unhashed();
- }
- 
- // Should this header be preserved during GC?
- inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
-+  if (EnableFinalObjects &&
-+      is_permanently_locked() &&
-+      prototype_for_object(obj_containing_mark)->is_permanently_locked())
-+    // The entire class is immutable.  GC will restore biasing via init_mark.
-+    return false;
-   if (!UseBiasedLocking)
--    return (!is_unlocked() || !has_no_hash());
-+    return !is_unlocked_unhashed();
-   return must_be_preserved_with_bias(obj_containing_mark);
- }
- 
-@@ -70,14 +76,19 @@
-       prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
-     return true;
-   }
--  return (!is_unlocked() || !has_no_hash());
-+  return !is_unlocked_unhashed();
- }
- 
- // Should this header be preserved in the case of a promotion failure
- // during scavenge?
- inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
-+  if (EnableFinalObjects &&
-+      is_permanently_locked() &&
-+      prototype_for_object(obj_containing_mark)->is_permanently_locked())
-+    // The entire class is immutable.  GC will restore biasing via init_mark.
-+    return false;
-   if (!UseBiasedLocking)
--    return (!is_unlocked() || !has_no_hash());
-+    return !is_unlocked_unhashed();
-   return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
- }
- 
-@@ -91,14 +102,19 @@
-       klass_of_obj_containing_mark->prototype_header()->has_bias_pattern()) {
-     return true;
-   }
--  return (!is_unlocked() || !has_no_hash());
-+  return !is_unlocked_unhashed();
- }
- 
- // Same as must_be_preserved_for_promotion_failure() except that
- // it takes a Klass* argument, instead of the object of which this is the mark word.
- inline bool markOopDesc::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
-+  if (EnableFinalObjects &&
-+      is_permanently_locked() &&
-+      klass_of_obj_containing_mark->prototype_header()->is_permanently_locked())
-+    // The entire class is immutable.  GC will restore biasing via init_mark.
-+    return false;
-   if (!UseBiasedLocking)
--    return (!is_unlocked() || !has_no_hash());
-+    return !is_unlocked_unhashed();
-   return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
- }
- 
-diff --git a/src/share/vm/oops/oop.cpp b/src/share/vm/oops/oop.cpp
---- a/src/share/vm/oops/oop.cpp
-+++ b/src/share/vm/oops/oop.cpp
-@@ -106,10 +106,10 @@
- 
- intptr_t oopDesc::slow_identity_hash() {
-   // slow case; we have to acquire the micro lock in order to locate the header
-+  Thread* THREAD = Thread::current();
-   ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
--  HandleMark hm;
--  Handle object(this);
--  return ObjectSynchronizer::identity_hash_value_for(object);
-+  HandleMark hm(THREAD);
-+  return ObjectSynchronizer::fast_hash_code(THREAD, this);
- }
- 
- // When String table needs to rehash
-diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
---- a/src/share/vm/oops/oop.hpp
-+++ b/src/share/vm/oops/oop.hpp
-@@ -284,6 +284,7 @@
-   bool is_locked()   const;
-   bool is_unlocked() const;
-   bool has_bias_pattern() const;
-+  bool is_permanently_locked() const;
- 
-   // asserts
-   bool is_oop(bool ignore_mark_word = false) const;
-diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
---- a/src/share/vm/oops/oop.inline.hpp
-+++ b/src/share/vm/oops/oop.inline.hpp
-@@ -602,6 +602,10 @@
-   return mark()->has_bias_pattern();
- }
- 
-+inline bool oopDesc::is_permanently_locked() const {
-+  return mark()->is_permanently_locked();
-+}
-+
- 
- // used only for asserts
- inline bool oopDesc::is_oop(bool ignore_mark_word) const {
-@@ -716,7 +720,7 @@
-   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
-   // Note: The mark must be read into local variable to avoid concurrent updates.
-   markOop mrk = mark();
--  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
-+  if (mrk->is_unlocked() && mrk->has_hash()) {
-     return mrk->hash();
-   } else if (mrk->is_marked()) {
-     return mrk->hash();
-diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
---- a/src/share/vm/prims/jvm.cpp
-+++ b/src/share/vm/prims/jvm.cpp
-@@ -512,10 +512,17 @@
- // java.lang.Object ///////////////////////////////////////////////
- 
- 
--JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle))
-+JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle)) {
-   JVMWrapper("JVM_IHashCode");
-   // as implemented in the classic virtual machine; return 0 if object is NULL
--  return handle == NULL ? 0 : ObjectSynchronizer::FastHashCode (THREAD, JNIHandles::resolve_non_null(handle)) ;
-+  if (handle == NULL)  return 0;
-+  oop obj = JNIHandles::resolve_non_null(handle);
-+  jint hc = ObjectSynchronizer::fast_hash_code(THREAD, obj);
-+  if (hc == markOopDesc::no_hash && CheckFinalObjects) {
-+    ObjectSynchronizer::throw_if_locked_permanently(obj->mark(), obj, CHECK_0);
-+  }
-+  return hc;
-+}
- JVM_END
- 
- 
-diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
---- a/src/share/vm/prims/unsafe.cpp
-+++ b/src/share/vm/prims/unsafe.cpp
-@@ -1140,6 +1140,33 @@
- UNSAFE_END
- 
- 
-+UNSAFE_ENTRY(jobject, Unsafe_LockPermanently(JNIEnv *env, jobject unsafe, jobject jobj))
-+  UnsafeWrapper("Unsafe_LockPermanently");
-+  {
-+    if (jobj == NULL) {
-+      THROW_0(vmSymbols::java_lang_NullPointerException());
-+    }
-+    oop obj = JNIHandles::resolve_non_null(jobj);
-+    obj = ObjectSynchronizer::lock_permanently(obj, CHECK_0);
-+    assert(obj->is_permanently_locked(), "must be now");
-+    if (obj != JNIHandles::resolve_non_null(jobj))
-+      jobj = JNIHandles::make_local(env, obj);
-+    return jobj;
-+  }
-+UNSAFE_END
-+
-+
-+UNSAFE_ENTRY(bool, Unsafe_IsPermanentlyLocked(JNIEnv *env, jobject unsafe, jobject jobj))
-+  UnsafeWrapper("Unsafe_IsPermanentlyLocked");
-+  {
-+    if (jobj == NULL) {
-+      THROW_0(vmSymbols::java_lang_NullPointerException());
-+    }
-+    return JNIHandles::resolve_non_null(jobj)->is_permanently_locked();
-+  }
-+UNSAFE_END
-+
-+
- UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr))
-   UnsafeWrapper("Unsafe_ThrowException");
-   {
-@@ -1582,6 +1609,11 @@
-     {CC"shouldBeInitialized",CC"("CLS")Z",               FN_PTR(Unsafe_ShouldBeInitialized)},
- };
- 
-+JNINativeMethod lockperm_methods[] = {
-+    {CC"lockPermanently",    CC"("OBJ")"OBJ,             FN_PTR(Unsafe_LockPermanently)},
-+    {CC"isPermanentlyLocked",CC"("OBJ")Z",               FN_PTR(Unsafe_IsPermanentlyLocked)},
-+};
-+
- #undef CC
- #undef FN_PTR
- 
-@@ -1661,6 +1693,15 @@
-         env->ExceptionClear();
-       }
-     }
-+    if (EnableFinalObjects) {
-+      env->RegisterNatives(unsafecls, lockperm_methods, sizeof(lockperm_methods)/sizeof(JNINativeMethod));
-+      if (env->ExceptionOccurred()) {
-+        if (PrintMiscellaneous && (Verbose || WizardMode)) {
-+          tty->print_cr("Warning:  support for EnableFinalObjects in Unsafe not found.");
-+        }
-+        env->ExceptionClear();
-+      }
-+    }
-     int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod));
-     if (env->ExceptionOccurred()) {
-       if (PrintMiscellaneous && (Verbose || WizardMode)) {
-diff --git a/src/share/vm/runtime/biasedLocking.cpp b/src/share/vm/runtime/biasedLocking.cpp
---- a/src/share/vm/runtime/biasedLocking.cpp
-+++ b/src/share/vm/runtime/biasedLocking.cpp
-@@ -145,7 +145,7 @@
- 
- static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
-   markOop mark = obj->mark();
--  if (!mark->has_bias_pattern()) {
-+  if (!mark->has_revocable_bias_pattern()) {
-     if (TraceBiasedLocking) {
-       ResourceMark rm;
-       tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
-@@ -164,8 +164,9 @@
-                   (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
-   }
- 
--  JavaThread* biased_thread = mark->biased_locker();
--  if (biased_thread == NULL) {
-+  uintptr_t biased_thread_bits = mark->biased_locker_value();
-+  assert((JavaThread*) markOopDesc::anonymous_bias_value == NULL, "anonymous bias encoding must be ptr NULL");
-+  if (biased_thread_bits == markOopDesc::anonymous_bias_value) {
-     // Object is anonymously biased. We can get here if, for
-     // example, we revoke the bias due to an identity hash code
-     // being computed for an object.
-@@ -176,9 +177,16 @@
-       tty->print_cr("  Revoked bias of anonymously-biased object");
-     }
-     return BiasedLocking::BIAS_REVOKED;
-+  } else if (biased_thread_bits == markOopDesc::permanent_lock_value) {
-+    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
-+      tty->print_cr("  Cannot revoke bias of permanently-biased object");
-+    }
-+    assert(EnableFinalObjects, "this bit pattern is possible only if enabled");
-+    return BiasedLocking::PERMANENTLY_LOCKED;
-   }
- 
-   // Handle case where the thread toward which the object was biased has exited
-+  JavaThread* biased_thread = (JavaThread*) biased_thread_bits;
-   bool thread_is_alive = false;
-   if (requesting_thread == biased_thread) {
-     thread_is_alive = true;
-@@ -265,7 +273,7 @@
- 
- static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
-   markOop mark = o->mark();
--  if (!mark->has_bias_pattern()) {
-+  if (!mark->has_revocable_bias_pattern()) {
-     return HR_NOT_BIASED;
-   }
- 
-@@ -329,6 +337,9 @@
-                   (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
-   }
- 
-+  assert(!o->mark()->is_permanently_locked(),
-+         "should not revoke or rebias permanently biased object");
-+
-   jlong cur_time = os::javaTimeMillis();
-   o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
- 
-@@ -347,7 +358,7 @@
-     // try to update the epoch -- assume another VM operation came in
-     // and reset the header to the unbiased state, which will
-     // implicitly cause all existing biases to be revoked
--    if (klass->prototype_header()->has_bias_pattern()) {
-+    if (klass->prototype_header()->has_revocable_bias_pattern()) {
-       int prev_epoch = klass->prototype_header()->bias_epoch();
-       klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
-       int cur_epoch = klass->prototype_header()->bias_epoch();
-@@ -360,7 +371,7 @@
-           MonitorInfo* mon_info = cached_monitor_info->at(i);
-           oop owner = mon_info->owner();
-           markOop mark = owner->mark();
--          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
-+          if ((owner->klass() == k_o) && mark->has_revocable_bias_pattern()) {
-             // We might have encountered this object already in the case of recursive locking
-             assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
-             owner->set_mark(mark->set_bias_epoch(cur_epoch));
-@@ -371,7 +382,7 @@
- 
-     // At this point we're done. All we have to do is potentially
-     // adjust the header of the given object to revoke its bias.
--    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
-+    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_revocable_bias_pattern(), true, requesting_thread);
-   } else {
-     if (TraceBiasedLocking) {
-       ResourceMark rm;
-@@ -392,7 +403,7 @@
-         MonitorInfo* mon_info = cached_monitor_info->at(i);
-         oop owner = mon_info->owner();
-         markOop mark = owner->mark();
--        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
-+        if ((owner->klass() == k_o) && mark->has_revocable_bias_pattern()) {
-           revoke_bias(owner, false, true, requesting_thread);
-         }
-       }
-@@ -410,8 +421,8 @@
-   BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
- 
-   if (attempt_rebias_of_object &&
--      o->mark()->has_bias_pattern() &&
--      klass->prototype_header()->has_bias_pattern()) {
-+      o->mark()->has_revocable_bias_pattern() &&
-+      klass->prototype_header()->has_revocable_bias_pattern()) {
-     markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
-                                            klass->prototype_header()->bias_epoch());
-     o->set_mark(new_mark);
-@@ -421,8 +432,8 @@
-     }
-   }
- 
--  assert(!o->mark()->has_bias_pattern() ||
--         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
-+  assert(!o->mark()->has_revocable_bias_pattern() ||
-+         (attempt_rebias_of_object && (o->mark()->is_biased_to(requesting_thread))),
-          "bug in bulk bias revocation");
- 
-   return status_code;
-@@ -465,13 +476,13 @@
-     // there is nothing to do and we avoid a safepoint.
-     if (_obj != NULL) {
-       markOop mark = (*_obj)()->mark();
--      if (mark->has_bias_pattern()) {
-+      if (mark->has_revocable_bias_pattern()) {
-         return true;
-       }
-     } else {
-       for ( int i = 0 ; i < _objs->length(); i++ ) {
-         markOop mark = (_objs->at(i))()->mark();
--        if (mark->has_bias_pattern()) {
-+        if (mark->has_revocable_bias_pattern()) {
-           return true;
-         }
-       }
-@@ -545,9 +556,10 @@
-     if (res_mark == biased_value) {
-       return BIAS_REVOKED;
-     }
--  } else if (mark->has_bias_pattern()) {
-+  } else if (mark->has_revocable_bias_pattern()) {
-     Klass* k = Klass::cast(obj->klass());
-     markOop prototype_header = k->prototype_header();
-+    assert(!prototype_header->is_permanently_locked(), "object cannot be normal if klass is permanently biased");
-     if (!prototype_header->has_bias_pattern()) {
-       // This object has a stale bias from before the bulk revocation
-       // for this data type occurred. It's pointless to update the
-@@ -592,7 +604,7 @@
-   } else if (heuristics == HR_SINGLE_REVOKE) {
-     Klass *k = Klass::cast(obj->klass());
-     markOop prototype_header = k->prototype_header();
--    if (mark->biased_locker() == THREAD &&
-+    if (mark->is_biased_to(THREAD) &&
-         prototype_header->bias_epoch() == mark->bias_epoch()) {
-       // A thread is trying to revoke the bias of an object biased
-       // toward it, again likely due to an identity hash code
-diff --git a/src/share/vm/runtime/biasedLocking.hpp b/src/share/vm/runtime/biasedLocking.hpp
---- a/src/share/vm/runtime/biasedLocking.hpp
-+++ b/src/share/vm/runtime/biasedLocking.hpp
-@@ -161,7 +161,8 @@
-   enum Condition {
-     NOT_BIASED = 1,
-     BIAS_REVOKED = 2,
--    BIAS_REVOKED_AND_REBIASED = 3
-+    BIAS_REVOKED_AND_REBIASED = 3,
-+    PERMANENTLY_LOCKED = 4
-   };
- 
-   // This initialization routine should only be called once and
-diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
---- a/src/share/vm/runtime/deoptimization.cpp
-+++ b/src/share/vm/runtime/deoptimization.cpp
-@@ -940,12 +940,12 @@
-       assert(mon_info->owner() != NULL, "reallocation was missed");
-       Handle obj = Handle(mon_info->owner());
-       markOop mark = obj->mark();
--      if (UseBiasedLocking && mark->has_bias_pattern()) {
-+      if (UseBiasedLocking && mark->has_revocable_bias_pattern()) {
-         // New allocated objects may have the mark set to anonymously biased.
-         // Also the deoptimized method may called methods with synchronization
-         // where the thread-local object is bias locked to the current thread.
-         assert(mark->is_biased_anonymously() ||
--               mark->biased_locker() == thread, "should be locked to current thread");
-+               mark->is_biased_to(thread), "should be locked to current thread");
-         // Reset mark word to unbiased prototype.
-         markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
-         obj->set_mark(unbiased_prototype);
-diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
---- a/src/share/vm/runtime/globals.hpp
-+++ b/src/share/vm/runtime/globals.hpp
-@@ -3565,6 +3565,12 @@
-   experimental(bool, TrustFinalNonStaticFields, false,                      \
-           "trust final non-static declarations for constant folding")       \
-                                                                             \
-+  experimental(bool, EnableFinalObjects, false,                             \
-+          "support objects which are fully immutable")                      \
-+                                                                            \
-+  experimental(bool, CheckFinalObjects, true,                               \
-+          "throw exceptions on illegal operations on immutable objects")    \
-+                                                                            \
-   develop(bool, TraceInvokeDynamic, false,                                  \
-           "trace internal invoke dynamic operations")                       \
-                                                                             \
-diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
---- a/src/share/vm/runtime/synchronizer.cpp
-+++ b/src/share/vm/runtime/synchronizer.cpp
-@@ -165,9 +165,20 @@
- // some assembly copies of this code. Make sure update those code
- // if the following function is changed. The implementation is
- // extremely sensitive to race condition. Be careful.
-+//
-+// In the interpreter, InterpreterGenerator::lock_method and
-+// TemplateTable::monitorenter both call masm->lock_object.
-+// The interpreter slow path calls InterpreterRuntime::monitorenter.
-+// In C1, inline copies are enabled by UseFastLocking.  LIR_Assembler::emit_lock
-+// calls masm->lock_object, and Runtime1::monitorenter provides the slow path.
-+// In C2, the Fast_Lock encoding calls masm->compiler_lock_object.
-+// (It may also inline the code straight into the AD file; yuck.)
-+// Some optimized instances of this code (in C2) refer to EmitSync.
-+// Some optimized instances in C2 vary depending on UseOptoBiasInlining.
-+// Some compiled slow paths go through SharedRuntime::complete_monitor_locking.
- 
- void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
-- if (UseBiasedLocking) {
-+  if (UseBiasedLocking) {
-     if (!SafepointSynchronize::is_at_safepoint()) {
-       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
-       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
-@@ -177,7 +188,7 @@
-       assert(!attempt_rebias, "can not rebias toward VM thread");
-       BiasedLocking::revoke_at_safepoint(obj);
-     }
--    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-+    assert(!obj->mark()->has_revocable_bias_pattern(), "biases should be revoked by now");
-  }
- 
-  slow_enter (obj, lock, THREAD) ;
-@@ -226,6 +237,9 @@
- // failed in the interpreter/compiler code.
- void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
-   markOop mark = obj->mark();
-+  if (EnableFinalObjects) {
-+    throw_if_locked_permanently(mark, obj(), CHECK);
-+  }
-   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
- 
-   if (mark->is_neutral()) {
-@@ -311,6 +325,9 @@
- void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
-   // the current locking is from JNI instead of Java code
-   TEVENT (jni_enter) ;
-+  if (EnableFinalObjects) {
-+    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
-+  }
-   if (UseBiasedLocking) {
-     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-@@ -322,6 +339,9 @@
- 
- // NOTE: must use heavy weight monitor to handle jni monitor enter
- bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
-+  if (EnableFinalObjects) {
-+    throw_if_locked_permanently(obj->mark(), obj(), CHECK_(false));
-+  }
-   if (UseBiasedLocking) {
-     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-@@ -375,6 +395,9 @@
- //  Wait/Notify/NotifyAll
- // NOTE: must use heavy weight monitor to handle wait()
- void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
-+  if (EnableFinalObjects) {
-+    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
-+  }
-   if (UseBiasedLocking) {
-     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-@@ -394,6 +417,9 @@
- }
- 
- void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
-+  if (EnableFinalObjects) {
-+    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
-+  }
-   if (UseBiasedLocking) {
-     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-@@ -406,7 +432,10 @@
- }
- 
- void ObjectSynchronizer::notify(Handle obj, TRAPS) {
-- if (UseBiasedLocking) {
-+  if (EnableFinalObjects) {
-+    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
-+  }
-+  if (UseBiasedLocking) {
-     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-   }
-@@ -420,6 +449,9 @@
- 
- // NOTE: see comment of notify()
- void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
-+  if (EnableFinalObjects) {
-+    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
-+  }
-   if (UseBiasedLocking) {
-     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
-     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-@@ -601,8 +633,8 @@
-   return value;
- }
- //
--intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
--  if (UseBiasedLocking) {
-+intptr_t ObjectSynchronizer::fast_hash_code(Thread* THREAD, oop obj) {
-+  if (UseBiasedLocking || EnableFinalObjects) {
-     // NOTE: many places throughout the JVM do not expect a safepoint
-     // to be taken here, in particular most operations on perm gen
-     // objects. However, we only ever bias Java instances and all of
-@@ -610,14 +642,18 @@
-     // been checked to make sure they can handle a safepoint. The
-     // added check of the bias pattern is to avoid useless calls to
-     // thread-local storage.
--    if (obj->mark()->has_bias_pattern()) {
-+    markOop mark = obj->mark();
-+    //throw_if_locked_permanently(mark, obj, CATCH);  // cannot throw here
-+    if (mark->is_permanently_locked())
-+      return markOopDesc::no_hash;  // return null value to caller
-+    if (mark->has_bias_pattern()) {
-       // Box and unbox the raw reference just in case we cause a STW safepoint.
--      Handle hobj (Self, obj) ;
-+      Handle hobj(THREAD, obj);
-       // Relaxing assertion for bug 6320749.
-       assert (Universe::verify_in_progress() ||
-               !SafepointSynchronize::is_at_safepoint(),
-              "biases should not be seen by VM thread here");
--      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
-+      BiasedLocking::revoke_and_rebias(hobj, false, THREAD);
-       obj = hobj() ;
-       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-     }
-@@ -628,9 +664,9 @@
-   assert (Universe::verify_in_progress() ||
-           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
-   assert (Universe::verify_in_progress() ||
--          Self->is_Java_thread() , "invariant") ;
-+          THREAD->is_Java_thread() , "invariant") ;
-   assert (Universe::verify_in_progress() ||
--         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
-+         ((JavaThread *)THREAD)->thread_state() != _thread_blocked, "invariant") ;
- 
-   ObjectMonitor* monitor = NULL;
-   markOop temp, test;
-@@ -645,7 +681,7 @@
-     if (hash) {                       // if it has hash, just return it
-       return hash;
-     }
--    hash = get_next_hash(Self, obj);  // allocate a new hash code
-+    hash = get_next_hash(THREAD, obj);  // allocate a new hash code
-     temp = mark->copy_set_hash(hash); // merge the hash code into header
-     // use (machine word version) atomic operation to install the hash
-     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
-@@ -664,7 +700,7 @@
-       return hash;
-     }
-     // Skip to the following code to reduce code size
--  } else if (Self->is_lock_owned((address)mark->locker())) {
-+  } else if (THREAD->is_lock_owned((address)mark->locker())) {
-     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
-     assert (temp->is_neutral(), "invariant") ;
-     hash = temp->hash();              // by current thread, check if the displaced
-@@ -683,13 +719,13 @@
-   }
- 
-   // Inflate the monitor to set hash code
--  monitor = ObjectSynchronizer::inflate(Self, obj);
-+  monitor = ObjectSynchronizer::inflate(THREAD, obj);
-   // Load displaced header and check it has hash code
-   mark = monitor->header();
-   assert (mark->is_neutral(), "invariant") ;
-   hash = mark->hash();
-   if (hash == 0) {
--    hash = get_next_hash(Self, obj);
-+    hash = get_next_hash(THREAD, obj);
-     temp = mark->copy_set_hash(hash); // merge hash code into header
-     assert (temp->is_neutral(), "invariant") ;
-     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
-@@ -706,15 +742,83 @@
-   return hash;
- }
- 
--// Deprecated -- use FastHashCode() instead.
-+// -----------------------------------------------------------------------------
-+// Permanently lock an object, and mark it immutable.
-+// This operation includes a releasing store to memory, to flush all final field values.
-+oop ObjectSynchronizer::lock_permanently(oop obj, TRAPS) {
-+  if (!EnableFinalObjects) {
-+    ResourceMark rm(THREAD);
-+    THROW_MSG_0(vmSymbols::java_lang_InternalError(), "EnableFinalObjects is false");
-+  }
-+  // Lock the object permanently.  This makes it immutable.
-+  markOop mark = obj->mark();
-+  if (mark->is_unlocked() ||
-+      mark->is_biased_anonymously()) {
-+    markOop perm_lock_mark = markOopDesc::permanently_locked_prototype()->set_age(mark->age());
-+    if ((markOop) Atomic::cmpxchg_ptr(perm_lock_mark, obj->mark_addr(), mark) == mark) {
-+      // cmpxchg_ptr includes store-release fence
-+      TEVENT (lock_permanently: fast path) ;
-+      return obj;
-+    }
-+  }
- 
--intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
--  return FastHashCode (Thread::current(), obj()) ;
-+  if (true) {
-+    // FIXME: Need to inflate and mess around some more.
-+    ResourceMark rm(THREAD);
-+    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), "object is locked, cannot be permanently locked");
-+  }
-+
-+#if 0 //@@
-+  if (UseBiasedLocking) {
-+    if (mark->has_bias_pattern()) {
-+      // Box and unbox the raw reference just in case we cause a STW safepoint.
-+      Handle hobj(THREAD, obj);
-+      assert (Universe::verify_in_progress() ||
-+              !SafepointSynchronize::is_at_safepoint(),
-+             "biases should not be seen by VM thread here");
-+      BiasedLocking::revoke_and_rebias(hobj, false, THREAD);
-+      obj = hobj() ;
-+      mark = obj->mark();
-+    }
-+  }
-+
-+  assert (Universe::verify_in_progress() ||
-+          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
-+  assert (Universe::verify_in_progress() ||
-+          THREAD->is_Java_thread() , "invariant") ;
-+  assert (Universe::verify_in_progress() ||
-+         ((JavaThread *)THREAD)->thread_state() != _thread_blocked, "invariant") ;
-+
-+  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
-+  mark = monitor->header();
-+  assert(mark->is_neutral(), "invariant") ;
-+  markOop perm_lock_mark = markOopDesc::permanently_locked_prototype()->set_age(mark->age());
-+  markOop test = (markOop) Atomic::cmpxchg_ptr(perm_lock_mark, monitor, mark);
-+  //@@ FIXME: the only updates to monitor header are (at present) hash code updates
-+  //@@ FIXME: must transition the inflated monitor to a permanently-locked state
-+  // When we call deflate_monitor at a safepoint, this must put the object into its proper state
-+#endif //@@
-+
-+  return obj;
-+}
-+
-+
-+// Throw an appropriate error if the object cannot be synchronized.
-+void ObjectSynchronizer::throw_if_locked_permanently(markOop mark, oop obj, TRAPS) {
-+  assert(EnableFinalObjects, "");
-+  if (mark->is_permanently_locked()) {
-+    ResourceMark rm(THREAD);
-+    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "immutable object is permanently locked");
-+  }
- }
- 
- 
- bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
-                                                    Handle h_obj) {
-+  if (EnableFinalObjects) {
-+    if (h_obj->mark()->is_permanently_locked())
-+      return false;
-+  }
-   if (UseBiasedLocking) {
-     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
-     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
-@@ -753,6 +857,9 @@
- 
-   // Possible mark states: neutral, biased, stack-locked, inflated
- 
-+  if (EnableFinalObjects && h_obj()->mark()->is_permanently_locked()) {
-+    return owner_none;
-+  }
-   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
-     // CASE: biased
-     BiasedLocking::revoke_and_rebias(h_obj, false, self);
-@@ -787,6 +894,10 @@
- 
- // FIXME: jvmti should call this
- JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
-+  if (EnableFinalObjects) {
-+    if (h_obj->mark()->is_permanently_locked())
-+      return NULL;
-+  }
-   if (UseBiasedLocking) {
-     if (SafepointSynchronize::is_at_safepoint()) {
-       BiasedLocking::revoke_at_safepoint(h_obj);
-diff --git a/src/share/vm/runtime/synchronizer.hpp b/src/share/vm/runtime/synchronizer.hpp
---- a/src/share/vm/runtime/synchronizer.hpp
-+++ b/src/share/vm/runtime/synchronizer.hpp
-@@ -55,6 +55,9 @@
-   static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
-   static void fast_exit   (oop obj,    BasicLock* lock, Thread* THREAD);
- 
-+  // Lock the object permanently.  This makes it immutable.
-+  static oop lock_permanently(oop obj, TRAPS);
-+
-   // WARNING: They are ONLY used to handle the slow cases. They should
-   // only be used when the fast cases failed. Use of these functions
-   // without previous fast case check may cause fatal error.
-@@ -84,6 +87,7 @@
-   static void reenter            (Handle obj, intptr_t recursion, TRAPS);
- 
-   // thread-specific and global objectMonitor free list accessors
-+  // Self is the current thread, declared Thread* THREAD or TRAPS elsewhere.
- //  static void verifyInUse (Thread * Self) ; too slow for general assert/debug
-   static ObjectMonitor * omAlloc (Thread * Self) ;
-   static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
-@@ -96,12 +100,14 @@
- 
-   // Returns the identity hash value for an oop
-   // NOTE: It may cause monitor inflation
--  static intptr_t identity_hash_value_for(Handle obj);
--  static intptr_t FastHashCode (Thread * Self, oop obj) ;
-+  static intptr_t fast_hash_code(Thread* THREAD, oop obj);
-+
-+  // Throw an appropriate error if the object cannot be synchronized.
-+  static void throw_if_locked_permanently(markOop mark, oop obj, TRAPS);
- 
-   // java.lang.Thread support
--  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
--  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);
-+  static bool current_thread_holds_lock(JavaThread* THREAD, Handle h_obj);
-+  static LockOwnership query_lock_ownership(JavaThread* THREAD, Handle h_obj);
- 
-   static JavaThread* get_lock_owner(Handle h_obj, bool doLock);
- 
-diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
---- a/src/share/vm/utilities/globalDefinitions.hpp
-+++ b/src/share/vm/utilities/globalDefinitions.hpp
-@@ -959,7 +959,8 @@
- inline void clear_bits    (intptr_t& x, intptr_t m) { x &= ~m; }
- inline intptr_t mask_bits      (intptr_t  x, intptr_t m) { return x & m; }
- inline jlong    mask_long_bits (jlong     x, jlong    m) { return x & m; }
--inline bool mask_bits_are_true (intptr_t flags, intptr_t mask) { return (flags & mask) == mask; }
-+inline bool mask_bits_are_true (intptr_t flags, intptr_t mask)                 { return (flags & mask) == mask; }
-+inline bool mask_bits_match    (intptr_t flags, intptr_t mask, intptr_t value) { return (flags & mask) == value; }
- 
- // bit-operations using the n.th bit
- inline void    set_nth_bit(intptr_t& x, int n) { set_bits  (x, nth_bit(n)); }
--- a/value-obj.txt	Mon Oct 15 17:45:20 2012 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-Infrastructure for immutable objects, in support of value types.
-Such objects are created privately mutable, and then locked for publication.
-See <http://blogs.oracle.com/jrose/entry/value_types_in_the_vm>
-and <http://blogs.oracle.com/jrose/entry/larval_objects_in_the_vm>.
-For more general background, see [Rich Hickey's talk on Values](http://www.infoq.com/presentations/Value-Values).
-
-The term _immutable_ is a general term for certain classes of data structures.
-Inside the JVM, we need a specific, positive term for an object which has been made immutable.
-We could say it has been made _final_ or _frozen_, but instead will repurpose the term _locked_.
-This is not a perfect choice, since immutability is only partially related to synchronization.
-The term allows intuitive API names like `Arrays.lockedCopyOf` or `Objects.cloneAsLocked`.
-An object which is immutable is called _permanently locked_, or (if there is no ambiguity) simply _locked_.
-
-Rules for permanently locked objects:
-
-- restrictions on classes of locked objects
-    - all non-static fields must be final
-    - there must be no finalizer method (no override to `Object.finalize`)
-    - these restrictions apply to any superclasses as well
-    - an array can be marked locked, but then (of course) its elements cannot be stored to
-    - if not an array, the object's class must implement the marker type `PermanentlyLockable` (is this a good idea?)
-- restricted operations on locked objects (could be enforced, or else documented as producing undefined results)
-    - do not use any astore or putfield instructions, nor their reflective equivalents, to change any field
-    - do not lock (you may get a hang or a LockedObjectException)
-    - do not test for pointer equality; use Object.equals instead (there may be a test for this)
-    - do not ask for an identity hash code; use Object.hashCode instead (there may be a test for this)
-    - do not call wait, notify, or notifyAll methods in Object
-    - at the time it is marked locked, an object's monitor must not be locked (in fact, should never have been?)
-- side effects
-    - elements of locked arrays are stably available to readers just like final object fields (i.e., there is a memory fence)
-    - a locked object can be locked again, with no additional effect
-    - any attempt to mutate a permanently locked object raises java.lang.LockedObjectException
-    - any attempt to synchronize on a permanently locked object raises java.lang.LockedObjectException
-- object lifecycle
-    - all objects are initially created in a normal (unlocked) state
-    - an object marked locked cannot be "unlocked" (reverted to a normal state)
-    - an object marked locked must be unreferenced by any other thread (can we enforce this?)
-    - the reference returned from the (unsafe) marking primitive must be used for all future accesses
-    - any previous references (including the one passed to the marking primitive) must be unused
-    - in practice, this means you must mark an object locked immediately after constructing it
-- API
-    - the method `lockPermanently` is used to lock an object permanently
-    - there is a predicate `isLockedPermanently` which can test whether an object is locked or not
-    - for initial experiments, these methods are in `sun.misc.Unsafe`; perhaps they belong on `Object` (cf. `clone`)