changeset 506:aa8f59e8372f

value-obj: first cut
author jrose
date Fri, 12 Oct 2012 12:35:47 -0700
parents b900129cef2e
children f3b15e2870c5
files final-obj.patch series value-obj.patch value-obj.txt
diffstat 4 files changed, 1086 insertions(+), 31 deletions(-)
--- a/final-obj.patch	Fri Oct 05 16:42:57 2012 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-Infrastructure for immutable objects supporting "value types".
-  See https://blogs.oracle.com/jrose/entry/value_types_in_the_vm
-Rules:
-- objects marked immutable cannot be changed, even via privileged reflection
-- a non-array can be marked immutable, but only if all of its fields are 'final'
-- an array can be marked immutable, but then (of course) its elements cannot be stored to
-- an object marked immutable must be unreferenced by any other thread
-- the reference returned from the (unsafe) marking primitive must be used for all future accesses
-- any previous references (including the one passed to the marking primitive) must be unused
-- in practice, this means you must mark an object immutable immediately after constructing it
-- at the time it is marked immutable, an object must not be locked (in fact, should never have been?)
-- an immutable object should not be locked (you may get a hang or an IllegalMonitorStateException)
-- an immutable object should not be tested for pointer equality (there may be a test for this)
-- an immutable object should not be asked for its identity hash code (there may be a test for this)
--- a/series	Fri Oct 05 16:42:57 2012 -0700
+++ b/series	Fri Oct 12 12:35:47 2012 -0700
@@ -3,6 +3,7 @@
 # review pending before push to hotspot-comp:
 
 # non-pushed files are under review or development, or merely experimental:
+value-obj.patch                 #-/meth #+bf2edd3c9b0f #-testable
 anno-stable.patch               #-/meth #+bf2edd3c9b0f #-testable
 meth.patch                      #-/meth #+bf2edd3c9b0f
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/value-obj.patch	Fri Oct 12 12:35:47 2012 -0700
@@ -0,0 +1,1054 @@
+Infrastructure for immutable objects, in support of value types.
+DONE:
+- implement object header representation (variation of biased lock)
+- enforce immutability in interpreter
+TO DO:
+- enforce immutability in JNI, Unsafe, and Core Reflection
+- enforce immutability in compiled code (C1, C2)
+- merge redundant checks (like null checks) before multiple xastore and putfield instructions (C1, C2)
+- constant-fold field and element values in locked objects (C1, C2), cf. stable arrays experiment
+- experiment with limiting effect of locking to parts of the class hierarchy (to allow type-based optimizations)
+- experiment with lifting acmp and identityHashCode to equals and hashCode (i.e., method calls)
+- make the wrapper types (`Integer`, etc.) be permanently lockable, and have `valueOf` calls produce locked objects (see the sketch below)
+- make (selected) String instances be permanently locked?  (perhaps not; the notion of interning may be too deeply embedded)
+- as a layer on top of this: value-oriented calling sequences (allowing arbitrary box/unbox ops at compiler discretion)
+- as a layer on top of this: customized species of `ArrayList<Integer>`, etc.
+
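+A minimal sketch of the wrapper-cache idea above (hypothetical Java, not part of
+this patch; it assumes the `lockPermanently` native registered in unsafe.cpp
+below is also declared as a method on `sun.misc.Unsafe`):
+
+    import sun.misc.Unsafe;
+
+    class LockedBoxes {
+        static final Unsafe UNSAFE = Unsafe.getUnsafe();   // requires a privileged caller
+        static Integer valueOf(int i) {
+            Integer box = new Integer(i);                  // privately mutable during construction
+            return (Integer) UNSAFE.lockPermanently(box);  // use only the returned reference
+        }
+    }
+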
+diff --git a/src/cpu/x86/vm/templateTable_x86_64.cpp b/src/cpu/x86/vm/templateTable_x86_64.cpp
+--- a/src/cpu/x86/vm/templateTable_x86_64.cpp
++++ b/src/cpu/x86/vm/templateTable_x86_64.cpp
+@@ -601,6 +601,11 @@
+   // destroys rbx
+   // check array
+   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
++  const Bytecodes::Code code = bytecode();
++  if (Bytecodes::is_memory_write(code)) {
++    assert_different_registers(array, index, rscratch1);
++    check_for_permanent_lock(array);  // uses rscratch1
++  }
+   // sign extend index for use by indexed load
+   __ movl2ptr(index, index);
+   // check index
+@@ -2237,6 +2242,24 @@
+   __ pop_ptr(r);
+   __ null_check(r);  // for field access must check obj.
+   __ verify_oop(r);
++  const Bytecodes::Code code = bytecode();
++  if (Bytecodes::is_memory_write(code)) {
++    check_for_permanent_lock(r);
++  }
++}
++
++void TemplateTable::check_for_permanent_lock(Register r) {
++  if (EnableFinalObjects && CheckFinalObjects) {
++    Label L;
++    Address mark(r, oopDesc::mark_offset_in_bytes());
++    __ movptr(rscratch1, mark);  // load the mark word into rscratch1
++    __ andptr(rscratch1, markOopDesc::all_biased_lock_mask_bits);
++    __ cmpptr(rscratch1, (intptr_t) markOopDesc::permanently_locked_prototype());
++    __ jcc(Assembler::notEqual, L);
++    address fn = CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_if_locked_permanently);
++    __ call_VM(r, fn, r);
++    __ bind(L);
++  }
+ }
+ 
+ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
+diff --git a/src/share/vm/interpreter/bytecodes.hpp b/src/share/vm/interpreter/bytecodes.hpp
+--- a/src/share/vm/interpreter/bytecodes.hpp
++++ b/src/share/vm/interpreter/bytecodes.hpp
+@@ -419,6 +419,7 @@
+                                                                              || code == _aload_2  || code == _aload_3); }
+   static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
+                                                                              || code == _astore_2 || code == _astore_3); }
++  static bool        is_memory_write(Code code)    { return (code == _putfield || (code >= _iastore && code <= _sastore)); }
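++  // Note: covers putfield plus the contiguous array-store range _iastore.._sastore
++  // (which includes _aastore); putstatic is not included.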
+ 
+   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
+                                                            || code == _fconst_0 || code == _dconst_0); }
+diff --git a/src/share/vm/interpreter/interpreterRuntime.cpp b/src/share/vm/interpreter/interpreterRuntime.cpp
+--- a/src/share/vm/interpreter/interpreterRuntime.cpp
++++ b/src/share/vm/interpreter/interpreterRuntime.cpp
+@@ -489,6 +489,14 @@
+ IRT_END
+ 
+ 
++IRT_ENTRY(void, InterpreterRuntime::throw_if_locked_permanently(JavaThread* thread, oopDesc* obj)) {
++  Handle h_obj(thread, obj);
++  ObjectSynchronizer::throw_if_locked_permanently(obj->mark(), obj, CHECK);
++  thread->set_vm_result(h_obj());
++}
++IRT_END
++
++
+ //------------------------------------------------------------------------------------------------------------------------
+ // Fields
+ //
+diff --git a/src/share/vm/interpreter/interpreterRuntime.hpp b/src/share/vm/interpreter/interpreterRuntime.hpp
+--- a/src/share/vm/interpreter/interpreterRuntime.hpp
++++ b/src/share/vm/interpreter/interpreterRuntime.hpp
+@@ -107,6 +107,7 @@
+   static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
+   static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
+   static void    throw_pending_exception(JavaThread* thread);
++  static void    throw_if_locked_permanently(JavaThread* thread, oopDesc* obj);
+ 
+   // Statics & fields
+   static void    resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
+diff --git a/src/share/vm/interpreter/templateTable.hpp b/src/share/vm/interpreter/templateTable.hpp
+--- a/src/share/vm/interpreter/templateTable.hpp
++++ b/src/share/vm/interpreter/templateTable.hpp
+@@ -303,6 +303,7 @@
+   static void getstatic(int byte_no);
+   static void putstatic(int byte_no);
+   static void pop_and_check_object(Register obj);
++  static void check_for_permanent_lock(Register obj);
+ 
+   static void _new();
+   static void newarray();
+diff --git a/src/share/vm/oops/markOop.cpp b/src/share/vm/oops/markOop.cpp
+--- a/src/share/vm/oops/markOop.cpp
++++ b/src/share/vm/oops/markOop.cpp
+@@ -46,6 +46,7 @@
+     assert(is_unlocked() || has_bias_pattern(), "just checking");
+     st->print("mark(");
+     if (has_bias_pattern())  st->print("biased,");
++    if (is_permanently_locked())  st->print("permanently_locked,");
+     st->print("hash %#lx,", hash());
+     st->print("age %d)", age());
+   }
+diff --git a/src/share/vm/oops/markOop.hpp b/src/share/vm/oops/markOop.hpp
+--- a/src/share/vm/oops/markOop.hpp
++++ b/src/share/vm/oops/markOop.hpp
+@@ -38,6 +38,7 @@
+ //  --------
+ //             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
+ //             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
++//             (bias flag):23 epoch:2 age:4    biased_lock:1 lock:2 (specially biased object)
+ //             size:32 ------------------------------------------>| (CMS free block)
+ //             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
+ //
+@@ -45,13 +46,17 @@
+ //  --------
+ //  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
+ //  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
++//  (bias flag):54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (specially biased object)
+ //  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
+ //  size:64 ----------------------------------------------------->| (CMS free block)
+ //
+-//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
+-//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
+-//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
+-//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
++//  COOPS (64-bit word, 32-bit pointer):
++//  ------------------------------------
++//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (normal object)
++//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (biased object)
++//  (bias flag):54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (specially biased object)
++//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (CMS promoted object)
++//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (CMS free block)
+ //
+ //  - hash contains the identity hash value: largest value is
+ //    31 bits, see os::random().  Also, 64-bit vm's require
+@@ -61,7 +66,9 @@
+ //
+ //  - the biased lock pattern is used to bias a lock toward a given
+ //    thread. When this pattern is set in the low three bits, the lock
+-//    is either biased toward a given thread or "anonymously" biased,
++//    is either biased toward a given thread or "specially" biased.
++//    Special biasing states are (1) permanent, meaning that the object
++//    can never be unlocked or rebiased, and (2) anonymous,
+ //    indicating that it is possible for it to be biased. When the
+ //    lock is biased toward a given thread, locking and unlocking can
+ //    be performed by that thread without using atomic operations.
+@@ -80,12 +87,13 @@
+ //    significant fraction of the eden semispaces and were not
+ //    promoted promptly, causing an increase in the amount of copying
+ //    performed. The runtime system aligns all JavaThread* pointers to
+-//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
++//    a very large value (currently 2^9 bytes (32bVM) or 2^10 bytes (64bVM))
+ //    to make room for the age bits & the epoch bits (used in support of
+ //    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
+ //
+ //    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
+ //    [0           | epoch | age | 1 | 01]       lock is anonymously biased
++//    [1           | epoch | age | 1 | 01]       lock is permanently biased
+ //
+ //  - the two lock bits are used to describe three states: locked/unlocked and monitor.
+ //
+@@ -145,9 +153,16 @@
+   };
+ 
+   // Alignment of JavaThread pointers encoded in object header required by biased locking
+-  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
++  // Also, special values for the bias value field
++  enum {
++    biased_lock_alignment    = 2 << (epoch_shift + epoch_bits),
++    anonymous_bias_value     = (0 * biased_lock_alignment), // must be zero (to allow bitwise OR)
++    permanent_lock_value     = (1 * biased_lock_alignment),
++    min_thread_bias_value    = (2 * biased_lock_alignment),
++    all_biased_lock_mask_bits =   (-biased_lock_alignment) | biased_lock_mask_in_place  // 0x...FFFE07
+   };
+ 
++
+ #ifdef _WIN64
+     // These values are too big for Win64
+     const static uintptr_t hash_mask = right_n_bits(hash_bits);
+@@ -179,16 +194,44 @@
+   // fixes up biased locks to be compatible with it when a bias is
+   // revoked.
+   bool has_bias_pattern() const {
+-    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
++    return (mask_bits_match(value(), biased_lock_mask_in_place, biased_lock_pattern));
+   }
+-  JavaThread* biased_locker() const {
++  uintptr_t biased_locker_value() const {
+     assert(has_bias_pattern(), "should not call this otherwise");
+-    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
++    return (intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place)));
++  }
++  JavaThread* biased_locker_thread() const {
++    assert(has_bias_pattern() && !is_biased_specially(), "should not call this otherwise");
++    return (JavaThread*) biased_locker_value();
++  }
++  bool is_biased_to(Thread* thread) const {
++    return biased_locker_value() == (uintptr_t) thread;
++  }
++  // Indicates that the bias bit is set but no JavaThread is assigned yet.
++  bool is_biased_specially() const {
++    return (has_bias_pattern() && (value() < min_thread_bias_value)); 
+   }
+   // Indicates that the mark has the bias bit set but that it has not
+   // yet been biased toward a particular thread
+   bool is_biased_anonymously() const {
+-    return (has_bias_pattern() && (biased_locker() == NULL));
++    bool z = mask_bits_match(value(), all_biased_lock_mask_bits, biased_locking_prototype()->value());
++    DEBUG_ONLY(bool z2 = (has_bias_pattern() && (biased_locker_value() == anonymous_bias_value)));
++    assert(z == z2, "methods must agree");
++    return z;
++  }
++  // Indicates that the mark has the bias bit set but is marked
++  // as not biasable toward any particular thread.
++  // When an object is in this state, it never leaves it,
++  // except temporarily during the GC.
++  // This state is in fact used to represent immutable ('final') objects.
++  bool is_permanently_locked() const {
++    bool z = mask_bits_match(value(), all_biased_lock_mask_bits, permanently_locked_prototype()->value());
++    DEBUG_ONLY(bool z2 = (has_bias_pattern() && (biased_locker_value() == permanent_lock_value)));
++    assert(z == z2, "methods must agree");
++    return z;
++  }
++  bool has_revocable_bias_pattern() const {
++    return (has_bias_pattern() && !is_permanently_locked());
+   }
+   // Indicates epoch in which this bias was acquired. If the epoch
+   // changes due to too many bias revocations occurring, the biases
+@@ -207,20 +250,30 @@
+   }
+   // Prototype mark for initialization
+   static markOop biased_locking_prototype() {
+-    return markOop( biased_lock_pattern );
++    return markOop( anonymous_bias_value | biased_lock_pattern );
++  }
++  static markOop permanently_locked_prototype() {
++    return markOop( permanent_lock_value | biased_lock_pattern );
+   }
+ 
+   // lock accessors (note that these assume lock_shift == 0)
+   bool is_locked()   const {
+-    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
++    return !mask_bits_match(value(), lock_mask_in_place, unlocked_value);
+   }
+   bool is_unlocked() const {
+-    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
++    return mask_bits_match(value(), biased_lock_mask_in_place, unlocked_value);
+   }
+   bool is_marked()   const {
+-    return (mask_bits(value(), lock_mask_in_place) == marked_value);
++    return mask_bits_match(value(), lock_mask_in_place, marked_value);
+   }
+-  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
++  bool is_neutral()  const {
++    return mask_bits_match(value(), biased_lock_mask_in_place, unlocked_value);
++  }
++  bool is_unlocked_unhashed() const {   // is_unlocked() && !has_hash()
++    return mask_bits_match(value(),
++                           hash_mask_in_place | lock_mask_in_place,
++                           no_hash_in_place   | unlocked_value);
++  }
+ 
+   // Special temporary state of the markOop while being inflated.
+   // Code that looks at mark outside a lock need to take this into account.
+@@ -345,8 +398,8 @@
+     return mask_bits(value() >> hash_shift, hash_mask);
+   }
+ 
+-  bool has_no_hash() const {
+-    return hash() == no_hash;
++  bool has_hash() const {
++    return hash() != no_hash;
+   }
+ 
+   // Prototype mark for initialization
+diff --git a/src/share/vm/oops/markOop.inline.hpp b/src/share/vm/oops/markOop.inline.hpp
+--- a/src/share/vm/oops/markOop.inline.hpp
++++ b/src/share/vm/oops/markOop.inline.hpp
+@@ -35,6 +35,7 @@
+   if (has_bias_pattern()) {
+     // Will reset bias at end of collection
+     // Mark words of biased and currently locked objects are preserved separately
++    assert(!is_permanently_locked(), "caller resp.");
+     return false;
+   }
+   markOop prototype_header = prototype_for_object(obj_containing_mark);
+@@ -43,13 +44,18 @@
+     // true for correctness
+     return true;
+   }
+-  return (!is_unlocked() || !has_no_hash());
++  return !is_unlocked_unhashed();
+ }
+ 
+ // Should this header be preserved during GC?
+ inline bool markOopDesc::must_be_preserved(oop obj_containing_mark) const {
++  if (EnableFinalObjects &&
++      is_permanently_locked() &&
++      prototype_for_object(obj_containing_mark)->is_permanently_locked())
++    // The entire class is immutable.  GC will restore biasing via init_mark.
++    return false;
+   if (!UseBiasedLocking)
+-    return (!is_unlocked() || !has_no_hash());
++    return !is_unlocked_unhashed();
+   return must_be_preserved_with_bias(obj_containing_mark);
+ }
+ 
+@@ -70,14 +76,19 @@
+       prototype_for_object(obj_containing_mark)->has_bias_pattern()) {
+     return true;
+   }
+-  return (!is_unlocked() || !has_no_hash());
++  return !is_unlocked_unhashed();
+ }
+ 
+ // Should this header be preserved in the case of a promotion failure
+ // during scavenge?
+ inline bool markOopDesc::must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
++  if (EnableFinalObjects &&
++      is_permanently_locked() &&
++      prototype_for_object(obj_containing_mark)->is_permanently_locked())
++    // The entire class is immutable.  GC will restore biasing via init_mark.
++    return false;
+   if (!UseBiasedLocking)
+-    return (!is_unlocked() || !has_no_hash());
++    return !is_unlocked_unhashed();
+   return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
+ }
+ 
+@@ -91,14 +102,19 @@
+       klass_of_obj_containing_mark->prototype_header()->has_bias_pattern()) {
+     return true;
+   }
+-  return (!is_unlocked() || !has_no_hash());
++  return !is_unlocked_unhashed();
+ }
+ 
+ // Same as must_be_preserved_for_promotion_failure() except that
+ // it takes a Klass* argument, instead of the object of which this is the mark word.
+ inline bool markOopDesc::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
++  if (EnableFinalObjects &&
++      is_permanently_locked() &&
++      klass_of_obj_containing_mark->prototype_header()->is_permanently_locked())
++    // The entire class is immutable.  GC will restore biasing via init_mark.
++    return false;
+   if (!UseBiasedLocking)
+-    return (!is_unlocked() || !has_no_hash());
++    return !is_unlocked_unhashed();
+   return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
+ }
+ 
+diff --git a/src/share/vm/oops/oop.cpp b/src/share/vm/oops/oop.cpp
+--- a/src/share/vm/oops/oop.cpp
++++ b/src/share/vm/oops/oop.cpp
+@@ -106,10 +106,10 @@
+ 
+ intptr_t oopDesc::slow_identity_hash() {
+   // slow case; we have to acquire the micro lock in order to locate the header
++  Thread* THREAD = Thread::current();
+   ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
+-  HandleMark hm;
+-  Handle object(this);
+-  return ObjectSynchronizer::identity_hash_value_for(object);
++  HandleMark hm(THREAD);
++  return ObjectSynchronizer::fast_hash_code(THREAD, this);
+ }
+ 
+ // When String table needs to rehash
+diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
+--- a/src/share/vm/oops/oop.hpp
++++ b/src/share/vm/oops/oop.hpp
+@@ -284,6 +284,7 @@
+   bool is_locked()   const;
+   bool is_unlocked() const;
+   bool has_bias_pattern() const;
++  bool is_permanently_locked() const;
+ 
+   // asserts
+   bool is_oop(bool ignore_mark_word = false) const;
+diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
+--- a/src/share/vm/oops/oop.inline.hpp
++++ b/src/share/vm/oops/oop.inline.hpp
+@@ -602,6 +602,10 @@
+   return mark()->has_bias_pattern();
+ }
+ 
++inline bool oopDesc::is_permanently_locked() const {
++  return mark()->is_permanently_locked();
++}
++
+ 
+ // used only for asserts
+ inline bool oopDesc::is_oop(bool ignore_mark_word) const {
+@@ -716,7 +720,7 @@
+   // Fast case; if the object is unlocked and the hash value is set, no locking is needed
+   // Note: The mark must be read into local variable to avoid concurrent updates.
+   markOop mrk = mark();
+-  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
++  if (mrk->is_unlocked() && mrk->has_hash()) {
+     return mrk->hash();
+   } else if (mrk->is_marked()) {
+     return mrk->hash();
+diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
+--- a/src/share/vm/prims/jvm.cpp
++++ b/src/share/vm/prims/jvm.cpp
+@@ -512,10 +512,17 @@
+ // java.lang.Object ///////////////////////////////////////////////
+ 
+ 
+-JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle))
++JVM_ENTRY(jint, JVM_IHashCode(JNIEnv* env, jobject handle)) {
+   JVMWrapper("JVM_IHashCode");
+   // as implemented in the classic virtual machine; return 0 if object is NULL
+-  return handle == NULL ? 0 : ObjectSynchronizer::FastHashCode (THREAD, JNIHandles::resolve_non_null(handle)) ;
++  if (handle == NULL)  return 0;
++  oop obj = JNIHandles::resolve_non_null(handle);
++  jint hc = ObjectSynchronizer::fast_hash_code(THREAD, obj);
++  if (hc == markOopDesc::no_hash && CheckFinalObjects) {
++    ObjectSynchronizer::throw_if_locked_permanently(obj->mark(), obj, CHECK_0);
++  }
++  return hc;
++}
+ JVM_END
+ 
+ 
+diff --git a/src/share/vm/prims/unsafe.cpp b/src/share/vm/prims/unsafe.cpp
+--- a/src/share/vm/prims/unsafe.cpp
++++ b/src/share/vm/prims/unsafe.cpp
+@@ -1140,6 +1140,33 @@
+ UNSAFE_END
+ 
+ 
++UNSAFE_ENTRY(jobject, Unsafe_LockPermanently(JNIEnv *env, jobject unsafe, jobject jobj))
++  UnsafeWrapper("Unsafe_LockPermanently");
++  {
++    if (jobj == NULL) {
++      THROW_0(vmSymbols::java_lang_NullPointerException());
++    }
++    oop obj = JNIHandles::resolve_non_null(jobj);
++    obj = ObjectSynchronizer::lock_permanently(obj, CHECK_0);
++    assert(obj->is_permanently_locked(), "must be now");
++    if (obj != JNIHandles::resolve_non_null(jobj))
++      jobj = JNIHandles::make_local(env, obj);
++    return jobj;
++  }
++UNSAFE_END
++
++
++UNSAFE_ENTRY(jboolean, Unsafe_IsPermanentlyLocked(JNIEnv *env, jobject unsafe, jobject jobj))
++  UnsafeWrapper("Unsafe_IsPermanentlyLocked");
++  {
++    if (jobj == NULL) {
++      THROW_0(vmSymbols::java_lang_NullPointerException());
++    }
++    return JNIHandles::resolve_non_null(jobj)->is_permanently_locked();
++  }
++UNSAFE_END
++
++
+ UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr))
+   UnsafeWrapper("Unsafe_ThrowException");
+   {
+@@ -1582,6 +1609,11 @@
+     {CC"shouldBeInitialized",CC"("CLS")Z",               FN_PTR(Unsafe_ShouldBeInitialized)},
+ };
+ 
++JNINativeMethod lockperm_methods[] = {
++    {CC"lockPermanently",    CC"("OBJ")"OBJ,             FN_PTR(Unsafe_LockPermanently)},
++    {CC"isPermanentlyLocked",CC"("OBJ")Z",               FN_PTR(Unsafe_IsPermanentlyLocked)},
++};
++
+ #undef CC
+ #undef FN_PTR
+ 
+@@ -1661,6 +1693,15 @@
+         env->ExceptionClear();
+       }
+     }
++    if (EnableFinalObjects) {
++      env->RegisterNatives(unsafecls, lockperm_methods, sizeof(lockperm_methods)/sizeof(JNINativeMethod));
++      if (env->ExceptionOccurred()) {
++        if (PrintMiscellaneous && (Verbose || WizardMode)) {
++          tty->print_cr("Warning:  support for EnableFinalObjects in Unsafe not found.");
++        }
++        env->ExceptionClear();
++      }
++    }
+     int status = env->RegisterNatives(unsafecls, methods, sizeof(methods)/sizeof(JNINativeMethod));
+     if (env->ExceptionOccurred()) {
+       if (PrintMiscellaneous && (Verbose || WizardMode)) {
+diff --git a/src/share/vm/runtime/biasedLocking.cpp b/src/share/vm/runtime/biasedLocking.cpp
+--- a/src/share/vm/runtime/biasedLocking.cpp
++++ b/src/share/vm/runtime/biasedLocking.cpp
+@@ -145,7 +145,7 @@
+ 
+ static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
+   markOop mark = obj->mark();
+-  if (!mark->has_bias_pattern()) {
++  if (!mark->has_revocable_bias_pattern()) {
+     if (TraceBiasedLocking) {
+       ResourceMark rm;
+       tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
+@@ -164,8 +164,9 @@
+                   (intptr_t) obj, (intptr_t) mark, Klass::cast(obj->klass())->external_name(), (intptr_t) Klass::cast(obj->klass())->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
+   }
+ 
+-  JavaThread* biased_thread = mark->biased_locker();
+-  if (biased_thread == NULL) {
++  uintptr_t biased_thread_bits = mark->biased_locker_value();
++  assert((JavaThread*) markOopDesc::anonymous_bias_value == NULL, "anonymous bias encoding must be ptr NULL");
++  if (biased_thread_bits == markOopDesc::anonymous_bias_value) {
+     // Object is anonymously biased. We can get here if, for
+     // example, we revoke the bias due to an identity hash code
+     // being computed for an object.
+@@ -176,9 +177,16 @@
+       tty->print_cr("  Revoked bias of anonymously-biased object");
+     }
+     return BiasedLocking::BIAS_REVOKED;
++  } else if (biased_thread_bits == markOopDesc::permanent_lock_value) {
++    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
++      tty->print_cr("  Cannot revoke bias of permanently-biased object");
++    }
++    assert(EnableFinalObjects, "this bit pattern is possible only if enabled");
++    return BiasedLocking::PERMANENTLY_LOCKED;
+   }
+ 
+   // Handle case where the thread toward which the object was biased has exited
++  JavaThread* biased_thread = (JavaThread*) biased_thread_bits;
+   bool thread_is_alive = false;
+   if (requesting_thread == biased_thread) {
+     thread_is_alive = true;
+@@ -265,7 +273,7 @@
+ 
+ static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
+   markOop mark = o->mark();
+-  if (!mark->has_bias_pattern()) {
++  if (!mark->has_revocable_bias_pattern()) {
+     return HR_NOT_BIASED;
+   }
+ 
+@@ -329,6 +337,9 @@
+                   (intptr_t) o, (intptr_t) o->mark(), Klass::cast(o->klass())->external_name());
+   }
+ 
++  assert(!o->mark()->is_permanently_locked(),
++         "should not revoke or rebias permanently biased object");
++
+   jlong cur_time = os::javaTimeMillis();
+   o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
+ 
+@@ -347,7 +358,7 @@
+     // try to update the epoch -- assume another VM operation came in
+     // and reset the header to the unbiased state, which will
+     // implicitly cause all existing biases to be revoked
+-    if (klass->prototype_header()->has_bias_pattern()) {
++    if (klass->prototype_header()->has_revocable_bias_pattern()) {
+       int prev_epoch = klass->prototype_header()->bias_epoch();
+       klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
+       int cur_epoch = klass->prototype_header()->bias_epoch();
+@@ -360,7 +371,7 @@
+           MonitorInfo* mon_info = cached_monitor_info->at(i);
+           oop owner = mon_info->owner();
+           markOop mark = owner->mark();
+-          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
++          if ((owner->klass() == k_o) && mark->has_revocable_bias_pattern()) {
+             // We might have encountered this object already in the case of recursive locking
+             assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
+             owner->set_mark(mark->set_bias_epoch(cur_epoch));
+@@ -371,7 +382,7 @@
+ 
+     // At this point we're done. All we have to do is potentially
+     // adjust the header of the given object to revoke its bias.
+-    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
++    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_revocable_bias_pattern(), true, requesting_thread);
+   } else {
+     if (TraceBiasedLocking) {
+       ResourceMark rm;
+@@ -392,7 +403,7 @@
+         MonitorInfo* mon_info = cached_monitor_info->at(i);
+         oop owner = mon_info->owner();
+         markOop mark = owner->mark();
+-        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
++        if ((owner->klass() == k_o) && mark->has_revocable_bias_pattern()) {
+           revoke_bias(owner, false, true, requesting_thread);
+         }
+       }
+@@ -410,8 +421,8 @@
+   BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
+ 
+   if (attempt_rebias_of_object &&
+-      o->mark()->has_bias_pattern() &&
+-      klass->prototype_header()->has_bias_pattern()) {
++      o->mark()->has_revocable_bias_pattern() &&
++      klass->prototype_header()->has_revocable_bias_pattern()) {
+     markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
+                                            klass->prototype_header()->bias_epoch());
+     o->set_mark(new_mark);
+@@ -421,8 +432,8 @@
+     }
+   }
+ 
+-  assert(!o->mark()->has_bias_pattern() ||
+-         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
++  assert(!o->mark()->has_revocable_bias_pattern() ||
++         (attempt_rebias_of_object && (o->mark()->is_biased_to(requesting_thread))),
+          "bug in bulk bias revocation");
+ 
+   return status_code;
+@@ -465,13 +476,13 @@
+     // there is nothing to do and we avoid a safepoint.
+     if (_obj != NULL) {
+       markOop mark = (*_obj)()->mark();
+-      if (mark->has_bias_pattern()) {
++      if (mark->has_revocable_bias_pattern()) {
+         return true;
+       }
+     } else {
+       for ( int i = 0 ; i < _objs->length(); i++ ) {
+         markOop mark = (_objs->at(i))()->mark();
+-        if (mark->has_bias_pattern()) {
++        if (mark->has_revocable_bias_pattern()) {
+           return true;
+         }
+       }
+@@ -545,9 +556,10 @@
+     if (res_mark == biased_value) {
+       return BIAS_REVOKED;
+     }
+-  } else if (mark->has_bias_pattern()) {
++  } else if (mark->has_revocable_bias_pattern()) {
+     Klass* k = Klass::cast(obj->klass());
+     markOop prototype_header = k->prototype_header();
++    assert(!prototype_header->is_permanently_locked(), "object cannot be normal if klass is permanently biased");
+     if (!prototype_header->has_bias_pattern()) {
+       // This object has a stale bias from before the bulk revocation
+       // for this data type occurred. It's pointless to update the
+@@ -592,7 +604,7 @@
+   } else if (heuristics == HR_SINGLE_REVOKE) {
+     Klass *k = Klass::cast(obj->klass());
+     markOop prototype_header = k->prototype_header();
+-    if (mark->biased_locker() == THREAD &&
++    if (mark->is_biased_to(THREAD) &&
+         prototype_header->bias_epoch() == mark->bias_epoch()) {
+       // A thread is trying to revoke the bias of an object biased
+       // toward it, again likely due to an identity hash code
+diff --git a/src/share/vm/runtime/biasedLocking.hpp b/src/share/vm/runtime/biasedLocking.hpp
+--- a/src/share/vm/runtime/biasedLocking.hpp
++++ b/src/share/vm/runtime/biasedLocking.hpp
+@@ -161,7 +161,8 @@
+   enum Condition {
+     NOT_BIASED = 1,
+     BIAS_REVOKED = 2,
+-    BIAS_REVOKED_AND_REBIASED = 3
++    BIAS_REVOKED_AND_REBIASED = 3,
++    PERMANENTLY_LOCKED = 4
+   };
+ 
+   // This initialization routine should only be called once and
+diff --git a/src/share/vm/runtime/deoptimization.cpp b/src/share/vm/runtime/deoptimization.cpp
+--- a/src/share/vm/runtime/deoptimization.cpp
++++ b/src/share/vm/runtime/deoptimization.cpp
+@@ -940,12 +940,12 @@
+       assert(mon_info->owner() != NULL, "reallocation was missed");
+       Handle obj = Handle(mon_info->owner());
+       markOop mark = obj->mark();
+-      if (UseBiasedLocking && mark->has_bias_pattern()) {
++      if (UseBiasedLocking && mark->has_revocable_bias_pattern()) {
+         // New allocated objects may have the mark set to anonymously biased.
+         // Also the deoptimized method may called methods with synchronization
+         // where the thread-local object is bias locked to the current thread.
+         assert(mark->is_biased_anonymously() ||
+-               mark->biased_locker() == thread, "should be locked to current thread");
++               mark->is_biased_to(thread), "should be locked to current thread");
+         // Reset mark word to unbiased prototype.
+         markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+         obj->set_mark(unbiased_prototype);
+diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
+--- a/src/share/vm/runtime/globals.hpp
++++ b/src/share/vm/runtime/globals.hpp
+@@ -3565,6 +3565,12 @@
+   experimental(bool, TrustFinalNonStaticFields, false,                      \
+           "trust final non-static declarations for constant folding")       \
+                                                                             \
++  experimental(bool, EnableFinalObjects, false,                             \
++          "support objects which are fully immutable")                      \
++                                                                            \
++  experimental(bool, CheckFinalObjects, true,                               \
++          "throw exceptions on illegal operations on immutable objects")    \
++                                                                            \
+   develop(bool, TraceInvokeDynamic, false,                                  \
+           "trace internal invoke dynamic operations")                       \
+                                                                             \
+diff --git a/src/share/vm/runtime/synchronizer.cpp b/src/share/vm/runtime/synchronizer.cpp
+--- a/src/share/vm/runtime/synchronizer.cpp
++++ b/src/share/vm/runtime/synchronizer.cpp
+@@ -165,9 +165,20 @@
+ // some assembly copies of this code. Make sure update those code
+ // if the following function is changed. The implementation is
+ // extremely sensitive to race condition. Be careful.
++//
++// In the interpreter, InterpreterGenerator::lock_method and
++// TemplateTable::monitorenter both call masm->lock_object.
++// The interpreter slow path calls InterpreterRuntime::monitorenter.
++// In C1, inline copies are enabled by UseFastLocking.  LIR_Assembler::emit_lock
++// calls masm->lock_object, and Runtime1::monitorenter provides the slow path.
++// In C2, the Fast_Lock encoding calls masm->compiler_lock_object.
++// (It may also inline the code straight into the AD file; yuck.)
++// Some optimized instances of this code (in C2) refer to EmitSync.
++// Some optimized instances in C2 vary depending on UseOptoBiasInlining.
++// Some compiled slow paths go through SharedRuntime::complete_monitor_locking.
+ 
+ void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
+- if (UseBiasedLocking) {
++  if (UseBiasedLocking) {
+     if (!SafepointSynchronize::is_at_safepoint()) {
+       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
+       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
+@@ -177,7 +188,7 @@
+       assert(!attempt_rebias, "can not rebias toward VM thread");
+       BiasedLocking::revoke_at_safepoint(obj);
+     }
+-    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
++    assert(!obj->mark()->has_revocable_bias_pattern(), "biases should be revoked by now");
+  }
+ 
+  slow_enter (obj, lock, THREAD) ;
+@@ -226,6 +237,9 @@
+ // failed in the interpreter/compiler code.
+ void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
+   markOop mark = obj->mark();
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(mark, obj(), CHECK);
++  }
+   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
+ 
+   if (mark->is_neutral()) {
+@@ -311,6 +325,9 @@
+ void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
+   // the current locking is from JNI instead of Java code
+   TEVENT (jni_enter) ;
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -322,6 +339,9 @@
+ 
+ // NOTE: must use heavy weight monitor to handle jni monitor enter
+ bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK_(false));
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -375,6 +395,9 @@
+ //  Wait/Notify/NotifyAll
+ // NOTE: must use heavy weight monitor to handle wait()
+ void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -394,6 +417,9 @@
+ }
+ 
+ void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -406,7 +432,10 @@
+ }
+ 
+ void ObjectSynchronizer::notify(Handle obj, TRAPS) {
+- if (UseBiasedLocking) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
++  if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+   }
+@@ -420,6 +449,9 @@
+ 
+ // NOTE: see comment of notify()
+ void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
++  if (EnableFinalObjects) {
++    throw_if_locked_permanently(obj->mark(), obj(), CHECK);
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
+     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -601,8 +633,8 @@
+   return value;
+ }
+ //
+-intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
+-  if (UseBiasedLocking) {
++intptr_t ObjectSynchronizer::fast_hash_code(Thread* THREAD, oop obj) {
++  if (UseBiasedLocking || EnableFinalObjects) {
+     // NOTE: many places throughout the JVM do not expect a safepoint
+     // to be taken here, in particular most operations on perm gen
+     // objects. However, we only ever bias Java instances and all of
+@@ -610,14 +642,18 @@
+     // been checked to make sure they can handle a safepoint. The
+     // added check of the bias pattern is to avoid useless calls to
+     // thread-local storage.
+-    if (obj->mark()->has_bias_pattern()) {
++    markOop mark = obj->mark();
++    //throw_if_locked_permanently(mark, obj, CATCH);  // cannot throw here
++    if (mark->is_permanently_locked())
++      return markOopDesc::no_hash;  // return null value to caller
++    if (mark->has_bias_pattern()) {
+       // Box and unbox the raw reference just in case we cause a STW safepoint.
+-      Handle hobj (Self, obj) ;
++      Handle hobj(THREAD, obj);
+       // Relaxing assertion for bug 6320749.
+       assert (Universe::verify_in_progress() ||
+               !SafepointSynchronize::is_at_safepoint(),
+              "biases should not be seen by VM thread here");
+-      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
++      BiasedLocking::revoke_and_rebias(hobj, false, THREAD);
+       obj = hobj() ;
+       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+     }
+@@ -628,9 +664,9 @@
+   assert (Universe::verify_in_progress() ||
+           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
+   assert (Universe::verify_in_progress() ||
+-          Self->is_Java_thread() , "invariant") ;
++          THREAD->is_Java_thread() , "invariant") ;
+   assert (Universe::verify_in_progress() ||
+-         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;
++         ((JavaThread *)THREAD)->thread_state() != _thread_blocked, "invariant") ;
+ 
+   ObjectMonitor* monitor = NULL;
+   markOop temp, test;
+@@ -645,7 +681,7 @@
+     if (hash) {                       // if it has hash, just return it
+       return hash;
+     }
+-    hash = get_next_hash(Self, obj);  // allocate a new hash code
++    hash = get_next_hash(THREAD, obj);  // allocate a new hash code
+     temp = mark->copy_set_hash(hash); // merge the hash code into header
+     // use (machine word version) atomic operation to install the hash
+     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
+@@ -664,7 +700,7 @@
+       return hash;
+     }
+     // Skip to the following code to reduce code size
+-  } else if (Self->is_lock_owned((address)mark->locker())) {
++  } else if (THREAD->is_lock_owned((address)mark->locker())) {
+     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
+     assert (temp->is_neutral(), "invariant") ;
+     hash = temp->hash();              // by current thread, check if the displaced
+@@ -683,13 +719,13 @@
+   }
+ 
+   // Inflate the monitor to set hash code
+-  monitor = ObjectSynchronizer::inflate(Self, obj);
++  monitor = ObjectSynchronizer::inflate(THREAD, obj);
+   // Load displaced header and check it has hash code
+   mark = monitor->header();
+   assert (mark->is_neutral(), "invariant") ;
+   hash = mark->hash();
+   if (hash == 0) {
+-    hash = get_next_hash(Self, obj);
++    hash = get_next_hash(THREAD, obj);
+     temp = mark->copy_set_hash(hash); // merge hash code into header
+     assert (temp->is_neutral(), "invariant") ;
+     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
+@@ -706,15 +742,83 @@
+   return hash;
+ }
+ 
+-// Deprecated -- use FastHashCode() instead.
++// -----------------------------------------------------------------------------
++// Permanently lock an object, and mark it immutable.
++// This operation includes a releasing store to memory, to flush all final field values.
++oop ObjectSynchronizer::lock_permanently(oop obj, TRAPS) {
++  if (!EnableFinalObjects) {
++    ResourceMark rm(THREAD);
++    THROW_MSG_0(vmSymbols::java_lang_InternalError(), "EnableFinalObjects is false");
++  }
++  // Lock the object permanently.  This makes it immutable.
++  markOop mark = obj->mark();
++  if (mark->is_unlocked() ||
++      mark->is_biased_anonymously()) {
++    markOop perm_lock_mark = markOopDesc::permanently_locked_prototype()->set_age(mark->age());
++    if ((markOop) Atomic::cmpxchg_ptr(perm_lock_mark, obj->mark_addr(), mark) == mark) {
++      // cmpxchg_ptr includes store-release fence
++      TEVENT (lock_permanently: fast path) ;
++      return obj;
++    }
++  }
+ 
+-intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
+-  return FastHashCode (Thread::current(), obj()) ;
++  if (true) {
++    // FIXME: Need to inflate and mess around some more.
++    ResourceMark rm(THREAD);
++    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), "object is locked, cannot be permanently locked");
++  }
++
++#if 0 //@@
++  if (UseBiasedLocking) {
++    if (mark->has_bias_pattern()) {
++      // Box and unbox the raw reference just in case we cause a STW safepoint.
++      Handle hobj(THREAD, obj);
++      assert (Universe::verify_in_progress() ||
++              !SafepointSynchronize::is_at_safepoint(),
++             "biases should not be seen by VM thread here");
++      BiasedLocking::revoke_and_rebias(hobj, false, THREAD);
++      obj = hobj() ;
++      mark = obj->mark();
++    }
++  }
++
++  assert (Universe::verify_in_progress() ||
++          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
++  assert (Universe::verify_in_progress() ||
++          THREAD->is_Java_thread() , "invariant") ;
++  assert (Universe::verify_in_progress() ||
++         ((JavaThread *)THREAD)->thread_state() != _thread_blocked, "invariant") ;
++
++  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
++  mark = monitor->header();
++  assert(mark->is_neutral(), "invariant") ;
++  markOop perm_lock_mark = markOopDesc::permanently_locked_prototype()->set_age(mark->age());
++  markOop test = (markOop) Atomic::cmpxchg_ptr(perm_lock_mark, monitor, mark);
++  //@@ FIXME: the only updates to monitor header are (at present) hash code updates
++  //@@ FIXME: must transition the inflated monitor to a permanently-locked state
++  // When we call deflate_monitor at a safepoint, this must put the object into its proper state
++#endif //@@
++
++  return obj;
++}
++
++
++// Throw an appropriate error if the object cannot be synchronized.
++void ObjectSynchronizer::throw_if_locked_permanently(markOop mark, oop obj, TRAPS) {
++  assert(EnableFinalObjects, "");
++  if (mark->is_permanently_locked()) {
++    ResourceMark rm(THREAD);
++    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "immutable object is permanently locked");
++  }
+ }
+ 
+ 
+ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
+                                                    Handle h_obj) {
++  if (EnableFinalObjects) {
++    if (h_obj->mark()->is_permanently_locked())
++      return false;
++  }
+   if (UseBiasedLocking) {
+     BiasedLocking::revoke_and_rebias(h_obj, false, thread);
+     assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
+@@ -753,6 +857,9 @@
+ 
+   // Possible mark states: neutral, biased, stack-locked, inflated
+ 
++  if (EnableFinalObjects && h_obj()->mark()->is_permanently_locked()) {
++    return owner_none;
++  }
+   if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
+     // CASE: biased
+     BiasedLocking::revoke_and_rebias(h_obj, false, self);
+@@ -787,6 +894,10 @@
+ 
+ // FIXME: jvmti should call this
+ JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
++  if (EnableFinalObjects) {
++    if (h_obj->mark()->is_permanently_locked())
++      return NULL;
++  }
+   if (UseBiasedLocking) {
+     if (SafepointSynchronize::is_at_safepoint()) {
+       BiasedLocking::revoke_at_safepoint(h_obj);
+diff --git a/src/share/vm/runtime/synchronizer.hpp b/src/share/vm/runtime/synchronizer.hpp
+--- a/src/share/vm/runtime/synchronizer.hpp
++++ b/src/share/vm/runtime/synchronizer.hpp
+@@ -55,6 +55,9 @@
+   static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
+   static void fast_exit   (oop obj,    BasicLock* lock, Thread* THREAD);
+ 
++  // Lock the object permanently.  This makes it immutable.
++  static oop lock_permanently(oop obj, TRAPS);
++
+   // WARNING: They are ONLY used to handle the slow cases. They should
+   // only be used when the fast cases failed. Use of these functions
+   // without previous fast case check may cause fatal error.
+@@ -84,6 +87,7 @@
+   static void reenter            (Handle obj, intptr_t recursion, TRAPS);
+ 
+   // thread-specific and global objectMonitor free list accessors
++  // Self is the current thread, declared Thread* THREAD or TRAPS elsewhere.
+ //  static void verifyInUse (Thread * Self) ; too slow for general assert/debug
+   static ObjectMonitor * omAlloc (Thread * Self) ;
+   static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
+@@ -96,12 +100,14 @@
+ 
+   // Returns the identity hash value for an oop
+   // NOTE: It may cause monitor inflation
+-  static intptr_t identity_hash_value_for(Handle obj);
+-  static intptr_t FastHashCode (Thread * Self, oop obj) ;
++  static intptr_t fast_hash_code(Thread* THREAD, oop obj);
++
++  // Throw an appropriate error if the object cannot be synchronized.
++  static void throw_if_locked_permanently(markOop mark, oop obj, TRAPS);
+ 
+   // java.lang.Thread support
+-  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
+-  static LockOwnership query_lock_ownership(JavaThread * self, Handle h_obj);
++  static bool current_thread_holds_lock(JavaThread* THREAD, Handle h_obj);
++  static LockOwnership query_lock_ownership(JavaThread* THREAD, Handle h_obj);
+ 
+   static JavaThread* get_lock_owner(Handle h_obj, bool doLock);
+ 
+diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
+--- a/src/share/vm/utilities/globalDefinitions.hpp
++++ b/src/share/vm/utilities/globalDefinitions.hpp
+@@ -959,7 +959,8 @@
+ inline void clear_bits    (intptr_t& x, intptr_t m) { x &= ~m; }
+ inline intptr_t mask_bits      (intptr_t  x, intptr_t m) { return x & m; }
+ inline jlong    mask_long_bits (jlong     x, jlong    m) { return x & m; }
+-inline bool mask_bits_are_true (intptr_t flags, intptr_t mask) { return (flags & mask) == mask; }
++inline bool mask_bits_are_true (intptr_t flags, intptr_t mask)                 { return (flags & mask) == mask; }
++inline bool mask_bits_match    (intptr_t flags, intptr_t mask, intptr_t value) { return (flags & mask) == value; }
+ 
+ // bit-operations using the n.th bit
+ inline void    set_nth_bit(intptr_t& x, int n) { set_bits  (x, nth_bit(n)); }
--- a/value-obj.txt	Fri Oct 05 16:42:57 2012 -0700
+++ b/value-obj.txt	Fri Oct 12 12:35:47 2012 -0700
@@ -1,30 +1,44 @@
 Infrastructure for immutable objects, in support of value types.
+Such objects are created privately mutable, and then locked for publication.
 See <http://blogs.oracle.com/jrose/entry/value_types_in_the_vm>
+and <http://blogs.oracle.com/jrose/entry/larval_objects_in_the_vm>.
+For more general background, see [Rich Hickey's talk on Values](http://www.infoq.com/presentations/Value-Values).
 
 The term _immutable_ is a general term for certain classes of data structures.
 Inside the JVM, we need a specific, positive term for an object which has been made immutable.
-We could say it has been _locked_ or _frozen_, but instead will repurpose the term _final_.
-This is not a perfect choice, since it is not related to finalizers.
-But it does allow intuitive API names like `Arrays.finalCopyOf` or `Objects.cloneAsFinal`.
-It will now apply in a consistent way, to individual objects, in addition to their fields.
+We could say it has been made _final_ or _frozen_, but instead will repurpose the term _locked_.
+This is not a perfect choice, since immutability is only partially related to synchronization.
+The term allows intuitive API names like `Arrays.lockedCopyOf` or `Objects.cloneAsLocked`.
+An object which is immutable is called _permanently locked_, or (if there is no ambiguity) simply _locked_.
 
-Rules:
+Rules for permanently locked objects:
 
-- restrictions on classes of final objects
-    - all fields must be final (what about static fields?)
-    - there must be no finalizer method (no override to Object.finalize)
-    - an array can be marked final, but then (of course) its elements cannot be stored to
-- restricted operations on final objects (could be enforced, or else documented as producing undefined results)
+- restrictions on classes of locked objects
+    - all non-static fields must be final
+    - there must be no finalizer method (no override to `Object.finalize`)
+    - these restrictions apply to any superclasses as well
+    - an array can be marked locked, but then (of course) its elements cannot be stored to
+    - if not an array, the object's class must implement the marker type `PermanentlyLockable` (is this a good idea?)
+- restricted operations on locked objects (could be enforced, or else documented as producing undefined results)
     - do not use any astore or putfield instructions, nor their reflective equivalents, to change any field
-    - do not lock (you may get a hang or an IllegalMonitorStateException)
+    - do not lock (you may get a hang or a `LockedObjectException`)
     - do not test for pointer equality; use Object.equals instead (there may be a test for this)
     - do not ask for an identity hash code; use Object.hashCode instead (there may be a test for this)
     - do not call wait, notify, or notifyAll methods in Object
-    - at the time it is marked final, an object must not be locked (in fact, should never have been?)
-- lifecycle restrictions
-    - all objects are initially created in a non-final state
-    - an object marked final cannot be reverted to a non-final state
-    - an object marked final must be unreferenced by any other thread
+    - at the time it is marked locked, an object's monitor must not be locked (in fact, should never have been?)
+- side effects
+    - elements of locked arrays are stably available to readers just like final object fields (i.e., there is a memory fence)
+    - a locked object can be locked again, with no additional effect
+    - any attempt to mutate a permanently locked object raises `java.lang.LockedObjectException`
+    - any attempt to synchronize on a permanently locked object raises `java.lang.LockedObjectException`
+- object lifecycle
+    - all objects are initially created in a normal (unlocked) state
+    - an object marked locked cannot be "unlocked" (reverted to a normal state)
+    - an object marked locked must be unreferenced by any other thread (can we enforce this?)
     - the reference returned from the (unsafe) marking primitive must be used for all future accesses
     - any previous references (including the one passed to the marking primitive) must be unused
-    - in practice, this means you must mark an object final immediately after constructing it
+    - in practice, this means you must mark an object locked immediately after constructing it
+- API
+    - the method `lockPermanently` is used to lock an object permanently
+    - there is a predicate `isPermanentlyLocked` which can test whether an object is locked
+    - for initial experiments, these methods are in `sun.misc.Unsafe`; perhaps they belong on `Object` (cf. `clone`)
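+
+A minimal usage sketch (hypothetical Java; it assumes the two `sun.misc.Unsafe`
+methods above are also declared on the Java side, and that the experimental
+`EnableFinalObjects` flag is on with `CheckFinalObjects` at its default of true):
+
+    import sun.misc.Unsafe;
+
+    class LockedArrayDemo {
+        public static void main(String[] args) {
+            Unsafe U = Unsafe.getUnsafe();       // requires a privileged caller
+            int[] a = {1, 2, 3};                 // created in the normal (unlocked) state
+            int[] locked = (int[]) U.lockPermanently(a);
+            // From here on, use only the returned reference, never `a`.
+            System.out.println(U.isPermanentlyLocked(locked));  // true
+            // locked[0] = 42;   // would throw (the current patch raises
+            //                   // IllegalMonitorStateException; the plan above
+            //                   // calls for LockedObjectException)
+        }
+    }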