changeset 1658:26faca352942

Merge
author tonyp
date Fri, 20 Aug 2010 12:01:10 -0700
parents 66b9f90a9211 ee5cc9e78493
children 571f6b35140b
files
diffstat 24 files changed, 187 insertions(+), 46 deletions(-)
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -112,6 +112,11 @@
     }
   }
 
+#ifdef COMPILER2
+  // Currently not supported anywhere.
+  FLAG_SET_DEFAULT(UseFPUForSpilling, false);
+#endif
+
   char buf[512];
   jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_v8() ? ", has_v8" : ""),
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -482,6 +482,15 @@
     }
   }
 
+#ifdef COMPILER2
+  if (UseFPUForSpilling) {
+    if (UseSSE < 2) {
+      // Only supported with SSE2+
+      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
+    }
+  }
+#endif
+
   assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
   assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");
 
@@ -520,6 +529,11 @@
     if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
       AllocatePrefetchDistance = 192;
       AllocatePrefetchLines = 4;
+#ifdef COMPILER2
+      if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+        FLAG_SET_DEFAULT(UseFPUForSpilling, true);
+      }
+#endif
     }
   }
   assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
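
Taken together with the sparc hunk above, these changes follow HotSpot's usual two-step flag ergonomics: a hard capability check force-disables the feature (no SSE2 on x86, no support at all on sparc), while the tuning heuristic for Nehalem-class CPUs under AggressiveOpts only flips the value when FLAG_IS_DEFAULT shows the user has not set it. A minimal sketch of that precedence, with hypothetical stand-ins for the flag macros:

    // Sketch only: the flag and helpers here are hypothetical stand-ins
    // for HotSpot's FLAG_IS_DEFAULT / FLAG_SET_DEFAULT machinery.
    static bool my_flag            = false;  // compiled-in default
    static bool my_flag_is_default = true;   // user never touched it

    static void adjust_my_flag(bool cpu_supported, bool aggressive_opts) {
      if (!cpu_supported) {
        my_flag = false;                     // hard requirement wins
      } else if (aggressive_opts && my_flag_is_default) {
        my_flag = true;                      // tune the default, never override the user
      }
    }
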
--- a/src/cpu/x86/vm/x86_32.ad	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/cpu/x86/vm/x86_32.ad	Fri Aug 20 12:01:10 2010 -0700
@@ -852,6 +852,39 @@
   }
 }
 
+static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+                                 int src_hi, int dst_hi, int size, outputStream* st ) {
+  // 32-bit
+  if (cbuf) {
+    emit_opcode(*cbuf, 0x66);
+    emit_opcode(*cbuf, 0x0F);
+    emit_opcode(*cbuf, 0x6E);
+    emit_rm(*cbuf, 0x3, Matcher::_regEncode[dst_lo] & 7, Matcher::_regEncode[src_lo] & 7);
+#ifndef PRODUCT
+  } else if (!do_size) {
+    st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
+#endif
+  }
+  return 4;
+}
+
+
+static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+                                 int src_hi, int dst_hi, int size, outputStream* st ) {
+  // 32-bit
+  if (cbuf) {
+    emit_opcode(*cbuf, 0x66);
+    emit_opcode(*cbuf, 0x0F);
+    emit_opcode(*cbuf, 0x7E);
+    emit_rm(*cbuf, 0x3, Matcher::_regEncode[src_lo] & 7, Matcher::_regEncode[dst_lo] & 7);
+#ifndef PRODUCT
+  } else if (!do_size) {
+    st->print("movdl   %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
+#endif
+  }
+  return 4;
+}
+
 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
   if( cbuf ) {
     emit_opcode(*cbuf, 0x8B );
@@ -947,6 +980,12 @@
   if( dst_first_rc == rc_int && src_first_rc == rc_stack )
     size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
 
+  // Check for integer reg-xmm reg copy
+  if( src_first_rc == rc_int && dst_first_rc == rc_xmm ) {
+    assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
+            "no 64 bit integer-float reg moves" );
+    return impl_movgpr2x_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+  }
   // --------------------------------------
   // Check for float reg-reg copy
   if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
@@ -1018,6 +1057,13 @@
     return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
   }
 
+  // Check for xmm reg-integer reg copy
+  if( src_first_rc == rc_xmm && dst_first_rc == rc_int ) {
+    assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
+            "no 64 bit float-integer reg moves" );
+    return impl_movx2gpr_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+  }
+
   // Check for xmm store
   if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
     return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size, st);
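
The two new helpers emit the standard MOVD encodings: 66 0F 6E is MOVD xmm, r/m32 (GPR to XMM) and 66 0F 7E is MOVD r/m32, xmm (XMM to GPR), each here with a mod=11 register-to-register ModRM byte, four bytes total, which is the fixed size both helpers return. A standalone sketch of the 6E form, writing into a hypothetical plain buffer instead of a CodeBuffer:

    #include <cstdint>

    // Emit MOVD xmm, r32 (66 0F 6E /r): ModRM reg = XMM destination,
    // rm = GPR source, mod = 11 for register-to-register.
    static int emit_movd_gpr_to_xmm(uint8_t* buf, int xmm_dst, int gpr_src) {
      buf[0] = 0x66;
      buf[1] = 0x0F;
      buf[2] = 0x6E;
      buf[3] = (uint8_t)(0xC0 | ((xmm_dst & 7) << 3) | (gpr_src & 7));
      return 4;  // same fixed length the .ad helpers report
    }
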
--- a/src/cpu/x86/vm/x86_64.ad	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/cpu/x86/vm/x86_64.ad	Fri Aug 20 12:01:10 2010 -0700
@@ -1607,8 +1607,8 @@
           emit_opcode(*cbuf, 0x0F);
           emit_opcode(*cbuf, 0x7E);
           emit_rm(*cbuf, 0x3,
-                  Matcher::_regEncode[dst_first] & 7,
-                  Matcher::_regEncode[src_first] & 7);
+                  Matcher::_regEncode[src_first] & 7,
+                  Matcher::_regEncode[dst_first] & 7);
 #ifndef PRODUCT
         } else if (!do_size) {
           st->print("movdq   %s, %s\t# spill",
@@ -1637,8 +1637,8 @@
           emit_opcode(*cbuf, 0x0F);
           emit_opcode(*cbuf, 0x7E);
           emit_rm(*cbuf, 0x3,
-                  Matcher::_regEncode[dst_first] & 7,
-                  Matcher::_regEncode[src_first] & 7);
+                  Matcher::_regEncode[src_first] & 7,
+                  Matcher::_regEncode[dst_first] & 7);
 #ifndef PRODUCT
         } else if (!do_size) {
           st->print("movdl   %s, %s\t# spill",
--- a/src/cpu/zero/vm/bytecodeInterpreter_zero.inline.hpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/cpu/zero/vm/bytecodeInterpreter_zero.inline.hpp	Fri Aug 20 12:01:10 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
+ * Copyright 2007, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -268,7 +268,7 @@
   return op1 - op2;
 }
 
-inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
+inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
   return ((juint) op1) >> (op2 & 0x1F);
 }
 
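The return type change matters because iushr is a logical shift: casting to juint shifts zeros in from the left regardless of sign, and the JVM masks the shift count to its low five bits for 32-bit operands. A self-contained restatement with standard types:

    #include <cassert>
    #include <cstdint>

    // iushr: reinterpret as unsigned so the shift is logical; only the
    // low five bits of the count are used for 32-bit operands.
    static uint32_t vm_int_ushr(int32_t op1, int32_t op2) {
      return ((uint32_t) op1) >> (op2 & 0x1F);
    }

    int main() {
      assert(vm_int_ushr(-1, 28) == 0xFu);  // zeros shifted in, not sign bits
      assert(vm_int_ushr(-1, 60) == 0xFu);  // 60 & 0x1F == 28
      return 0;
    }
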
--- a/src/cpu/zero/vm/javaFrameAnchor_zero.hpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/cpu/zero/vm/javaFrameAnchor_zero.hpp	Fri Aug 20 12:01:10 2010 -0700
@@ -82,6 +82,10 @@
     return _last_Java_fp;
   }
 
+  address last_Java_pc() const {
+    return _last_Java_pc;
+  }
+
   static ByteSize last_Java_fp_offset() {
     return byte_offset_of(JavaFrameAnchor, _last_Java_fp);
   }
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -435,22 +435,22 @@
   void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
-    ShouldNotCallThis();
+    memmove(to, from, count);
   }
   void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                       HeapWord* to,
                                       size_t    count) {
-    ShouldNotCallThis();
+    memmove(to, from, count * 2);
   }
   void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
-    ShouldNotCallThis();
+    memmove(to, from, count * 4);
   }
   void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
-    ShouldNotCallThis();
+    memmove(to, from, count * 8);
   }
 };
 
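The formerly unreachable arrayof stubs now forward to memmove, scaling the element count by the element size (2 for jshort, 4 for jint, 8 for jlong); memmove rather than memcpy because conjoint copies may overlap. The same shape with the size spelled out via sizeof, as a small sketch outside the stub interface:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Conjoint (possibly overlapping) element copy: memmove handles the
    // overlap that memcpy would not; sizeof(int32_t) == 4 matches the
    // hand-written "count * 4" in the jint stub above.
    static void copy_conjoint_jints(const int32_t* from, int32_t* to, size_t count) {
      memmove(to, from, count * sizeof(int32_t));
    }

    int main() {
      int32_t a[4] = {1, 2, 3, 4};
      copy_conjoint_jints(a, a + 1, 3);  // overlapping shift right by one
      assert(a[0] == 1 && a[1] == 1 && a[2] == 2 && a[3] == 3);
      return 0;
    }
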
--- a/src/os_cpu/linux_zero/vm/thread_linux_zero.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/os_cpu/linux_zero/vm/thread_linux_zero.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2009 Red Hat, Inc.
+ * Copyright 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,9 @@
  *
  */
 
-// This file is intentionally empty
+#include "incls/_precompiled.incl"
+#include "incls/_thread_linux_zero.cpp.incl"
 
-void JavaThread::cache_global_variables() { }
+void JavaThread::cache_global_variables() {
+  // nothing to do
+}
--- a/src/share/vm/code/nmethod.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/code/nmethod.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -433,6 +433,10 @@
   _unload_reported            = false;           // jvmti state
 
   NOT_PRODUCT(_has_debug_info = false);
+#ifdef ASSERT
+  _oops_are_stale             = false;
+#endif
+
   _oops_do_mark_link       = NULL;
   _jmethod_id              = NULL;
   _osr_link                = NULL;
@@ -1230,11 +1234,10 @@
 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
 
-  bool was_alive = false;
-
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
   methodHandle the_method(method());
+  No_Safepoint_Verifier nsv;
 
   {
     // If the method is already zombie there is nothing to do
@@ -1303,13 +1306,27 @@
   // state will be flushed later when the transition to zombie
   // happens or they get unloaded.
   if (state == zombie) {
-    // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
-    // and it hasn't already been reported for this nmethod then report it now.
-    // (the event may have been reported earilier if the GC marked it for unloading).
-    post_compiled_method_unload();
+    {
+      // Flushing dependencies must be done before any possible
+      // safepoint can sneak in, otherwise the oops used by the
+      // dependency logic could have become stale.
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      flush_dependencies(NULL);
+    }
 
-    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    flush_dependencies(NULL);
+    {
+      // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
+      // and it hasn't already been reported for this nmethod then report it now.
+      // (the event may have been reported earlier if the GC marked it for unloading).
+      Pause_No_Safepoint_Verifier pnsv(&nsv);
+      post_compiled_method_unload();
+    }
+
+#ifdef ASSERT
+    // It's no longer safe to access the oops section since zombie
+    // nmethods aren't scanned for GC.
+    _oops_are_stale = true;
+#endif
   } else {
     assert(state == not_entrant, "other cases may need to be handled differently");
   }
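
The nmethod reordering is about safepoints: dependencies must be flushed while the nmethod's oops are still valid, so the new No_Safepoint_Verifier covers the whole transition and is paused only around the JVMTI unload event, which may block. A schematic of that scoping with simplified stand-in classes (the real verifiers consult the thread's safepoint state; this mirrors only the nesting):

    // Stand-ins for No_Safepoint_Verifier / Pause_No_Safepoint_Verifier.
    struct NoSafepointScope {
      bool armed;
      NoSafepointScope() : armed(true) {}
    };

    struct PauseScope {
      NoSafepointScope* nsv;
      explicit PauseScope(NoSafepointScope* v) : nsv(v) { nsv->armed = false; }
      ~PauseScope() { nsv->armed = true; }    // re-armed on scope exit
    };

    void make_zombie_sketch() {
      NoSafepointScope nsv;     // no safepoint may sneak in from here on
      // ... flush_dependencies() runs here, while the oops are valid ...
      {
        PauseScope p(&nsv);     // window where a safepoint is tolerated
        // ... post_compiled_method_unload() may block or safepoint ...
      }
      // ... mark the oops section stale (debug builds only) ...
    }
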
--- a/src/share/vm/code/nmethod.hpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/code/nmethod.hpp	Fri Aug 20 12:01:10 2010 -0700
@@ -177,6 +177,10 @@
   // Protected by Patching_lock
  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
 
+#ifdef ASSERT
+  bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
+#endif
+
   enum { alive        = 0,
          not_entrant  = 1, // uncommon trap has happened but activations may still exist
          zombie       = 2,
@@ -434,6 +438,7 @@
   oop*  oop_addr_at(int index) const {  // for GC
     // relocation indexes are biased by 1 (because 0 is reserved)
     assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
+    assert(!_oops_are_stale, "oops are stale");
     return &oops_begin()[index - 1];
   }
 
--- a/src/share/vm/compiler/compileBroker.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -1652,12 +1652,10 @@
 void CompileBroker::handle_full_code_cache() {
   UseInterpreter = true;
   if (UseCompiler || AlwaysCompileLoopMethods ) {
-    CompilerThread* thread = CompilerThread::current();
-    CompileLog* log = thread->log();
-    if (log != NULL) {
-      log->begin_elem("code_cache_full");
-      log->stamp();
-      log->end_elem();
+    if (xtty != NULL) {
+      xtty->begin_elem("code_cache_full");
+      xtty->stamp();
+      xtty->end_elem();
     }
     warning("CodeCache is full. Compiler has been disabled.");
     warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -421,7 +421,9 @@
 #ifdef ASSERT
   if (istate->_msg != initialize) {
     assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
-  IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
+#ifndef SHARK
+    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
+#endif // !SHARK
   }
   // Verify linkages.
   interpreterState l = istate;
--- a/src/share/vm/opto/c2_globals.hpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/opto/c2_globals.hpp	Fri Aug 20 12:01:10 2010 -0700
@@ -178,6 +178,9 @@
   product(bool, ReduceBulkZeroing, true,                                    \
           "When bulk-initializing, try to avoid needless zeroing")          \
                                                                             \
+  product(bool, UseFPUForSpilling, false,                                   \
+          "Spill integer registers to FPU instead of stack when possible")  \
+                                                                            \
   develop_pd(intx, RegisterCostAreaRatio,                                   \
           "Spill selection in reg allocator: scale area by (X/64K) before " \
           "adding cost")                                                    \
--- a/src/share/vm/opto/coalesce.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/opto/coalesce.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -780,6 +780,14 @@
   // Number of bits free
   uint rm_size = rm.Size();
 
+  if (UseFPUForSpilling && rm.is_AllStack() ) {
+    // Don't coalesce when frequency difference is large
+    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
+    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
+    if (src_def_b->_freq > 10*dst_b->_freq )
+      return false;
+  }
+
   // If we can use any stack slot, then effective size is infinite
   if( rm.is_AllStack() ) rm_size += 1000000;
   // Incompatible masks, no way to coalesce
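
The coalesce.cpp change is a heuristic rather than a correctness fix: once values can spill to FPU registers, keeping a register-to-register copy can be cheaper than the constraint that coalescing would impose, so coalescing is vetoed when the defining block runs more than ten times as often as the block holding the copy. The decision distilled into plain values (hypothetical signature, not the allocator's):

    // Hypothetical distillation of the veto above; the real code reads
    // the block frequencies out of the PhaseChaitin structures.
    static bool may_coalesce(double src_def_freq, double dst_block_freq,
                             bool use_fpu_for_spilling, bool mask_is_all_stack) {
      if (use_fpu_for_spilling && mask_is_all_stack &&
          src_def_freq > 10.0 * dst_block_freq) {
        return false;  // large frequency gap: keep the copy instead
      }
      return true;
    }
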
--- a/src/share/vm/opto/matcher.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/opto/matcher.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -456,6 +456,23 @@
   *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
    idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
 
+   if (UseFPUForSpilling) {
+     // This mask logic assumes that the spill operations are
+     // symmetric and that the registers involved are the same size.
+     // On sparc, for instance, we may have to use 64 bit moves that
+     // will kill 2 registers when used with F0-F31.
+     idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
+     idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
+#ifdef _LP64
+     idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
+     idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
+     idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
+     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
+#else
+     idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
+#endif
+   }
+
   // Make up debug masks.  Any spill slot plus callee-save registers.
   // Caller-save registers are assumed to be trashable by the various
   // inline-cache fixup routines.
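
This matcher hunk is the heart of the feature: each integer spill mask is OR'ed with the same-width floating-point register mask (and vice versa), so the register allocator may spill an int, long, or pointer into an idle XMM/FPU register instead of a stack slot. A toy version of the mask union, assuming a simple 64-bit bitset in place of HotSpot's RegMask:

    #include <cstdint>

    // Toy RegMask: one bit per register or stack slot.
    struct ToyRegMask {
      uint64_t bits;
      void OR(const ToyRegMask& o) { bits |= o.bits; }
    };

    // Spill mask = own registers + stack slots, plus the other register
    // file's same-width registers when FPU spilling is enabled.
    static ToyRegMask make_int_spill_mask(ToyRegMask int_regs, ToyRegMask stack,
                                          ToyRegMask float_regs, bool fpu_spill) {
      ToyRegMask m = int_regs;
      m.OR(stack);
      if (fpu_spill) m.OR(float_regs);
      return m;
    }
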
--- a/src/share/vm/opto/reg_split.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/opto/reg_split.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -975,6 +975,19 @@
               insidx++;  // Reset iterator to skip USE side split
               continue;
             }
+
+            if (UseFPUForSpilling && n->is_Call() && !uup && !dup ) {
+              // The use at the call can force the def down so insert
+              // a split before the use to allow the def more freedom.
+              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
+              // If it wasn't split, bail
+              if (!maxlrg) {
+                return 0;
+              }
+              insidx++;  // Reset iterator to skip USE side split
+              continue;
+            }
+
             // Here is the logic chart which describes USE Splitting:
             // 0 = false or DOWN, 1 = true or UP
             //
--- a/src/share/vm/runtime/arguments.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/arguments.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -3005,10 +3005,6 @@
     CommandLineFlags::printSetFlags();
   }
 
-  if (PrintFlagsFinal) {
-    CommandLineFlags::printFlags();
-  }
-
   // Apply CPU specific policy for the BiasedLocking
   if (UseBiasedLocking) {
     if (!VM_Version::use_biased_locking() &&
--- a/src/share/vm/runtime/frame.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/frame.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -215,17 +215,15 @@
   return !nm->is_at_poll_return(pc());
 }
 
-void frame::deoptimize(JavaThread* thread, bool thread_is_known_safe) {
-// Schedule deoptimization of an nmethod activation with this frame.
-
-  // Store the original pc before an patch (or request to self-deopt)
-  // in the published location of the frame.
-
+void frame::deoptimize(JavaThread* thread) {
+  // Schedule deoptimization of an nmethod activation with this frame.
   assert(_cb != NULL && _cb->is_nmethod(), "must be");
   nmethod* nm = (nmethod*)_cb;
 
   // This is a fix for register window patching race
-  if (NeedsDeoptSuspend && !thread_is_known_safe) {
+  if (NeedsDeoptSuspend && Thread::current() != thread) {
+    assert(SafepointSynchronize::is_at_safepoint(),
+           "patching other threads for deopt may only occur at a safepoint");
 
     // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
     // we could see the frame again and ask for it to be deoptimized since
@@ -248,7 +246,11 @@
     // whether to spin or block. It isn't worth it. Just treat it like
     // native and be done with it.
     //
-    JavaThreadState state = thread->thread_state();
+    // Examine the state of the thread at the start of safepoint since
+    // threads that were in native at the start of the safepoint could
+    // come to a halt during the safepoint, changing the current value
+    // of the safepoint_state.
+    JavaThreadState state = thread->safepoint_state()->orig_thread_state();
     if (state == _thread_in_native || state == _thread_in_native_trans) {
       // Since we are at a safepoint the target thread will stop itself
       // before it can return to java as long as we remain at the safepoint.
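
Dropping thread_is_known_safe replaces a caller-supplied promise with a checkable invariant: a thread may always deoptimize its own frames, while patching another thread's frames is legal only inside a safepoint, where the state captured at safepoint start (the safepoint.cpp/hpp hunks below) tells the race-avoidance code what the thread was doing. The invariant in stand-in form:

    #include <cassert>
    #include <thread>

    // Illustrative only; the names do not match HotSpot's.
    static bool vm_at_safepoint = false;

    static void deoptimize_frame_of(std::thread::id target) {
      if (std::this_thread::get_id() != target) {
        assert(vm_at_safepoint && "patching another thread only at a safepoint");
      }
      // ... patch the frame's return pc to the deopt blob ...
    }
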
--- a/src/share/vm/runtime/frame.hpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/frame.hpp	Fri Aug 20 12:01:10 2010 -0700
@@ -174,7 +174,7 @@
   address  sender_pc() const;
 
   // Support for deoptimization
-  void deoptimize(JavaThread* thread, bool thread_is_known_safe = false);
+  void deoptimize(JavaThread* thread);
 
   // The frame's original SP, before any extension by an interpreted callee;
   // used for packing debug info into vframeArray objects and vframeArray lookup.
--- a/src/share/vm/runtime/init.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/init.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -128,6 +128,12 @@
     Universe::verify();   // make sure we're starting with a clean slate
   }
 
+  // All the flags that get adjusted by VM_Version_init and os::init_2
+  // have been set so dump the flags now.
+  if (PrintFlagsFinal) {
+    CommandLineFlags::printFlags();
+  }
+
   return JNI_OK;
 }
 
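Together with the arguments.cpp hunk above, this moves the -XX:+PrintFlagsFinal dump from argument parsing to the end of VM initialization, so it reflects the ergonomic adjustments made by VM_Version_init and os::init_2, including the UseFPUForSpilling defaults set earlier in this changeset. Typical use:

    java -XX:+PrintFlagsFinal -version
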
--- a/src/share/vm/runtime/safepoint.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -782,6 +782,9 @@
 
   JavaThreadState state = _thread->thread_state();
 
+  // Save the state at the start of safepoint processing.
+  _orig_thread_state = state;
+
   // Check for a thread that is suspended. Note that thread resume tries
   // to grab the Threads_lock which we own here, so a thread cannot be
   // resumed during safepoint synchronization.
--- a/src/share/vm/runtime/safepoint.hpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/safepoint.hpp	Fri Aug 20 12:01:10 2010 -0700
@@ -185,6 +185,7 @@
 
   JavaThread *                   _thread;
   volatile suspend_type          _type;
+  JavaThreadState                _orig_thread_state;
 
 
  public:
@@ -199,6 +200,7 @@
   JavaThread*  thread() const         { return _thread; }
   suspend_type type() const           { return _type; }
   bool         is_running() const     { return (_type==_running); }
+  JavaThreadState orig_thread_state() const { return _orig_thread_state; }
 
   // Support for safepoint timeout (debugging)
   bool has_called_back() const                   { return _has_called_back; }
--- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -2493,15 +2493,13 @@
   }
 
   // Must unlock before calling set_code
+
   // Install the generated code.
   if (nm != NULL) {
     method->set_code(method, nm);
     nm->post_compiled_method_load_event();
   } else {
     // CodeCache is full, disable compilation
-    // Ought to log this but compile log is only per compile thread
-    // and we're some non descript Java thread.
-    MutexUnlocker mu(AdapterHandlerLibrary_lock);
     CompileBroker::handle_full_code_cache();
   }
   return nm;
--- a/src/share/vm/runtime/thread.cpp	Fri Aug 20 13:17:08 2010 -0400
+++ b/src/share/vm/runtime/thread.cpp	Fri Aug 20 12:01:10 2010 -0700
@@ -2110,8 +2110,7 @@
     }
     if (f.id() == thread->must_deopt_id()) {
       thread->clear_must_deopt_id();
-      // Since we know we're safe to deopt the current state is a safe state
-      f.deoptimize(thread, true);
+      f.deoptimize(thread);
     } else {
       fatal("missed deoptimization!");
     }