changeset 7766:c548512bd499

Merge
author minqi
date Tue, 27 Jan 2015 20:02:35 -0800
parents 078a77b69f36 0f8fc58bc5a2
children c1b8e92e0469
files src/share/vm/classfile/verifier.cpp src/share/vm/interpreter/linkResolver.cpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/instanceKlass.hpp
diffstat 100 files changed, 1857 insertions(+), 696 deletions(-)
--- a/.hgtags	Tue Jan 27 05:51:00 2015 -0800
+++ b/.hgtags	Tue Jan 27 20:02:35 2015 -0800
@@ -448,3 +448,5 @@
 65a9747147b8090037541040ba67156ec914db6a jdk9-b43
 43a44b56dca61a4d766a20f0528fdd8b5ceff873 jdk9-b44
 5dc8184af1e2bb30b0103113d1f1a58a21a80c37 jdk9-b45
+a184ee1d717297bd35b7c3e35393e137921a3ed2 jdk9-b46
+3b241fb72b8925b75941d612db762a6d5da66d02 jdk9-b47
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -483,15 +483,6 @@
 
 }
 
-jbyte* G1PostBarrierStub::_byte_map_base = NULL;
-
-jbyte* G1PostBarrierStub::byte_map_base_slow() {
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
-         "Must be if we're using this.");
-  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
-}
-
 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1374,6 +1374,7 @@
 }
 
 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
+                                                                Register method_counters,
                                                                 Register Rtmp,
                                                                 Label &profile_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
@@ -1386,9 +1387,8 @@
   br_notnull_short(ImethodDataPtr, Assembler::pn, done);
 
   // Test to see if we should create a method data oop
-  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
-  sethi(profile_limit, Rtmp);
-  ld(Rtmp, profile_limit.low10(), Rtmp);
+  Address profile_limit(method_counters, MethodCounters::interpreter_profile_limit_offset());
+  ld(profile_limit, Rtmp);
   cmp(invocation_count, Rtmp);
   // Use long branches because call_VM() code and following code generated by
   // test_backedge_count_for_osr() is large in debug VM.
@@ -2375,6 +2375,7 @@
 
 #ifndef CC_INTERP
 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
+                                                             Register method_counters,
                                                              Register branch_bcp,
                                                              Register Rtmp ) {
   Label did_not_overflow;
@@ -2382,8 +2383,8 @@
   assert_different_registers(backedge_count, Rtmp, branch_bcp);
   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
 
-  AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
-  load_contents(limit, Rtmp);
+  Address limit(method_counters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
+  ld(limit, Rtmp);
   cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
 
   // When ProfileInterpreter is on, the backedge_count comes from the
@@ -2500,17 +2501,13 @@
 
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
+                                                        int increment, Address mask_addr,
                                                         Register scratch1, Register scratch2,
                                                         Condition cond, Label *where) {
   ld(counter_addr, scratch1);
   add(scratch1, increment, scratch1);
-  if (is_simm13(mask)) {
-    andcc(scratch1, mask, G0);
-  } else {
-    set(mask, scratch2);
-    andcc(scratch1, scratch2,  G0);
-  }
+  ld(mask_addr, scratch2);
+  andcc(scratch1, scratch2,  G0);
   br(cond, false, Assembler::pn, *where);
   delayed()->st(scratch1, counter_addr);
 }
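
The signature change above replaces the immediate `mask` operand with an `Address`, so the notification mask is loaded from the MethodData/MethodCounters object at run time instead of being baked into the generated code. A minimal C++ sketch of the semantics the emitted stub implements (the function and parameter names are illustrative, not actual HotSpot interfaces):

```cpp
#include <cstdint>

// Returns true when the overflow/notify branch should be taken
// (the Assembler::zero case used for the invocation counter).
static bool increment_and_test(int32_t* counter_addr, int32_t increment,
                               const int32_t* mask_addr /* was an int immediate */) {
  int32_t value = *counter_addr + increment;  // ld + add
  *counter_addr = value;                      // delayed()->st in the SPARC code
  return (value & *mask_addr) == 0;           // ld mask, andcc, branch
}
```

Reading the mask from memory lets each method carry its own notification frequency rather than sharing a single VM-global value.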
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -267,7 +267,7 @@
   void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
   void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 );
 #ifndef CC_INTERP
-  void test_backedge_count_for_osr( Register backedge_count, Register branch_bcp, Register Rtmp );
+  void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register branch_bcp, Register Rtmp );
 
 #endif /* CC_INTERP */
   // Object locking
@@ -280,7 +280,7 @@
   void set_method_data_pointer_for_bcp();
   void test_method_data_pointer(Label& zero_continue);
   void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register Rtmp, Label &profile_continue);
+  void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rtmp, Label &profile_continue);
 
   void set_mdp_data_at(int constant, Register value);
   void increment_mdp_data_at(Address counter, Register bumped_count,
@@ -291,7 +291,7 @@
                              Register bumped_count, Register scratch2,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
+                               int increment, Address mask_addr,
                                Register scratch1, Register scratch2,
                                Condition cond, Label *where);
   void set_mdp_flag_at(int flag_constant, Register scratch);
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -282,12 +282,11 @@
 void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
   // Note: In tiered we increment either counters in MethodCounters* or in
   // MDO depending if we're profiling or not.
-  const Register Rcounters = G3_scratch;
+  const Register G3_method_counters = G3_scratch;
   Label done;
 
   if (TieredCompilation) {
     const int increment = InvocationCounter::count_increment;
-    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // If no method data exists, go to profile_continue.
@@ -297,6 +296,7 @@
       Address mdo_invocation_counter(G4_scratch,
                                      in_bytes(MethodData::invocation_counter_offset()) +
                                      in_bytes(InvocationCounter::counter_offset()));
+      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                  G3_scratch, Lscratch,
                                  Assembler::zero, overflow);
@@ -305,20 +305,21 @@
 
     // Increment counter in MethodCounters*
     __ bind(no_mdo);
-    Address invocation_counter(Rcounters,
+    Address invocation_counter(G3_method_counters,
             in_bytes(MethodCounters::invocation_counter_offset()) +
             in_bytes(InvocationCounter::counter_offset()));
-    __ get_method_counters(Lmethod, Rcounters, done);
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
     __ increment_mask_and_jump(invocation_counter, increment, mask,
                                G4_scratch, Lscratch,
                                Assembler::zero, overflow);
     __ bind(done);
-  } else {
+  } else { // not TieredCompilation
     // Update standard invocation counters
-    __ get_method_counters(Lmethod, Rcounters, done);
-    __ increment_invocation_counter(Rcounters, O0, G4_scratch);
+    __ get_method_counters(Lmethod, G3_method_counters, done);
+    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
     if (ProfileInterpreter) {
-      Address interpreter_invocation_counter(Rcounters,
+      Address interpreter_invocation_counter(G3_method_counters,
             in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
       __ ld(interpreter_invocation_counter, G4_scratch);
       __ inc(G4_scratch);
@@ -327,16 +328,16 @@
 
     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
-      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
-      __ load_contents(profile_limit, G3_scratch);
-      __ cmp_and_br_short(O0, G3_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
+      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
+      __ ld(profile_limit, G1_scratch);
+      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);
 
       // if no method data exists, go to profile_method
       __ test_method_data_pointer(*profile_method);
     }
 
-    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
-    __ load_contents(invocation_limit, G3_scratch);
+    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
+    __ ld(invocation_limit, G3_scratch);
     __ cmp(O0, G3_scratch);
     __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
     __ delayed()->nop();
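
After this hunk, the interpreter profile and invocation limits are per-method fields of MethodCounters reached through a plain offset, rather than global `InvocationCounter` statics reached through an `AddressLiteral`. A hedged C++ model of the non-tiered decision flow generated above (the struct and helper are stand-ins, not VM classes):

```cpp
// Illustrative model only; MethodCountersModel approximates the VM's
// MethodCounters, whose limit fields this patch introduces.
struct MethodCountersModel {
  unsigned invocation_count;
  unsigned interpreter_profile_limit;     // replaces InvocationCounter::InterpreterProfileLimit
  unsigned interpreter_invocation_limit;  // replaces InvocationCounter::InterpreterInvocationLimit
};

enum class CounterAction { Continue, ProfileMethod, Overflow };

// Approximates the branch structure of generate_counter_incr() in the
// non-tiered path: profile-limit check first, then the overflow check.
static CounterAction classify(const MethodCountersModel& mc, bool profiling) {
  if (profiling && mc.invocation_count >= mc.interpreter_profile_limit) {
    return CounterAction::ProfileMethod;   // may create a method data oop
  }
  if (mc.invocation_count >= mc.interpreter_invocation_limit) {
    return CounterAction::Overflow;        // request compilation
  }
  return CounterAction::Continue;
}
```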
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1599,13 +1599,12 @@
     // Bump bytecode pointer by displacement (take the branch)
     __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr
 
-    const Register Rcounters = G3_scratch;
-    __ get_method_counters(Lmethod, Rcounters, Lforward);
+    const Register G3_method_counters = G3_scratch;
+    __ get_method_counters(Lmethod, G3_method_counters, Lforward);
 
     if (TieredCompilation) {
       Label Lno_mdo, Loverflow;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // If no method data exists, go to profile_continue.
         __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
@@ -1614,6 +1613,7 @@
         // Increment backedge counter in the MDO
         Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                  in_bytes(InvocationCounter::counter_offset()));
+        Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                    Assembler::notZero, &Lforward);
         __ ba_short(Loverflow);
@@ -1621,9 +1621,10 @@
 
       // If there's no MDO, increment counter in MethodCounters*
       __ bind(Lno_mdo);
-      Address backedge_counter(Rcounters,
+      Address backedge_counter(G3_method_counters,
               in_bytes(MethodCounters::backedge_counter_offset()) +
               in_bytes(InvocationCounter::counter_offset()));
+      Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                  Assembler::notZero, &Lforward);
       __ bind(Loverflow);
@@ -1663,18 +1664,19 @@
       __ jmp(O2, G0);
       __ delayed()->nop();
 
-    } else {
+    } else { // not TieredCompilation
       // Update Backedge branch separately from invocations
       const Register G4_invoke_ctr = G4;
-      __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
+      __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
       if (ProfileInterpreter) {
-        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
+        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
+
+          __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
         }
       } else {
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
+          __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
         }
       }
     }
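
The deleted lines computed the backedge notification mask inline from `Tier0BackedgeNotifyFreqLog`; after this change an equivalent value is presumably precomputed when the MethodData/MethodCounters object is set up and stored in its `backedge_mask` field, so the template interpreter only loads it. A hedged sketch of that precomputation (the helper name is hypothetical; the initialization site is outside this diff):

```cpp
// Hypothetical initialization-side helper; the real assignment happens
// wherever the MethodData/MethodCounters mask fields are populated.
static int build_notify_mask(int notify_freq_log, int count_shift) {
  // The low notify_freq_log bits above the status bits (skipped via
  // count_shift) select how often the runtime is notified: the masked
  // counter wraps to zero every 2^notify_freq_log increments.
  return ((1 << notify_freq_log) - 1) << count_shift;
}
```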
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -541,15 +541,6 @@
 
 }
 
-jbyte* G1PostBarrierStub::_byte_map_base = NULL;
-
-jbyte* G1PostBarrierStub::byte_map_base_slow() {
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
-         "Must be if we're using this.");
-  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
-}
-
 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(addr()->is_register(), "Precondition.");
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1360,7 +1360,7 @@
 
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
+                                                        int increment, Address mask,
                                                         Register scratch, bool preloaded,
                                                         Condition cond, Label* where) {
   if (!preloaded) {
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -182,7 +182,7 @@
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
+                               int increment, Address mask,
                                Register scratch, bool preloaded,
                                Condition cond, Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1426,7 +1426,7 @@
 
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
+                                                        int increment, Address mask,
                                                         Register scratch, bool preloaded,
                                                         Condition cond, Label* where) {
   if (!preloaded) {
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -191,7 +191,7 @@
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
+                               int increment, Address mask,
                                Register scratch, bool preloaded,
                                Condition cond, Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -346,7 +346,6 @@
   // depending if we're profiling or not.
   if (TieredCompilation) {
     int increment = InvocationCounter::count_increment;
-    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // Are we profiling?
@@ -356,6 +355,7 @@
       // Increment counter in the MDO
       const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
       __ jmp(done);
     }
@@ -366,11 +366,12 @@
                   InvocationCounter::counter_offset());
 
     __ get_method_counters(rbx, rax, done);
+    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
     __ increment_mask_and_jump(invocation_counter, increment, mask,
                                rcx, false, Assembler::zero, overflow);
     __ bind(done);
-  } else {
-    const Address backedge_counter  (rax,
+  } else { // not TieredCompilation
+    const Address backedge_counter(rax,
                   MethodCounters::backedge_counter_offset() +
                   InvocationCounter::counter_offset());
     const Address invocation_counter(rax,
@@ -400,16 +401,16 @@
 
     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
-      __ cmp32(rcx,
-               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
       __ jcc(Assembler::less, *profile_method_continue);
 
       // if no method data exists, go to profile_method
       __ test_method_data_pointer(rax, *profile_method);
     }
 
-    __ cmp32(rcx,
-             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
     __ jcc(Assembler::aboveEqual, *overflow);
     __ bind(done);
   }
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -299,7 +299,6 @@
   // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
   if (TieredCompilation) {
     int increment = InvocationCounter::count_increment;
-    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // Are we profiling?
@@ -309,6 +308,7 @@
       // Increment counter in the MDO
       const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
+      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
       __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
       __ jmp(done);
     }
@@ -318,10 +318,11 @@
                   MethodCounters::invocation_counter_offset() +
                   InvocationCounter::counter_offset());
     __ get_method_counters(rbx, rax, done);
+    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
     __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                                false, Assembler::zero, overflow);
     __ bind(done);
-  } else {
+  } else { // not TieredCompilation
     const Address backedge_counter(rax,
                   MethodCounters::backedge_counter_offset() +
                   InvocationCounter::counter_offset());
@@ -350,14 +351,16 @@
 
     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
-      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
+      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
       __ jcc(Assembler::less, *profile_method_continue);
 
       // if no method data exists, go to profile_method
       __ test_method_data_pointer(rax, *profile_method);
     }
 
-    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
+    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
+    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
     __ jcc(Assembler::aboveEqual, *overflow);
     __ bind(done);
   }
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1621,7 +1621,6 @@
     if (TieredCompilation) {
       Label no_mdo;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // Are we profiling?
         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
@@ -1630,6 +1629,7 @@
         // Increment the MDO backedge counter
         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
+        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                    rax, false, Assembler::zero, &backedge_counter_overflow);
         __ jmp(dispatch);
@@ -1637,9 +1637,10 @@
       __ bind(no_mdo);
       // Increment backedge counter in MethodCounters*
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
+      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                  rax, false, Assembler::zero, &backedge_counter_overflow);
-    } else {
+    } else { // not TieredCompilation
       // increment counter
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
       __ movl(rax, Address(rcx, be_offset));        // load backedge counter
@@ -1653,8 +1654,7 @@
 
       if (ProfileInterpreter) {
         // Test to see if we should create a method data oop
-        __ cmp32(rax,
-                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
         __ jcc(Assembler::less, dispatch);
 
         // if no method data exists, go to profile method
@@ -1662,8 +1662,7 @@
 
         if (UseOnStackReplacement) {
           // check for overflow against rbx, which is the MDO taken count
-          __ cmp32(rbx,
-                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ jcc(Assembler::below, dispatch);
 
           // When ProfileInterpreter is on, the backedge_count comes from the
@@ -1678,8 +1677,7 @@
       } else {
         if (UseOnStackReplacement) {
           // check for overflow against rax, which is the sum of the counters
-          __ cmp32(rax,
-                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
 
         }
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1642,7 +1642,6 @@
     if (TieredCompilation) {
       Label no_mdo;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // Are we profiling?
         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
@@ -1651,6 +1650,7 @@
         // Increment the MDO backedge counter
         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
                                            in_bytes(InvocationCounter::counter_offset()));
+        const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                    rax, false, Assembler::zero, &backedge_counter_overflow);
         __ jmp(dispatch);
@@ -1658,9 +1658,10 @@
       __ bind(no_mdo);
       // Increment backedge counter in MethodCounters*
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
+      const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                  rax, false, Assembler::zero, &backedge_counter_overflow);
-    } else {
+    } else { // not TieredCompilation
       // increment counter
       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
       __ movl(rax, Address(rcx, be_offset));        // load backedge counter
@@ -1674,8 +1675,7 @@
 
       if (ProfileInterpreter) {
         // Test to see if we should create a method data oop
-        __ cmp32(rax,
-                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
+        __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
         __ jcc(Assembler::less, dispatch);
 
         // if no method data exists, go to profile method
@@ -1683,8 +1683,7 @@
 
         if (UseOnStackReplacement) {
           // check for overflow against ebx which is the MDO taken count
-          __ cmp32(rbx,
-                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ jcc(Assembler::below, dispatch);
 
           // When ProfileInterpreter is on, the backedge_count comes
@@ -1702,8 +1701,7 @@
         if (UseOnStackReplacement) {
           // check for overflow against eax, which is the sum of the
           // counters
-          __ cmp32(rax,
-                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
+          __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
 
         }
--- a/src/os/bsd/vm/perfMemory_bsd.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -197,7 +197,38 @@
 }
 
 
-// check if the given path is considered a secure directory for
+// Check if the given statbuf describes a secure directory for
+// the backing store files. Returns true if the directory described
+// is considered a secure location. Returns false if the stat
+// information indicates a symbolic link or another insecure condition.
+//
+static bool is_statbuf_secure(struct stat *statp) {
+  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+    // The path represents a link or some non-directory file type,
+    // which is not what we expected. Declare it insecure.
+    //
+    return false;
+  }
+  // We have an existing directory, check if the permissions are safe.
+  //
+  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+    // The directory is open for writing and could be subjected
+    // to a symlink or a hard link attack. Declare it insecure.
+    //
+    return false;
+  }
+  // See if the uid of the directory matches the effective uid of the process.
+  //
+  if (statp->st_uid != geteuid()) {
+    // The directory was not created by this user, declare it insecure.
+    //
+    return false;
+  }
+  return true;
+}
+
+
+// Check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
 // is a symbolic link or if an error occurred.
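
For reference, the permission test in `is_statbuf_secure()` rejects any directory that is group- or world-writable, since such a directory is open to symlink and hard-link attacks. A small hedged example of the mode check in isolation:

```cpp
#include <sys/stat.h>

// True when the mode bits carry neither group- nor other-write
// permission: 0755 passes, while 0775, 0777 and a /tmp-style 01777
// sticky directory all fail, matching the test above.
static bool mode_is_safe(mode_t mode) {
  return (mode & (S_IWGRP | S_IWOTH)) == 0;
}
```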
@@ -211,27 +242,185 @@
     return false;
   }
 
-  // the path exists, now check it's mode
-  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
-    // the path represents a link or some non-directory file type,
-    // which is not what we expected. declare it insecure.
-    //
+  // The path exists, see if it is secure.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check if the given directory file descriptor is considered a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+//
+static bool is_dirfd_secure(int dir_fd) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+  if (result == OS_ERR) {
     return false;
   }
-  else {
-    // we have an existing directory, check if the permissions are safe.
-    //
-    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
-      // the directory is open for writing and could be subjected
-      // to a symlnk attack. declare it insecure.
-      //
-      return false;
+
+  // The path exists, now check its mode.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+//
+static bool is_same_fsobject(int fd1, int fd2) {
+  struct stat statbuf1;
+  struct stat statbuf2;
+  int result = 0;
+
+  RESTARTABLE(::fstat(fd1, &statbuf1), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+  RESTARTABLE(::fstat(fd2, &statbuf2), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  if ((statbuf1.st_ino == statbuf2.st_ino) &&
+      (statbuf1.st_dev == statbuf2.st_dev)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+//
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory with open() so that it can be verified as
+  // secure by calling is_dirfd_secure(); then opendir() it and check
+  // that both handles reference the same file system object. Unlike
+  // calling opendir() followed by is_directory_secure(), this leaves
+  // no window of opportunity for the directory to be attacked.
+  int result;
+  DIR *dirp = NULL;
+  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+  if (result == OS_ERR) {
+    // Directory doesn't exist or is a symlink, so there is nothing to cleanup.
+    if (PrintMiscellaneous && Verbose) {
+      if (errno == ELOOP) {
+        warning("directory %s is a symlink and is not secure\n", dirname);
+      } else {
+        warning("could not open directory %s: %s\n", dirname, strerror(errno));
+      }
     }
+    return dirp;
+  }
+  int fd = result;
+
+  // Determine if the open directory is secure.
+  if (!is_dirfd_secure(fd)) {
+    // The directory is not a secure directory.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Open the directory.
+  dirp = ::opendir(dirname);
+  if (dirp == NULL) {
+    // The directory doesn't exist, close fd and return.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Check to make sure fd and dirp are referencing the same file system object.
+  if (!is_same_fsobject(fd, dirfd(dirp))) {
+    // The directory is not secure.
+    os::close(fd);
+    os::closedir(dirp);
+    dirp = NULL;
+    return dirp;
+  }
+
+  // Close initial open now that we know directory is secure
+  os::close(fd);
+
+  return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions.  Once the support for fdopendir(), openat() and unlinkat()
+// is available on all supported versions the code can be changed
+// to use these functions.
+
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+  // Open the directory.
+  DIR* dirp = open_directory_secure(dirname);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to cleanup.
+    return dirp;
+  }
+  int fd = dirfd(dirp);
+
+  // Open a fd to the cwd and save it off.
+  int result;
+  RESTARTABLE(::open(".", O_RDONLY), result);
+  if (result == OS_ERR) {
+    *saved_cwd_fd = -1;
+  } else {
+    *saved_cwd_fd = result;
+  }
+
+  // Set the current directory to dirname by using the fd of the directory.
+  result = fchdir(fd);
+
+  return dirp;
+}
+
+// Close the directory and restore the current working directory.
+//
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+  int result;
+  // If we have a saved cwd change back to it and close the fd.
+  if (saved_cwd_fd != -1) {
+    result = fchdir(saved_cwd_fd);
+    ::close(saved_cwd_fd);
+  }
+
+  // Close the directory.
+  os::closedir(dirp);
+}
+
+// Check if the given file descriptor is considered secure.
+//
+static bool is_file_secure(int fd, const char *filename) {
+
+  int result;
+  struct stat statbuf;
+
+  // Determine if the file is secure.
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed on %s: %s\n", filename, strerror(errno));
+    }
+    return false;
+  }
+  if (statbuf.st_nlink > 1) {
+    // A file with multiple links is not expected.
+    if (PrintMiscellaneous && Verbose) {
+      warning("file %s has multiple links\n", filename);
+    }
+    return false;
   }
   return true;
 }
 
-
 // return the user name for the given user id
 //
 // the caller is expected to free the allocated memory.
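
Taken together, the new helpers form a bracketed pattern: open and validate the directory, make it the current working directory so later open()/unlink() calls are relative and race-free, then restore the previous cwd. A hedged usage sketch, mirroring how the later hunks call these helpers:

```cpp
#include <dirent.h>
#include <cstddef>

// Illustrative caller of the helpers defined above; this mirrors how
// cleanup_sharedmem_resources() and create_sharedmem_resources() use
// them later in the patch.
static void with_secure_dir(const char* dirname) {
  int saved_cwd_fd;
  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
  if (dirp == NULL) {
    return;  // directory missing or insecure; nothing to do
  }
  // ... cwd-relative open()/unlink() work on the directory entries ...
  close_directory_secure_cwd(dirp, saved_cwd_fd);  // restore cwd, close dirp
}
```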
@@ -317,9 +506,11 @@
 
   const char* tmpdirname = os::get_temp_directory();
 
+  // open the temp directory
   DIR* tmpdirp = os::opendir(tmpdirname);
 
   if (tmpdirp == NULL) {
+    // Cannot open the directory to get the user name, return.
     return NULL;
   }
 
@@ -344,25 +535,14 @@
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
-    DIR* subdirp = os::opendir(usrdir_name);
+    // open the user directory
+    DIR* subdirp = open_directory_secure(usrdir_name);
 
     if (subdirp == NULL) {
       FREE_C_HEAP_ARRAY(char, usrdir_name);
       continue;
     }
 
-    // Since we don't create the backing store files in directories
-    // pointed to by symbolic links, we also don't follow them when
-    // looking for the files. We check for a symbolic link after the
-    // call to opendir in order to eliminate a small window where the
-    // symlink can be exploited.
-    //
-    if (!is_directory_secure(usrdir_name)) {
-      FREE_C_HEAP_ARRAY(char, usrdir_name);
-      os::closedir(subdirp);
-      continue;
-    }
-
     struct dirent* udentry;
     char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
     errno = 0;
@@ -465,26 +645,6 @@
 }
 
 
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
-  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
-  strcpy(path, dirname);
-  strcat(path, "/");
-  strcat(path, filename);
-
-  remove_file(path);
-
-  FREE_C_HEAP_ARRAY(char, path);
-}
-
-
 // cleanup stale shared memory resources
 //
 // This method attempts to remove all stale shared memory files in
@@ -496,17 +656,11 @@
 //
 static void cleanup_sharedmem_resources(const char* dirname) {
 
-  // open the user temp directory
-  DIR* dirp = os::opendir(dirname);
-
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
   if (dirp == NULL) {
-    // directory doesn't exist, so there is nothing to cleanup
-    return;
-  }
-
-  if (!is_directory_secure(dirname)) {
-    // the directory is not a secure directory
-    os::closedir(dirp);
+    // directory doesn't exist or is insecure, so there is nothing to cleanup
     return;
   }
 
@@ -520,6 +674,7 @@
   //
   struct dirent* entry;
   char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -530,7 +685,7 @@
       if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
 
         // attempt to remove all unexpected files, except "." and ".."
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
       }
 
       errno = 0;
@@ -553,11 +708,14 @@
     if ((pid == os::current_process_id()) ||
         (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
 
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
     }
     errno = 0;
   }
-  os::closedir(dirp);
+
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
+
   FREE_C_HEAP_ARRAY(char, dbuf);
 }
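
The staleness test above uses the classic `kill(pid, 0)` idiom: signal 0 delivers nothing but still performs the existence and permission checks. A hedged restatement of that test as a standalone predicate:

```cpp
#include <cerrno>
#include <csignal>
#include <unistd.h>

// True when a file named for 'pid' can be treated as stale: it belongs
// to the current process, or signal 0 reports that no such process
// exists (ESRCH) or it is not ours to signal (EPERM), matching the
// logic in the hunk above.
static bool is_stale(pid_t pid) {
  if (pid == getpid()) {
    return true;
  }
  return kill(pid, 0) == -1 && (errno == ESRCH || errno == EPERM);
}
```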
 
@@ -614,19 +772,54 @@
     return -1;
   }
 
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so cannot create shared
+    // memory file.
+    return -1;
+  }
+
+  // Open the filename in the current directory.
+  // Cannot use O_TRUNC here; truncation of an existing file has to happen
+  // after the is_file_secure() check below.
   int result;
-
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
-      warning("could not create file %s: %s\n", filename, strerror(errno));
+      if (errno == ELOOP) {
+        warning("file %s is a symlink and is not secure\n", filename);
+      } else {
+        warning("could not create file %s: %s\n", filename, strerror(errno));
+      }
     }
+    // close the directory and reset the current working directory
+    close_directory_secure_cwd(dirp, saved_cwd_fd);
+
     return -1;
   }
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
 
   // save the file descriptor
   int fd = result;
 
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  // truncate the file to get rid of any existing data
+  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not truncate shared memory file: %s\n", strerror(errno));
+    }
+    ::close(fd);
+    return -1;
+  }
   // set the file size
   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
   if (result == OS_ERR) {
@@ -684,8 +877,15 @@
       THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
     }
   }
+  int fd = result;
 
-  return result;
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  return fd;
 }
 
 // create a named shared memory region. returns the address of the
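
Both the create path and the attach path now follow the same discipline: obtain the file descriptor first, verify it with `is_file_secure()`, and only then truncate or trust the file. A hedged sketch of the create-side ordering (simplified; the real code also handles RESTARTABLE retries, warning output, and the secure-cwd bracket shown earlier):

```cpp
#include <fcntl.h>
#include <unistd.h>

// Simplified ordering sketch; relies on is_file_secure() from the
// patch above and omits retry and error-reporting details.
static int create_counters_file(const char* name, off_t size) {
  // No O_TRUNC here: a hard-linked victim file must be caught by the
  // st_nlink check before any existing data is destroyed.
  int fd = ::open(name, O_RDWR | O_CREAT | O_NOFOLLOW, S_IRUSR | S_IWUSR);
  if (fd < 0) {
    return -1;
  }
  if (!is_file_secure(fd, name)) {
    ::close(fd);
    return -1;
  }
  if (::ftruncate(fd, 0) == -1 ||      // now safe to drop stale contents
      ::ftruncate(fd, size) == -1) {   // then size the region
    ::close(fd);
    return -1;
  }
  return fd;
}
```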
@@ -717,13 +917,21 @@
   char* dirname = get_user_tmp_dir(user_name);
   char* filename = get_sharedmem_filename(dirname, vmid);
 
+  // get the short filename
+  char* short_filename = strrchr(filename, '/');
+  if (short_filename == NULL) {
+    short_filename = filename;
+  } else {
+    short_filename++;
+  }
+
   // cleanup any stale shared memory files
   cleanup_sharedmem_resources(dirname);
 
   assert(((size > 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemory region size");
 
-  fd = create_sharedmem_resources(dirname, filename, size);
+  fd = create_sharedmem_resources(dirname, short_filename, size);
 
   FREE_C_HEAP_ARRAY(char, user_name);
   FREE_C_HEAP_ARRAY(char, dirname);
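
Because the directory is the current working directory when the file is created, only the basename is passed down; the `strrchr` logic above strips the directory prefix. A minimal hedged equivalent:

```cpp
#include <cstring>

// Returns the component after the last '/', or the whole string when
// no '/' is present, mirroring the short_filename logic above.
static const char* basename_of(const char* path) {
  const char* slash = std::strrchr(path, '/');
  return (slash == NULL) ? path : slash + 1;
}
// e.g. basename_of("/tmp/hsperfdata_user/12345") yields "12345"
```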
@@ -838,12 +1046,12 @@
   // constructs for the file and the shared memory mapping.
   if (mode == PerfMemory::PERF_MODE_RO) {
     mmap_prot = PROT_READ;
-    file_flags = O_RDONLY;
+    file_flags = O_RDONLY | O_NOFOLLOW;
   }
   else if (mode == PerfMemory::PERF_MODE_RW) {
 #ifdef LATER
     mmap_prot = PROT_READ | PROT_WRITE;
-    file_flags = O_RDWR;
+    file_flags = O_RDWR | O_NOFOLLOW;
 #else
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Unsupported access mode");
--- a/src/os/linux/vm/perfMemory_linux.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -197,7 +197,38 @@
 }
 
 
-// check if the given path is considered a secure directory for
+// Check if the given statbuf describes a secure directory for
+// the backing store files. Returns true if the directory described
+// is considered a secure location. Returns false if the stat
+// information indicates a symbolic link or another insecure condition.
+//
+static bool is_statbuf_secure(struct stat *statp) {
+  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+    // The path represents a link or some non-directory file type,
+    // which is not what we expected. Declare it insecure.
+    //
+    return false;
+  }
+  // We have an existing directory, check if the permissions are safe.
+  //
+  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+    // The directory is open for writing and could be subjected
+    // to a symlink or a hard link attack. Declare it insecure.
+    //
+    return false;
+  }
+  // See if the uid of the directory matches the effective uid of the process.
+  //
+  if (statp->st_uid != geteuid()) {
+    // The directory was not created by this user, declare it insecure.
+    //
+    return false;
+  }
+  return true;
+}
+
+
+// Check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
 // is a symbolic link or if an error occurred.
@@ -211,22 +242,180 @@
     return false;
   }
 
-  // the path exists, now check it's mode
-  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
-    // the path represents a link or some non-directory file type,
-    // which is not what we expected. declare it insecure.
-    //
+  // The path exists, see if it is secure.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check if the given directory file descriptor is considered a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+//
+static bool is_dirfd_secure(int dir_fd) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+  if (result == OS_ERR) {
     return false;
   }
-  else {
-    // we have an existing directory, check if the permissions are safe.
-    //
-    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
-      // the directory is open for writing and could be subjected
-      // to a symlnk attack. declare it insecure.
-      //
-      return false;
+
+  // The path exists, now check its mode.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+//
+static bool is_same_fsobject(int fd1, int fd2) {
+  struct stat statbuf1;
+  struct stat statbuf2;
+  int result = 0;
+
+  RESTARTABLE(::fstat(fd1, &statbuf1), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+  RESTARTABLE(::fstat(fd2, &statbuf2), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  if ((statbuf1.st_ino == statbuf2.st_ino) &&
+      (statbuf1.st_dev == statbuf2.st_dev)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+//
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory with open() so that it can be verified as
+  // secure by calling is_dirfd_secure(); then opendir() it and check
+  // that both handles reference the same file system object. Unlike
+  // calling opendir() followed by is_directory_secure(), this leaves
+  // no window of opportunity for the directory to be attacked.
+  int result;
+  DIR *dirp = NULL;
+  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      if (errno == ELOOP) {
+        warning("directory %s is a symlink and is not secure\n", dirname);
+      } else {
+        warning("could not open directory %s: %s\n", dirname, strerror(errno));
+      }
     }
+    return dirp;
+  }
+  int fd = result;
+
+  // Determine if the open directory is secure.
+  if (!is_dirfd_secure(fd)) {
+    // The directory is not a secure directory.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Open the directory.
+  dirp = ::opendir(dirname);
+  if (dirp == NULL) {
+    // The directory doesn't exist, close fd and return.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Check to make sure fd and dirp are referencing the same file system object.
+  if (!is_same_fsobject(fd, dirfd(dirp))) {
+    // The directory is not secure.
+    os::close(fd);
+    os::closedir(dirp);
+    dirp = NULL;
+    return dirp;
+  }
+
+  // Close initial open now that we know directory is secure
+  os::close(fd);
+
+  return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions.  Once the support for fdopendir(), openat() and unlinkat()
+// is available on all supported versions the code can be changed
+// to use these functions.
+
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+  // Open the directory.
+  DIR* dirp = open_directory_secure(dirname);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to cleanup.
+    return dirp;
+  }
+  int fd = dirfd(dirp);
+
+  // Open a fd to the cwd and save it off.
+  int result;
+  RESTARTABLE(::open(".", O_RDONLY), result);
+  if (result == OS_ERR) {
+    *saved_cwd_fd = -1;
+  } else {
+    *saved_cwd_fd = result;
+  }
+
+  // Set the current directory to dirname by using the fd of the directory.
+  result = fchdir(fd);
+
+  return dirp;
+}
+
+// Close the directory and restore the current working directory.
+//
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+  int result;
+  // If we have a saved cwd change back to it and close the fd.
+  if (saved_cwd_fd != -1) {
+    result = fchdir(saved_cwd_fd);
+    ::close(saved_cwd_fd);
+  }
+
+  // Close the directory.
+  os::closedir(dirp);
+}
+
+// Check if the given file descriptor is considered secure.
+//
+static bool is_file_secure(int fd, const char *filename) {
+
+  int result;
+  struct stat statbuf;
+
+  // Determine if the file is secure.
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed on %s: %s\n", filename, strerror(errno));
+    }
+    return false;
+  }
+  if (statbuf.st_nlink > 1) {
+    // A file with multiple links is not expected.
+    if (PrintMiscellaneous && Verbose) {
+      warning("file %s has multiple links\n", filename);
+    }
+    return false;
   }
   return true;
 }
@@ -317,9 +506,11 @@
 
   const char* tmpdirname = os::get_temp_directory();
 
+  // open the temp directory
   DIR* tmpdirp = os::opendir(tmpdirname);
 
   if (tmpdirp == NULL) {
+    // Cannot open the directory to get the user name, return.
     return NULL;
   }
 
@@ -344,7 +535,8 @@
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
-    DIR* subdirp = os::opendir(usrdir_name);
+    // open the user directory
+    DIR* subdirp = open_directory_secure(usrdir_name);
 
     if (subdirp == NULL) {
       FREE_C_HEAP_ARRAY(char, usrdir_name);
@@ -465,26 +657,6 @@
 }
 
 
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
-  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
-  strcpy(path, dirname);
-  strcat(path, "/");
-  strcat(path, filename);
-
-  remove_file(path);
-
-  FREE_C_HEAP_ARRAY(char, path);
-}
-
-
 // cleanup stale shared memory resources
 //
 // This method attempts to remove all stale shared memory files in
@@ -496,17 +668,11 @@
 //
 static void cleanup_sharedmem_resources(const char* dirname) {
 
-  // open the user temp directory
-  DIR* dirp = os::opendir(dirname);
-
+  int saved_cwd_fd;
+  // open the directory
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
   if (dirp == NULL) {
-    // directory doesn't exist, so there is nothing to cleanup
-    return;
-  }
-
-  if (!is_directory_secure(dirname)) {
-    // the directory is not a secure directory
-    os::closedir(dirp);
+    // directory doesn't exist or is insecure, so there is nothing to cleanup
     return;
   }
 
@@ -520,6 +686,7 @@
   //
   struct dirent* entry;
   char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -528,9 +695,8 @@
     if (pid == 0) {
 
       if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
-
         // attempt to remove all unexpected files, except "." and ".."
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
       }
 
       errno = 0;
@@ -552,12 +718,14 @@
     //
     if ((pid == os::current_process_id()) ||
         (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
-
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
     }
     errno = 0;
   }
-  os::closedir(dirp);
+
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
+
   FREE_C_HEAP_ARRAY(char, dbuf);
 }
 
@@ -614,19 +782,54 @@
     return -1;
   }
 
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so cannot create shared
+    // memory file.
+    return -1;
+  }
+
+  // Open the filename in the current directory.
+  // Cannot use O_TRUNC here; truncation of an existing file has to happen
+  // after the is_file_secure() check below.
   int result;
-
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
-      warning("could not create file %s: %s\n", filename, strerror(errno));
+      if (errno == ELOOP) {
+        warning("file %s is a symlink and is not secure\n", filename);
+      } else {
+        warning("could not create file %s: %s\n", filename, strerror(errno));
+      }
     }
+    // close the directory and reset the current working directory
+    close_directory_secure_cwd(dirp, saved_cwd_fd);
+
     return -1;
   }
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
 
   // save the file descriptor
   int fd = result;
 
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  // truncate the file to get rid of any existing data
+  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not truncate shared memory file: %s\n", strerror(errno));
+    }
+    ::close(fd);
+    return -1;
+  }
   // set the file size
   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
   if (result == OS_ERR) {
@@ -684,8 +887,15 @@
       THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
     }
   }
+  int fd = result;
 
-  return result;
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  return fd;
 }
 
 // create a named shared memory region. returns the address of the
@@ -716,6 +926,13 @@
 
   char* dirname = get_user_tmp_dir(user_name);
   char* filename = get_sharedmem_filename(dirname, vmid);
+  // get the short filename
+  char* short_filename = strrchr(filename, '/');
+  if (short_filename == NULL) {
+    short_filename = filename;
+  } else {
+    short_filename++;
+  }
 
   // cleanup any stale shared memory files
   cleanup_sharedmem_resources(dirname);
@@ -723,7 +940,7 @@
   assert(((size > 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemory region size");
 
-  fd = create_sharedmem_resources(dirname, filename, size);
+  fd = create_sharedmem_resources(dirname, short_filename, size);
 
   FREE_C_HEAP_ARRAY(char, user_name);
   FREE_C_HEAP_ARRAY(char, dirname);
@@ -838,12 +1055,12 @@
   // constructs for the file and the shared memory mapping.
   if (mode == PerfMemory::PERF_MODE_RO) {
     mmap_prot = PROT_READ;
-    file_flags = O_RDONLY;
+    file_flags = O_RDONLY | O_NOFOLLOW;
   }
   else if (mode == PerfMemory::PERF_MODE_RW) {
 #ifdef LATER
     mmap_prot = PROT_READ | PROT_WRITE;
-    file_flags = O_RDWR;
+    file_flags = O_RDWR | O_NOFOLLOW;
 #else
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Unsupported access mode");
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -199,7 +199,38 @@
 }
 
 
-// check if the given path is considered a secure directory for
+// Check if the given statbuf describes a secure directory for
+// the backing store files. Returns true if the directory described
+// is considered a secure location. Returns false if the stat
+// information indicates a symbolic link or another insecure condition.
+//
+static bool is_statbuf_secure(struct stat *statp) {
+  if (S_ISLNK(statp->st_mode) || !S_ISDIR(statp->st_mode)) {
+    // The path represents a link or some non-directory file type,
+    // which is not what we expected. Declare it insecure.
+    //
+    return false;
+  }
+  // We have an existing directory, check if the permissions are safe.
+  //
+  if ((statp->st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+    // The directory is open for writing and could be subjected
+    // to a symlink or a hard link attack. Declare it insecure.
+    //
+    return false;
+  }
+  // See if the uid of the directory matches the effective uid of the process.
+  //
+  if (statp->st_uid != geteuid()) {
+    // The directory was not created by this user, declare it insecure.
+    //
+    return false;
+  }
+  return true;
+}
+
+
+// Check if the given path is considered a secure directory for
 // the backing store files. Returns true if the directory exists
 // and is considered a secure location. Returns false if the path
 // is a symbolic link or if an error occurred.
@@ -213,27 +244,185 @@
     return false;
   }
 
-  // the path exists, now check it's mode
-  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
-    // the path represents a link or some non-directory file type,
-    // which is not what we expected. declare it insecure.
-    //
+  // The path exists, see if it is secure.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check if the given directory file descriptor refers to a secure
+// directory for the backing store files. Returns true if the directory
+// exists and is considered a secure location. Returns false if the
+// descriptor refers to a symbolic link or if an error occurred.
+//
+static bool is_dirfd_secure(int dir_fd) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::fstat(dir_fd, &statbuf), result);
+  if (result == OS_ERR) {
     return false;
   }
-  else {
-    // we have an existing directory, check if the permissions are safe.
-    //
-    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
-      // the directory is open for writing and could be subjected
-      // to a symlnk attack. declare it insecure.
-      //
-      return false;
+
+  // The fstat() succeeded; now check the mode it reported.
+  return is_statbuf_secure(&statbuf);
+}
+
+
+// Check to make sure fd1 and fd2 are referencing the same file system object.
+//
+static bool is_same_fsobject(int fd1, int fd2) {
+  struct stat statbuf1;
+  struct stat statbuf2;
+  int result = 0;
+
+  RESTARTABLE(::fstat(fd1, &statbuf1), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+  RESTARTABLE(::fstat(fd2, &statbuf2), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  if ((statbuf1.st_ino == statbuf2.st_ino) &&
+      (statbuf1.st_dev == statbuf2.st_dev)) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+// Open the directory of the given path and validate it.
+// Return a DIR * of the open directory.
+//
+static DIR *open_directory_secure(const char* dirname) {
+  // Open the directory with open() so that it can be verified as secure
+  // with is_dirfd_secure(), then call opendir() and check that both refer
+  // to the same file system object.  Unlike calling opendir() followed by
+  // is_directory_secure(), this approach leaves no window of opportunity
+  // during which the directory could be swapped out by an attacker.
+  int result;
+  DIR *dirp = NULL;
+  RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
+  if (result == OS_ERR) {
+    // Directory doesn't exist or is a symlink, so there is nothing to clean up.
+    if (PrintMiscellaneous && Verbose) {
+      if (errno == ELOOP) {
+        warning("directory %s is a symlink and is not secure\n", dirname);
+      } else {
+        warning("could not open directory %s: %s\n", dirname, strerror(errno));
+      }
     }
+    return dirp;
+  }
+  int fd = result;
+
+  // Determine if the open directory is secure.
+  if (!is_dirfd_secure(fd)) {
+    // The directory is not a secure directory.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Open the directory.
+  dirp = ::opendir(dirname);
+  if (dirp == NULL) {
+    // The opendir() call failed; close fd and return.
+    os::close(fd);
+    return dirp;
+  }
+
+  // Check to make sure fd and dirp are referencing the same file system object.
+  if (!is_same_fsobject(fd, dirp->dd_fd)) {
+    // The directory is not secure.
+    os::close(fd);
+    os::closedir(dirp);
+    dirp = NULL;
+    return dirp;
+  }
+
+  // Close initial open now that we know directory is secure
+  os::close(fd);
+
+  return dirp;
+}
+
+// NOTE: The code below uses fchdir(), open() and unlink() because
+// fdopendir(), openat() and unlinkat() are not supported on all
+// versions of Solaris.  Once fdopendir(), openat() and unlinkat()
+// are available on all supported versions, the code can be changed
+// to use these functions.
+
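
For reference, once those calls can be assumed, the fchdir()-based approach below could be replaced with directory-relative operations; a hypothetical sketch of the cleanup loop in that style (not how the current code works):

  #include <dirent.h>
  #include <fcntl.h>
  #include <string.h>
  #include <unistd.h>

  // Remove entries from dirname without ever changing the process cwd.
  static void cleanup_relative(const char* dirname) {
    int dir_fd = open(dirname, O_RDONLY | O_NOFOLLOW);
    if (dir_fd == -1) return;
    DIR* dirp = fdopendir(dir_fd);                 // takes ownership of dir_fd
    if (dirp == NULL) { close(dir_fd); return; }
    struct dirent* entry;
    while ((entry = readdir(dirp)) != NULL) {
      if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
        unlinkat(dir_fd, entry->d_name, 0);        // relative to the open directory
      }
    }
    closedir(dirp);                                // also closes dir_fd
  }
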
+// Open the directory of the given path, validate it and set the
+// current working directory to it.
+// Return a DIR * of the open directory and the saved cwd fd.
+//
+static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
+
+  // Open the directory.
+  DIR* dirp = open_directory_secure(dirname);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so there is nothing to clean up.
+    return dirp;
+  }
+  int fd = dirp->dd_fd;
+
+  // Open a fd to the cwd and save it off.
+  int result;
+  RESTARTABLE(::open(".", O_RDONLY), result);
+  if (result == OS_ERR) {
+    *saved_cwd_fd = -1;
+  } else {
+    *saved_cwd_fd = result;
+  }
+
+  // Set the current directory to dirname by using the fd of the directory.
+  result = fchdir(fd);
+
+  return dirp;
+}
+
+// Close the directory and restore the current working directory.
+//
+static void close_directory_secure_cwd(DIR* dirp, int saved_cwd_fd) {
+
+  int result;
+  // If we have a saved cwd change back to it and close the fd.
+  if (saved_cwd_fd != -1) {
+    result = fchdir(saved_cwd_fd);
+    ::close(saved_cwd_fd);
+  }
+
+  // Close the directory.
+  os::closedir(dirp);
+}
+
+// Check if the given file descriptor refers to a secure file.
+//
+static bool is_file_secure(int fd, const char *filename) {
+
+  int result;
+  struct stat statbuf;
+
+  // Determine if the file is secure.
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed on %s: %s\n", filename, strerror(errno));
+    }
+    return false;
+  }
+  if (statbuf.st_nlink > 1) {
+    // A file with multiple links is not expected.
+    if (PrintMiscellaneous && Verbose) {
+      warning("file %s has multiple links\n", filename);
+    }
+    return false;
   }
   return true;
 }
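
The st_nlink check above is what catches a hard-link attack: a second directory entry for the already-open inode bumps the link count seen through the descriptor. A standalone illustration (file names hypothetical):

  #include <assert.h>
  #include <fcntl.h>
  #include <sys/stat.h>
  #include <unistd.h>

  int main() {
    int fd = open("perfdata", O_RDWR | O_CREAT | O_NOFOLLOW, 0600);
    link("perfdata", "alias");     // attacker adds a second name for the inode
    struct stat sb;
    fstat(fd, &sb);
    assert(sb.st_nlink == 2);      // is_file_secure() would now reject the file
    close(fd);
    unlink("alias");
    unlink("perfdata");
    return 0;
  }
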
 
-
 // return the user name for the given user id
 //
 // the caller is expected to free the allocated memory.
@@ -308,9 +497,11 @@
 
   const char* tmpdirname = os::get_temp_directory();
 
+  // open the temp directory
   DIR* tmpdirp = os::opendir(tmpdirname);
 
   if (tmpdirp == NULL) {
+    // Cannot open the directory to get the user name, return.
     return NULL;
   }
 
@@ -335,7 +526,8 @@
     strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);
 
-    DIR* subdirp = os::opendir(usrdir_name);
+    // open the user directory
+    DIR* subdirp = open_directory_secure(usrdir_name);
 
     if (subdirp == NULL) {
       FREE_C_HEAP_ARRAY(char, usrdir_name);
@@ -504,26 +696,6 @@
 }
 
 
-// remove file
-//
-// this method removes the file with the given file name in the
-// named directory.
-//
-static void remove_file(const char* dirname, const char* filename) {
-
-  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
-  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
-
-  strcpy(path, dirname);
-  strcat(path, "/");
-  strcat(path, filename);
-
-  remove_file(path);
-
-  FREE_C_HEAP_ARRAY(char, path);
-}
-
-
 // cleanup stale shared memory resources
 //
 // This method attempts to remove all stale shared memory files in
@@ -535,17 +707,11 @@
 //
 static void cleanup_sharedmem_resources(const char* dirname) {
 
-  // open the user temp directory
-  DIR* dirp = os::opendir(dirname);
-
+  int saved_cwd_fd;
+  // open the directory
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
   if (dirp == NULL) {
-    // directory doesn't exist, so there is nothing to cleanup
-    return;
-  }
-
-  if (!is_directory_secure(dirname)) {
-    // the directory is not a secure directory
-    os::closedir(dirp);
+    // directory doesn't exist or is insecure, so there is nothing to clean up
     return;
   }
 
@@ -559,6 +725,7 @@
   //
   struct dirent* entry;
   char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+
   errno = 0;
   while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
 
@@ -569,7 +736,7 @@
       if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
 
         // attempt to remove all unexpected files, except "." and ".."
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
       }
 
       errno = 0;
@@ -592,11 +759,14 @@
     if ((pid == os::current_process_id()) ||
         (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
 
-        remove_file(dirname, entry->d_name);
+        unlink(entry->d_name);
     }
     errno = 0;
   }
-  os::closedir(dirp);
+
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
+
   FREE_C_HEAP_ARRAY(char, dbuf);
 }
 
@@ -653,19 +823,54 @@
     return -1;
   }
 
+  int saved_cwd_fd;
+  // open the directory and set the current working directory to it
+  DIR* dirp = open_directory_secure_cwd(dirname, &saved_cwd_fd);
+  if (dirp == NULL) {
+    // Directory doesn't exist or is insecure, so cannot create shared
+    // memory file.
+    return -1;
+  }
+
+  // Open the filename in the current directory.
+  // Cannot use O_TRUNC here; truncation of an existing file has to happen
+  // after the is_file_secure() check below.
   int result;
-
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
-      warning("could not create file %s: %s\n", filename, strerror(errno));
+      if (errno == ELOOP) {
+        warning("file %s is a symlink and is not secure\n", filename);
+      } else {
+        warning("could not create file %s: %s\n", filename, strerror(errno));
+      }
     }
+    // close the directory and reset the current working directory
+    close_directory_secure_cwd(dirp, saved_cwd_fd);
+
     return -1;
   }
+  // close the directory and reset the current working directory
+  close_directory_secure_cwd(dirp, saved_cwd_fd);
 
   // save the file descriptor
   int fd = result;
 
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  // truncate the file to get rid of any existing data
+  RESTARTABLE(::ftruncate(fd, (off_t)0), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not truncate shared memory file: %s\n", strerror(errno));
+    }
+    ::close(fd);
+    return -1;
+  }
   // set the file size
   RESTARTABLE(::ftruncate(fd, (off_t)size), result);
   if (result == OS_ERR) {
@@ -701,8 +906,15 @@
       THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
     }
   }
+  int fd = result;
 
-  return result;
+  // check to see if the file is secure
+  if (!is_file_secure(fd, filename)) {
+    ::close(fd);
+    return -1;
+  }
+
+  return fd;
 }
 
 // create a named shared memory region. returns the address of the
@@ -734,13 +946,21 @@
   char* dirname = get_user_tmp_dir(user_name);
   char* filename = get_sharedmem_filename(dirname, vmid);
 
+  // get the short filename
+  char* short_filename = strrchr(filename, '/');
+  if (short_filename == NULL) {
+    short_filename = filename;
+  } else {
+    short_filename++;
+  }
+
   // cleanup any stale shared memory files
   cleanup_sharedmem_resources(dirname);
 
   assert(((size > 0) && (size % os::vm_page_size() == 0)),
          "unexpected PerfMemory region size");
 
-  fd = create_sharedmem_resources(dirname, filename, size);
+  fd = create_sharedmem_resources(dirname, short_filename, size);
 
   FREE_C_HEAP_ARRAY(char, user_name);
   FREE_C_HEAP_ARRAY(char, dirname);
@@ -856,12 +1076,12 @@
   // constructs for the file and the shared memory mapping.
   if (mode == PerfMemory::PERF_MODE_RO) {
     mmap_prot = PROT_READ;
-    file_flags = O_RDONLY;
+    file_flags = O_RDONLY | O_NOFOLLOW;
   }
   else if (mode == PerfMemory::PERF_MODE_RW) {
 #ifdef LATER
     mmap_prot = PROT_READ | PROT_WRITE;
-    file_flags = O_RDWR;
+    file_flags = O_RDWR | O_NOFOLLOW;
 #else
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
               "Unsupported access mode");
--- a/src/share/vm/c1/c1_CodeStubs.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/c1/c1_CodeStubs.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -601,15 +601,6 @@
   LIR_Opr _addr;
   LIR_Opr _new_val;
 
-  static jbyte* _byte_map_base;
-  static jbyte* byte_map_base_slow();
-  static jbyte* byte_map_base() {
-    if (_byte_map_base == NULL) {
-      _byte_map_base = byte_map_base_slow();
-    }
-    return _byte_map_base;
-  }
-
  public:
   // addr (the address of the object head) and new_val must be registers.
   G1PostBarrierStub(LIR_Opr addr, LIR_Opr new_val): _addr(addr), _new_val(new_val) { }
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -32,6 +32,7 @@
 #include "ci/ciArrayKlass.hpp"
 #include "ci/ciInstance.hpp"
 #include "ci/ciObjArray.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/vm_version.hpp"
@@ -3351,7 +3352,12 @@
   if (!x->inlinee()->is_accessor()) {
     CodeEmitInfo* info = state_for(x, x->state(), true);
     // Notify the runtime very infrequently only to take care of counter overflows
-    increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
+    int freq_log = Tier23InlineeNotifyFreqLog;
+    double scale;
+    if (_method->has_option_value("CompileThresholdScaling", scale)) {
+      freq_log = Arguments::scaled_freq_log(freq_log, scale);
+    }
+    increment_event_counter_impl(info, x->inlinee(), right_n_bits(freq_log), InvocationEntryBci, false, true);
   }
 }
 
@@ -3366,7 +3372,11 @@
     ShouldNotReachHere();
   }
   // Increment the appropriate invocation/backedge counter and notify the runtime.
-  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
+  double scale;
+  if (_method->has_option_value("CompileThresholdScaling", scale)) {
+    freq_log = Arguments::scaled_freq_log(freq_log, scale);
+  }
+  increment_event_counter_impl(info, info->scope()->method(), right_n_bits(freq_log), bci, backedge, true);
 }
 
 void LIRGenerator::decrement_age(CodeEmitInfo* info) {
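
The mask right_n_bits(freq_log) has the low freq_log bits set, so the runtime is notified once every 2^freq_log events, and lowering freq_log raises the notification rate. A small sketch of the arithmetic (the exact rounding inside Arguments::scaled_freq_log is an assumption here):

  #include <cstdio>

  // Mirrors HotSpot's right_n_bits(n): a mask with the low n bits set.
  static int mask_for(int freq_log) { return (1 << freq_log) - 1; }

  int main() {
    int freq_log = 10;
    printf("notify mask: %d\n", mask_for(freq_log));     // 1023 -> every 1024 events
    // With CompileThresholdScaling=0.25, the scaled log is assumed to drop by
    // log2(1/0.25) = 2 bits, making notification four times as frequent:
    int scaled_log = freq_log - 2;
    printf("scaled mask: %d\n", mask_for(scaled_log));   // 255 -> every 256 events
    return 0;
  }
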
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -89,8 +89,8 @@
 public:
   ArgumentMap *_vars;
   ArgumentMap *_stack;
-  short _stack_height;
-  short _max_stack;
+  int _stack_height;
+  int _max_stack;
   bool _initialized;
   ArgumentMap empty_map;
 
--- a/src/share/vm/classfile/systemDictionary.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1905,11 +1905,12 @@
   InstanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
   InstanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
 
-  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
+  initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Cleaner_klass), scan, CHECK);
   InstanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
   InstanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
   InstanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
   InstanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM);
+  InstanceKlass::cast(WK_KLASS(Cleaner_klass))->set_reference_type(REF_CLEANER);
 
   // JSR 292 classes
   WKID jsr292_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
--- a/src/share/vm/classfile/systemDictionary.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/classfile/systemDictionary.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -128,6 +128,7 @@
   do_klass(WeakReference_klass,                         java_lang_ref_WeakReference,               Pre                 ) \
   do_klass(FinalReference_klass,                        java_lang_ref_FinalReference,              Pre                 ) \
   do_klass(PhantomReference_klass,                      java_lang_ref_PhantomReference,            Pre                 ) \
+  do_klass(Cleaner_klass,                               sun_misc_Cleaner,                          Pre                 ) \
   do_klass(Finalizer_klass,                             java_lang_ref_Finalizer,                   Pre                 ) \
                                                                                                                          \
   do_klass(Thread_klass,                                java_lang_Thread,                          Pre                 ) \
--- a/src/share/vm/classfile/verifier.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/classfile/verifier.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1560,14 +1560,14 @@
         case Bytecodes::_invokespecial :
         case Bytecodes::_invokestatic :
           verify_invoke_instructions(
-            &bcs, code_length, &current_frame,
-            &this_uninit, return_type, cp, CHECK_VERIFY(this));
+            &bcs, code_length, &current_frame, (bci >= ex_min && bci < ex_max),
+            &this_uninit, return_type, cp, &stackmap_table, CHECK_VERIFY(this));
           no_control_flow = false; break;
         case Bytecodes::_invokeinterface :
         case Bytecodes::_invokedynamic :
           verify_invoke_instructions(
-            &bcs, code_length, &current_frame,
-            &this_uninit, return_type, cp, CHECK_VERIFY(this));
+            &bcs, code_length, &current_frame, (bci >= ex_min && bci < ex_max),
+            &this_uninit, return_type, cp, &stackmap_table, CHECK_VERIFY(this));
           no_control_flow = false; break;
         case Bytecodes::_new :
         {
@@ -2412,8 +2412,9 @@
 
 void ClassVerifier::verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
-    StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
-    constantPoolHandle cp, TRAPS) {
+    StackMapFrame* current_frame, u4 code_length, bool in_try_block,
+    bool *this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
+    TRAPS) {
   u2 bci = bcs->bci();
   VerificationType type = current_frame->pop_stack(
     VerificationType::reference_check(), CHECK_VERIFY(this));
@@ -2429,28 +2430,36 @@
       return;
     }
 
-    // Check if this call is done from inside of a TRY block.  If so, make
-    // sure that all catch clause paths end in a throw.  Otherwise, this
-    // can result in returning an incomplete object.
-    ExceptionTable exhandlers(_method());
-    int exlength = exhandlers.length();
-    for(int i = 0; i < exlength; i++) {
-      u2 start_pc = exhandlers.start_pc(i);
-      u2 end_pc = exhandlers.end_pc(i);
+    // If this invokespecial call is done from inside of a TRY block then make
+    // sure that all catch clause paths end in a throw.  Otherwise, this can
+    // result in returning an incomplete object.
+    if (in_try_block) {
+      ExceptionTable exhandlers(_method());
+      int exlength = exhandlers.length();
+      for(int i = 0; i < exlength; i++) {
+        u2 start_pc = exhandlers.start_pc(i);
+        u2 end_pc = exhandlers.end_pc(i);
 
-      if (bci >= start_pc && bci < end_pc) {
-        if (!ends_in_athrow(exhandlers.handler_pc(i))) {
-          verify_error(ErrorContext::bad_code(bci),
-            "Bad <init> method call from after the start of a try block");
-          return;
-        } else if (VerboseVerification) {
-          ResourceMark rm;
-          tty->print_cr(
-            "Survived call to ends_in_athrow(): %s",
-                        current_class()->name()->as_C_string());
+        if (bci >= start_pc && bci < end_pc) {
+          if (!ends_in_athrow(exhandlers.handler_pc(i))) {
+            verify_error(ErrorContext::bad_code(bci),
+              "Bad <init> method call from after the start of a try block");
+            return;
+          } else if (VerboseVerification) {
+            ResourceMark rm;
+            tty->print_cr(
+              "Survived call to ends_in_athrow(): %s",
+              current_class()->name()->as_C_string());
+          }
         }
       }
-    }
+
+      // Check the exception handler target stackmaps with the locals from the
+      // incoming stackmap (before initialize_object() changes them to outgoing
+      // state).
+      verify_exception_handler_targets(bci, true, current_frame,
+                                       stackmap_table, CHECK_VERIFY(this));
+    } // in_try_block
 
     current_frame->initialize_object(type, current_type());
     *this_uninit = true;
@@ -2504,6 +2513,13 @@
         }
       }
     }
+    // Check the exception handler target stackmaps with the locals from the
+    // incoming stackmap (before initialize_object() changes them to outgoing
+    // state).
+    if (in_try_block) {
+      verify_exception_handler_targets(bci, *this_uninit, current_frame,
+                                       stackmap_table, CHECK_VERIFY(this));
+    }
     current_frame->initialize_object(type, new_class_type);
   } else {
     verify_error(ErrorContext::bad_type(bci, current_frame->stack_top_ctx()),
@@ -2532,8 +2548,8 @@
 
 void ClassVerifier::verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
-    bool *this_uninit, VerificationType return_type,
-    constantPoolHandle cp, TRAPS) {
+    bool in_try_block, bool *this_uninit, VerificationType return_type,
+    constantPoolHandle cp, StackMapTable* stackmap_table, TRAPS) {
   // Make sure the constant pool item is the right type
   u2 index = bcs->get_index_u2();
   Bytecodes::Code opcode = bcs->raw_code();
@@ -2699,7 +2715,8 @@
       opcode != Bytecodes::_invokedynamic) {
     if (method_name == vmSymbols::object_initializer_name()) {  // <init> method
       verify_invoke_init(bcs, index, ref_class_type, current_frame,
-        code_length, this_uninit, cp, CHECK_VERIFY(this));
+        code_length, in_try_block, this_uninit, cp, stackmap_table,
+        CHECK_VERIFY(this));
     } else {   // other methods
       // Ensures that target class is assignable to method class.
       if (opcode == Bytecodes::_invokespecial) {
--- a/src/share/vm/classfile/verifier.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/classfile/verifier.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -301,8 +301,9 @@
 
   void verify_invoke_init(
     RawBytecodeStream* bcs, u2 ref_index, VerificationType ref_class_type,
-    StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
-    constantPoolHandle cp, TRAPS);
+    StackMapFrame* current_frame, u4 code_length, bool in_try_block,
+    bool* this_uninit, constantPoolHandle cp, StackMapTable* stackmap_table,
+    TRAPS);
 
   // Used by ends_in_athrow() to push all handlers that contain bci onto
   // the handler_stack, if the handler is not already on the stack.
@@ -316,8 +317,8 @@
 
   void verify_invoke_instructions(
     RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
-    bool* this_uninit, VerificationType return_type,
-    constantPoolHandle cp, TRAPS);
+    bool in_try_block, bool* this_uninit, VerificationType return_type,
+    constantPoolHandle cp, StackMapTable* stackmap_table, TRAPS);
 
   VerificationType get_newarray_type(u2 index, u2 bci, TRAPS);
   void verify_anewarray(u2 bci, u2 index, constantPoolHandle cp,
--- a/src/share/vm/classfile/vmSymbols.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -79,6 +79,7 @@
   template(java_lang_ref_WeakReference,               "java/lang/ref/WeakReference")              \
   template(java_lang_ref_FinalReference,              "java/lang/ref/FinalReference")             \
   template(java_lang_ref_PhantomReference,            "java/lang/ref/PhantomReference")           \
+  template(sun_misc_Cleaner,                          "sun/misc/Cleaner")                         \
   template(java_lang_ref_Finalizer,                   "java/lang/ref/Finalizer")                  \
   template(java_lang_reflect_AccessibleObject,        "java/lang/reflect/AccessibleObject")       \
   template(java_lang_reflect_Method,                  "java/lang/reflect/Method")                 \
--- a/src/share/vm/code/codeCache.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/code/codeCache.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -233,8 +233,8 @@
 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
   // Determine alignment
   const size_t page_size = os::can_execute_large_page_memory() ?
-          MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
-               os::page_size_for_region(size, 8)) :
+          MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
+               os::page_size_for_region_aligned(size, 8)) :
           os::vm_page_size();
   const size_t granularity = os::vm_allocation_granularity();
   const size_t r_align = MAX2(page_size, granularity);
--- a/src/share/vm/code/dependencies.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/code/dependencies.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -560,7 +560,7 @@
       put_star = !Dependencies::is_concrete_klass((Klass*)arg.metadata_value());
     } else if (arg.is_method()) {
       what = "method ";
-      put_star = !Dependencies::is_concrete_method((Method*)arg.metadata_value());
+      put_star = !Dependencies::is_concrete_method((Method*)arg.metadata_value(), NULL);
     } else if (arg.is_klass()) {
       what = "class  ";
     } else {
@@ -878,8 +878,8 @@
         // Static methods don't override non-static so punt
         return true;
       }
-      if (   !Dependencies::is_concrete_method(lm)
-          && !Dependencies::is_concrete_method(m)
+      if (   !Dependencies::is_concrete_method(lm, k)
+          && !Dependencies::is_concrete_method(m, ctxk)
           && lm->method_holder()->is_subtype_of(m->method_holder()))
         // Method m is overridden by lm, but both are non-concrete.
         return true;
@@ -915,8 +915,17 @@
     } else if (!k->oop_is_instance()) {
       return false; // no methods to find in an array type
     } else {
-      Method* m = InstanceKlass::cast(k)->find_method(_name, _signature);
-      if (m == NULL || !Dependencies::is_concrete_method(m))  return false;
+      // Search class hierarchy first.
+      Method* m = InstanceKlass::cast(k)->find_instance_method(_name, _signature);
+      if (!Dependencies::is_concrete_method(m, k)) {
+        // Check interface defaults also, if any exist.
+        Array<Method*>* default_methods = InstanceKlass::cast(k)->default_methods();
+        if (default_methods == NULL)
+            return false;
+        m = InstanceKlass::cast(k)->find_method(default_methods, _name, _signature);
+        if (!Dependencies::is_concrete_method(m, NULL))
+            return false;
+      }
       _found_methods[_num_participants] = m;
       // Note:  If add_participant(k) is called,
       // the method m will already be memoized for it.
@@ -1209,15 +1218,17 @@
   return true;
 }
 
-bool Dependencies::is_concrete_method(Method* m) {
-  // Statics are irrelevant to virtual call sites.
-  if (m->is_static())  return false;
-
-  // We could also return false if m does not yet appear to be
-  // executed, if the VM version supports this distinction also.
-  // Default methods are considered "concrete" as well.
-  return !m->is_abstract() &&
-         !m->is_overpass(); // error functions aren't concrete
+bool Dependencies::is_concrete_method(Method* m, Klass* k) {
+  // NULL is not a concrete method;
+  // statics are irrelevant to virtual call sites;
+  // abstract methods are not concrete;
+  // overpass (error) methods are not concrete if k is abstract.
+  //
+  // Note that "true" is the conservative answer: the overpass clause is
+  // false when k == NULL, so the result is true whenever the answer would
+  // otherwise depend on the overpass clause alone.
+  return !(m == NULL || m->is_static() || m->is_abstract() ||
+           (m->is_overpass() && k != NULL && k->is_abstract()));
 }
 
 
@@ -1242,16 +1253,6 @@
   return true;
 }
 
-bool Dependencies::is_concrete_method(ciMethod* m) {
-  // Statics are irrelevant to virtual call sites.
-  if (m->is_static())  return false;
-
-  // We could also return false if m does not yet appear to be
-  // executed, if the VM version supports this distinction also.
-  return !m->is_abstract();
-}
-
-
 bool Dependencies::has_finalizable_subclass(ciInstanceKlass* k) {
   return k->has_finalizable_subclass();
 }
@@ -1469,7 +1470,7 @@
   Klass* wit = wf.find_witness_definer(ctxk);
   if (wit != NULL)  return NULL;  // Too many witnesses.
   Method* fm = wf.found_method(0);  // Will be NULL if num_parts == 0.
-  if (Dependencies::is_concrete_method(m)) {
+  if (Dependencies::is_concrete_method(m, ctxk)) {
     if (fm == NULL) {
       // It turns out that m was always the only implementation.
       fm = m;
@@ -1499,61 +1500,6 @@
   return wf.find_witness_definer(ctxk, changes);
 }
 
-// Find the set of all non-abstract methods under ctxk that match m[0].
-// (The method m[0] must be defined or inherited in ctxk.)
-// Include m itself in the set, unless it is abstract.
-// Fill the given array m[0..(mlen-1)] with this set, and return the length.
-// (The length may be zero if no concrete methods are found anywhere.)
-// If there are too many concrete methods to fit in marray, return -1.
-int Dependencies::find_exclusive_concrete_methods(Klass* ctxk,
-                                                  int mlen,
-                                                  Method* marray[]) {
-  Method* m0 = marray[0];
-  ClassHierarchyWalker wf(m0);
-  assert(wf.check_method_context(ctxk, m0), "proper context");
-  wf.record_witnesses(mlen);
-  bool participants_hide_witnesses = true;
-  Klass* wit = wf.find_witness_definer(ctxk);
-  if (wit != NULL)  return -1;  // Too many witnesses.
-  int num = wf.num_participants();
-  assert(num <= mlen, "oob");
-  // Keep track of whether m is also part of the result set.
-  int mfill = 0;
-  assert(marray[mfill] == m0, "sanity");
-  if (Dependencies::is_concrete_method(m0))
-    mfill++;  // keep m0 as marray[0], the first result
-  for (int i = 0; i < num; i++) {
-    Method* fm = wf.found_method(i);
-    if (fm == m0)  continue;  // Already put this guy in the list.
-    if (mfill == mlen) {
-      return -1;              // Oops.  Too many methods after all!
-    }
-    marray[mfill++] = fm;
-  }
-#ifndef PRODUCT
-  // Make sure the dependency mechanism will pass this discovery:
-  if (VerifyDependencies) {
-    // Turn off dependency tracing while actually testing deps.
-    FlagSetting fs(TraceDependencies, false);
-    switch (mfill) {
-    case 1:
-      guarantee(NULL == (void *)check_unique_concrete_method(ctxk, marray[0]),
-                "verify dep.");
-      break;
-    case 2:
-      guarantee(NULL == (void *)
-                check_exclusive_concrete_methods(ctxk, marray[0], marray[1]),
-                "verify dep.");
-      break;
-    default:
-      ShouldNotReachHere();  // mlen > 2 yet supported
-    }
-  }
-#endif //PRODUCT
-  return mfill;
-}
-
-
 Klass* Dependencies::check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepChange* changes) {
   Klass* search_at = ctxk;
   if (changes != NULL)
@@ -1561,7 +1507,6 @@
   return find_finalizable_subclass(search_at);
 }
 
-
 Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
   assert(call_site    ->is_a(SystemDictionary::CallSite_klass()),     "sanity");
   assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "sanity");
--- a/src/share/vm/code/dependencies.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/code/dependencies.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -288,7 +288,7 @@
   // In that case, there would be a middle ground between concrete
   // and abstract (as defined by the Java language and VM).
   static bool is_concrete_klass(Klass* k);    // k is instantiable
-  static bool is_concrete_method(Method* m);  // m is invocable
+  static bool is_concrete_method(Method* m, Klass* k);  // m is invocable
   static Klass* find_finalizable_subclass(Klass* k);
 
   // These versions of the concreteness queries work through the CI.
@@ -302,7 +302,6 @@
   // not go back into the VM to get their value; they must cache the
   // bit in the CI, either eagerly or lazily.)
   static bool is_concrete_klass(ciInstanceKlass* k); // k appears instantiable
-  static bool is_concrete_method(ciMethod* m);       // m appears invocable
   static bool has_finalizable_subclass(ciInstanceKlass* k);
 
   // As a general rule, it is OK to compile under the assumption that
@@ -349,7 +348,6 @@
   static Klass*    find_unique_concrete_subtype(Klass* ctxk);
   static Method*   find_unique_concrete_method(Klass* ctxk, Method* m);
   static int       find_exclusive_concrete_subtypes(Klass* ctxk, int klen, Klass* k[]);
-  static int       find_exclusive_concrete_methods(Klass* ctxk, int mlen, Method* m[]);
 
   // Create the encoding which will be stored in an nmethod.
   void encode_content_bytes();
--- a/src/share/vm/compiler/compileBroker.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1470,7 +1470,9 @@
 
   // The method may be explicitly excluded by the user.
   bool quietly;
-  if (CompilerOracle::should_exclude(method, quietly)) {
+  double scale;
+  if (CompilerOracle::should_exclude(method, quietly)
+      || (CompilerOracle::has_option_value(method, "CompileThresholdScaling", scale) && scale == 0)) {
     if (!quietly) {
       // This does not happen quietly...
       ResourceMark rm;
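
A per-method scale of zero thus acts as an exclude directive. Assuming the typed CompilerOracle option syntax (method name hypothetical), such a value could be supplied on the command line as:

  -XX:CompileCommand=option,MyClass::hotMethod,double,CompileThresholdScaling,0.0
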
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -645,7 +645,7 @@
   // Support for parallelizing survivor space rescan
   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
     const size_t max_plab_samples =
-      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
+      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
 
     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
@@ -703,6 +703,12 @@
   _inter_sweep_timer.start();  // start of time
 }
 
+size_t CMSCollector::plab_sample_minimum_size() {
+  // The default value of MinTLABSize is 2k, but there is
+  // no way to get the default value if the flag has been overridden.
+  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
+}
+
 const char* ConcurrentMarkSweepGeneration::name() const {
   return "concurrent mark-sweep generation";
 }
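
Worked numbers for the floor in plab_sample_minimum_size(): with the default MinTLABSize of 2K noted in the comment, ThreadLocalAllocBuffer::min_size() * HeapWordSize already comes to at least 2K, so the MAX2 against 2 * K only takes effect when MinTLABSize has been overridden to something smaller; either way max_plab_samples stays bounded.
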
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -737,6 +737,10 @@
   size_t*    _cursor;
   ChunkArray* _survivor_plab_array;
 
+  // A bounded minimum size for PLABs; it should not return values that are
+  // too small, since they determine the size of the data structures used
+  // for the parallel young gen rescan.
+  size_t plab_sample_minimum_size();
+
   // Support for marking stack overflow handling
   bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
   bool par_take_from_overflow_list(size_t num,
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,13 @@
     // active field set to true.
     PtrQueue(qset_, perm, true /* active */) { }
 
+  // Flush before destroying; queue may be used to capture pending work while
+  // doing something else, with auto-flush on completion.
+  ~DirtyCardQueue() { if (!is_permanent()) flush(); }
+
+  // Process queue entries and release resources.
+  void flush() { flush_impl(); }
+
   // Apply the closure to all elements, and reset the index to make the
   // buffer empty.  If a closure application returns "false", return
   // "false" immediately, halting the iteration.  If "consume" is true,
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -162,8 +162,8 @@
          "we should have already filtered out humongous regions");
   assert(_end == orig_end(),
          "we should have already filtered out humongous regions");
-
-  _in_collection_set = false;
+  assert(!_in_collection_set,
+         err_msg("Should not clear heap region %u in the collection set", hrm_index()));
 
   set_allocation_context(AllocationContext::system());
   set_young_index_in_cset(-1);
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -31,11 +31,15 @@
 #include "runtime/thread.inline.hpp"
 
 PtrQueue::PtrQueue(PtrQueueSet* qset, bool perm, bool active) :
-  _qset(qset), _buf(NULL), _index(0), _active(active),
+  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
   _perm(perm), _lock(NULL)
 {}
 
-void PtrQueue::flush() {
+PtrQueue::~PtrQueue() {
+  assert(_perm || (_buf == NULL), "queue must be flushed before delete");
+}
+
+void PtrQueue::flush_impl() {
   if (!_perm && _buf != NULL) {
     if (_index == _sz) {
       // No work to do.
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,15 +65,18 @@
   Mutex* _lock;
 
   PtrQueueSet* qset() { return _qset; }
+  bool is_permanent() const { return _perm; }
+
+  // Process queue entries and release resources, if not permanent.
+  void flush_impl();
 
 public:
   // Initialize this queue to contain a null buffer, and be part of the
   // given PtrQueueSet.
   PtrQueue(PtrQueueSet* qset, bool perm = false, bool active = false);
-  // Release any contained resources.
-  virtual void flush();
-  // Calls flush() when destroyed.
-  ~PtrQueue() { flush(); }
+
+  // Requires queue flushed or permanent.
+  ~PtrQueue();
 
   // Associate a lock with a ptr queue.
   void set_lock(Mutex* lock) { _lock = lock; }
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -39,7 +39,7 @@
   // first before we flush it, otherwise we might end up with an
   // enqueued buffer with refs into the CSet which breaks our invariants.
   filter();
-  PtrQueue::flush();
+  flush_impl();
 }
 
 // This method removes entries from an SATB buffer that will not be
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,9 +60,8 @@
     // field to true. This is done in JavaThread::initialize_queues().
     PtrQueue(qset, perm, false /* active */) { }
 
-  // Overrides PtrQueue::flush() so that it can filter the buffer
-  // before it is flushed.
-  virtual void flush();
+  // Process queue entries and free resources.
+  void flush();
 
   // Overrides PtrQueue::should_enqueue_buffer(). See the method's
   // definition for more information.
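
The reshuffle above replaces a virtual flush() that was called from the PtrQueue destructor, at a point where the derived SATB queue part (and its filtering override) had already been destroyed, with a non-virtual flush_impl() that each destructor calls while the whole object is still alive. A minimal standalone sketch of the pattern (class names hypothetical):

  #include <cassert>

  struct PtrQueueSketch {
    bool flushed = false;
    void flush_impl() { flushed = true; }      // non-virtual resource release
    ~PtrQueueSketch() { assert(flushed); }     // contract: flush before delete
  };

  struct SatbQueueSketch : PtrQueueSketch {
    void flush() { /* filter entries first */ flush_impl(); }
    ~SatbQueueSketch() { flush(); }            // runs while derived part is alive
  };

  int main() {
    SatbQueueSketch q;   // destroyed flushed: the base-class assert passes
    return 0;
  }
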
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1194,8 +1194,10 @@
         return real_forwardee(old);
     }
 
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
-                                       old, m, sz);
+    if (!_promotion_failed) {
+      new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
+                                        old, m, sz);
+    }
 
     if (new_obj == NULL) {
       // promotion failed, forward to self
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -61,9 +61,9 @@
 
 void GenerationSizer::initialize_size_info() {
   trace_gen_sizes("ps heap raw");
-  const size_t max_page_sz = os::page_size_for_region(_max_heap_byte_size, 8);
+  const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
   const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
-  const size_t min_page_sz = os::page_size_for_region(_min_heap_byte_size, min_pages);
+  const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
   const size_t page_sz = MIN2(max_page_sz, min_page_sz);
 
   // Can a page size be something else than a power of two?
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -41,7 +41,7 @@
 
   const size_t words = bits / BitsPerWord;
   const size_t raw_bytes = words * sizeof(idx_t);
-  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -403,7 +403,7 @@
 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 {
   const size_t raw_bytes = count * element_size;
-  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -63,7 +63,8 @@
   virtual ~ParGCAllocBuffer() {}
 
   static const size_t min_size() {
-    return ThreadLocalAllocBuffer::min_size();
+    // Make sure that we return something that is larger than AlignmentReserve
+    return align_object_size(MAX2(MinTLABSize / HeapWordSize, (uintx)oopDesc::header_size())) + AlignmentReserve;
   }
 
   static const size_t max_size() {
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -53,7 +53,7 @@
 /*
  * USELABELS - If using GCC, then use labels for the opcode dispatching
  * rather -then a switch statement. This improves performance because it
- * gives us the oportunity to have the instructions that calculate the
+ * gives us the opportunity to have the instructions that calculate the
  * next opcode to jump to be intermixed with the rest of the instructions
  * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
  */
--- a/src/share/vm/interpreter/invocationCounter.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/interpreter/invocationCounter.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -36,7 +36,7 @@
 // Implementation notes: For space reasons, state & counter are both encoded in one word,
 // The state is encoded using some of the least significant bits, the counter is using the
 // more significant bits. The counter is incremented before a method is activated and an
-// action is triggered when when count() > limit().
+// action is triggered when count() > limit().
 
 class InvocationCounter VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
@@ -48,7 +48,6 @@
     number_of_state_bits = 2,
     number_of_carry_bits = 1,
     number_of_noncount_bits = number_of_state_bits + number_of_carry_bits,
-    number_of_count_bits = BitsPerInt - number_of_noncount_bits,
     state_limit          = nth_bit(number_of_state_bits),
     count_grain          = nth_bit(number_of_state_bits + number_of_carry_bits),
     carry_mask           = right_n_bits(number_of_carry_bits) << number_of_state_bits,
@@ -68,6 +67,7 @@
     count_increment      = count_grain,          // use this value to increment the 32bit _counter word
     count_mask_value     = count_mask,           // use this value to mask the backedge counter
     count_shift          = number_of_noncount_bits,
+    number_of_count_bits = BitsPerInt - number_of_noncount_bits,
     count_limit          = nth_bit(number_of_count_bits - 1)
   };
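
Putting the constants above together gives the layout of the 32-bit counter word:

  bits [31..3] : count   (number_of_count_bits = BitsPerInt - 3 = 29)
  bit  [2]     : carry   (number_of_carry_bits = 1)
  bits [1..0]  : state   (number_of_state_bits = 2)

so count_shift is 3 and count_increment = count_grain = nth_bit(3) = 8: each method activation adds 8 to the raw word, which raises the encoded count by exactly 1.
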
 
--- a/src/share/vm/memory/heap.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/memory/heap.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -104,8 +104,8 @@
   size_t page_size = os::vm_page_size();
   if (os::can_execute_large_page_memory()) {
     const size_t min_pages = 8;
-    page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
-                     os::page_size_for_region(rs.size(), min_pages));
+    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
+                     os::page_size_for_region_aligned(rs.size(), min_pages));
   }
 
   const size_t granularity = os::vm_allocation_granularity();
--- a/src/share/vm/memory/referenceProcessor.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/memory/referenceProcessor.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -118,6 +118,7 @@
   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
+  _discoveredCleanerRefs = &_discoveredPhantomRefs[_max_num_q];
 
   // Initialize all entries to NULL
   for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
@@ -246,6 +247,13 @@
     phantom_count =
       process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                  is_alive, keep_alive, complete_gc, task_executor);
+
+    // Process cleaners, but include them in phantom statistics.  We expect
+    // Cleaner references to be temporary, and don't want to deal with
+    // possible incompatibilities arising from making it more visible.
+    phantom_count +=
+      process_discovered_reflist(_discoveredCleanerRefs, NULL, false,
+                                 is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Weak global JNI references. It would make more sense (semantically) to
@@ -885,6 +893,7 @@
   balance_queues(_discoveredWeakRefs);
   balance_queues(_discoveredFinalRefs);
   balance_queues(_discoveredPhantomRefs);
+  balance_queues(_discoveredCleanerRefs);
 }
 
 size_t
@@ -998,6 +1007,9 @@
     case REF_PHANTOM:
       list = &_discoveredPhantomRefs[id];
       break;
+    case REF_CLEANER:
+      list = &_discoveredCleanerRefs[id];
+      break;
     case REF_NONE:
       // we should not reach here if we are an InstanceRefKlass
     default:
@@ -1263,6 +1275,17 @@
       preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
+
+    // Cleaner references.  Included in timing for phantom references.  We
+    // expect Cleaner references to be temporary, and don't want to deal with
+    // possible incompatibilities arising from making it more visible.
+    for (uint i = 0; i < _max_num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
+      preclean_discovered_reflist(_discoveredCleanerRefs[i], is_alive,
+                                  keep_alive, complete_gc, yield);
+    }
   }
 }
 
@@ -1331,6 +1354,7 @@
      case 1: return "WeakRef";
      case 2: return "FinalRef";
      case 3: return "PhantomRef";
+     case 4: return "CleanerRef";
    }
    ShouldNotReachHere();
    return NULL;
--- a/src/share/vm/memory/referenceProcessor.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/memory/referenceProcessor.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -264,9 +264,10 @@
   DiscoveredList* _discoveredWeakRefs;
   DiscoveredList* _discoveredFinalRefs;
   DiscoveredList* _discoveredPhantomRefs;
+  DiscoveredList* _discoveredCleanerRefs;
 
  public:
-  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
+  static int number_of_subclasses_of_ref() { return (REF_CLEANER - REF_OTHER); }
 
   uint num_q()                             { return _num_q; }
   uint max_num_q()                         { return _max_num_q; }
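
With REF_CLEANER appended after REF_PHANTOM, number_of_subclasses_of_ref() grows from (REF_PHANTOM - REF_OTHER) = 4 to (REF_CLEANER - REF_OTHER) = 5, so the single allocation behind _discoveredSoftRefs now carries five sub-lists per queue, and _discoveredCleanerRefs points one stride of _max_num_q past the phantom lists, matching the pointer chain set up in the .cpp hunk above.
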
--- a/src/share/vm/memory/referenceType.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/memory/referenceType.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -35,7 +35,8 @@
   REF_SOFT,      // Subclass of java/lang/ref/SoftReference
   REF_WEAK,      // Subclass of java/lang/ref/WeakReference
   REF_FINAL,     // Subclass of java/lang/ref/FinalReference
-  REF_PHANTOM    // Subclass of java/lang/ref/PhantomReference
+  REF_PHANTOM,   // Subclass of java/lang/ref/PhantomReference
+  REF_CLEANER    // Subclass of sun/misc/Cleaner
 };
 
 #endif // SHARE_VM_MEMORY_REFRERENCETYPE_HPP
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -235,22 +235,19 @@
 }
 
 size_t ThreadLocalAllocBuffer::initial_desired_size() {
-  size_t init_sz;
+  size_t init_sz = 0;
 
   if (TLABSize > 0) {
-    init_sz = MIN2(TLABSize / HeapWordSize, max_size());
-  } else if (global_stats() == NULL) {
-    // Startup issue - main thread initialized before heap initialized.
-    init_sz = min_size();
-  } else {
+    init_sz = TLABSize / HeapWordSize;
+  } else if (global_stats() != NULL) {
     // Initial size is a function of the average number of allocating threads.
     unsigned nof_threads = global_stats()->allocating_threads_avg();
 
     init_sz  = (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize) /
                       (nof_threads * target_refills());
     init_sz = align_object_size(init_sz);
-    init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
   }
+  init_sz = MIN2(MAX2(init_sz, min_size()), max_size());
   return init_sz;
 }
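
A worked pass through the reworked sizing (numbers hypothetical): with TLABSize=64K and an 8-byte HeapWordSize, init_sz starts at 8192 words, and the shared MIN2(MAX2(init_sz, min_size()), max_size()) line leaves it unchanged so long as it lies within [min_size(), max_size()]; at startup, when global_stats() is still NULL, init_sz stays 0 and the same clamp raises it to min_size(), reproducing the deleted special case.
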
 
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -106,7 +106,7 @@
     // do nothing.  tlabs must be inited by initialize() calls
   }
 
-  static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize); }
+  static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
   static const size_t max_size()                 { assert(_max_size != 0, "max_size not set up"); return _max_size; }
   static void set_max_size(size_t max_size)      { _max_size = max_size; }
 
--- a/src/share/vm/oops/instanceKlass.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1434,6 +1434,12 @@
   return meth;
 }
 
+// find_instance_method looks up the name/signature in the local methods array
+// and skips over static methods
+Method* InstanceKlass::find_instance_method(Symbol* name, Symbol* signature) {
+  return InstanceKlass::find_instance_method(methods(), name, signature);
+}
+
 // find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(
     Array<Method*>* methods, Symbol* name, Symbol* signature) {
@@ -1446,6 +1452,12 @@
   return hit >= 0 ? methods->at(hit): NULL;
 }
 
+bool InstanceKlass::method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static) {
+  return (m->signature() == signature) &&
+         (!skipping_overpass || !m->is_overpass()) &&
+         (!skipping_static || !m->is_static());
+}
+
 // Used directly for default_methods to find the index into the
 // default_vtable_indices, and indirectly by find_method
 // find_method_index looks in the local methods array to return the index
@@ -1460,13 +1472,10 @@
   int hit = binary_search(methods, name);
   if (hit != -1) {
     Method* m = methods->at(hit);
+
     // Do linear search to find matching signature.  First, quick check
     // for common case, ignoring overpasses if requested.
-    if ((m->signature() == signature) &&
-        (!skipping_overpass || !m->is_overpass()) &&
-        (!skipping_static || !m->is_static())) {
-      return hit;
-    }
+    if (method_matches(m, signature, skipping_overpass, skipping_static)) return hit;
 
     // search downwards through overloaded methods
     int i;
@@ -1474,22 +1483,14 @@
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if ((m->signature() == signature) &&
-            (!skipping_overpass || !m->is_overpass()) &&
-            (!skipping_static || !m->is_static())) {
-          return i;
-        }
+        if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
     }
     // search upwards
     for (i = hit + 1; i < methods->length(); ++i) {
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if ((m->signature() == signature) &&
-            (!skipping_overpass || !m->is_overpass()) &&
-            (!skipping_static || !m->is_static())) {
-          return i;
-        }
+        if (method_matches(m, signature, skipping_overpass, skipping_static)) return i;
     }
     // not found
 #ifdef ASSERT
--- a/src/share/vm/oops/instanceKlass.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/oops/instanceKlass.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -490,8 +490,14 @@
   // find a local method (returns NULL if not found)
   Method* find_method(Symbol* name, Symbol* signature) const;
   static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
+
+  // find a local method, but skip static methods
+  Method* find_instance_method(Symbol* name, Symbol* signature);
   static Method* find_instance_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
 
+  // Returns true if the method matches the signature and conforms to the skipping_X conditions.
+  static bool method_matches(Method* m, Symbol* signature, bool skipping_overpass, bool skipping_static);
+
   // find a local method index in default_methods (returns -1 if not found)
   static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature,
                                OverpassLookupMode overpass_mode, StaticLookupMode static_mode);
--- a/src/share/vm/oops/method.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/oops/method.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -412,15 +412,14 @@
   }
 
   methodHandle mh(m);
-  ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
-  MethodCounters* counters = MethodCounters::allocate(loader_data, THREAD);
+  MethodCounters* counters = MethodCounters::allocate(mh, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CompileBroker::log_metaspace_failure();
     ClassLoaderDataGraph::set_metaspace_oom(true);
     return NULL;   // return the exception (which is cleared)
   }
   if (!mh->init_method_counters(counters)) {
-    MetadataFactory::free_metadata(loader_data, counters);
+    MetadataFactory::free_metadata(mh->method_holder()->class_loader_data(), counters);
   }
   return mh->method_counters();
 }
--- a/src/share/vm/oops/methodCounters.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/oops/methodCounters.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -23,10 +23,11 @@
  */
 #include "precompiled.hpp"
 #include "oops/methodCounters.hpp"
-#include "runtime/thread.inline.hpp"
+#include "runtime/handles.inline.hpp"
 
-MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) {
-  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters();
+MethodCounters* MethodCounters::allocate(methodHandle mh, TRAPS) {
+  ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
+  return new(loader_data, size(), false, MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
 }
 
 void MethodCounters::clear_counters() {
--- a/src/share/vm/oops/methodCounters.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/oops/methodCounters.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -26,7 +26,9 @@
 #define SHARE_VM_OOPS_METHODCOUNTERS_HPP
 
 #include "oops/metadata.hpp"
+#include "compiler/compilerOracle.hpp"
 #include "interpreter/invocationCounter.hpp"
+#include "runtime/arguments.hpp"
 
 class MethodCounters: public MetaspaceObj {
  friend class VMStructs;
@@ -45,7 +47,11 @@
   // 3. (INT_MIN..0]                  - method is hot and will deopt and get
   //                                    recompiled without the counters
   int               _nmethod_age;
-
+  int               _interpreter_invocation_limit;        // per-method InterpreterInvocationLimit
+  int               _interpreter_backward_branch_limit;   // per-method InterpreterBackwardBranchLimit
+  int               _interpreter_profile_limit;           // per-method InterpreterProfileLimit
+  int               _invoke_mask;                         // per-method Tier0InvokeNotifyFreqLog
+  int               _backedge_mask;                       // per-method Tier0BackedgeNotifyFreqLog
 #ifdef TIERED
   float             _rate;                        // Events (invocation and backedge counter increments) per millisecond
   jlong             _prev_time;                   // Previous time the rate was acquired
@@ -53,15 +59,15 @@
   u1                _highest_osr_comp_level;      // Same for OSR level
 #endif
 
-  MethodCounters() : _interpreter_invocation_count(0),
-                     _interpreter_throwout_count(0),
-                     _number_of_breakpoints(0),
-                     _nmethod_age(INT_MAX)
+  MethodCounters(methodHandle mh) : _interpreter_invocation_count(0),
+                                    _interpreter_throwout_count(0),
+                                    _number_of_breakpoints(0),
+                                    _nmethod_age(INT_MAX)
 #ifdef TIERED
-                   , _rate(0),
-                     _prev_time(0),
-                     _highest_comp_level(0),
-                     _highest_osr_comp_level(0)
+                                 , _rate(0),
+                                   _prev_time(0),
+                                   _highest_comp_level(0),
+                                   _highest_osr_comp_level(0)
 #endif
   {
     invocation_counter()->init();
@@ -70,10 +76,28 @@
     if (StressCodeAging) {
       set_nmethod_age(HotMethodDetectionLimit);
     }
+
+    // Set per-method thresholds.
+    double scale = 1.0;
+    CompilerOracle::has_option_value(mh, "CompileThresholdScaling", scale);
+
+    int compile_threshold = Arguments::scaled_compile_threshold(CompileThreshold, scale);
+    _interpreter_invocation_limit = compile_threshold << InvocationCounter::count_shift;
+    if (ProfileInterpreter) {
+      // If interpreter profiling is enabled, the backward branch limit
+      // is compared against the method data counter rather than an invocation
+      // counter, therefore no shifting of bits is required.
+      _interpreter_backward_branch_limit = (compile_threshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
+    } else {
+      _interpreter_backward_branch_limit = ((compile_threshold * OnStackReplacePercentage) / 100) << InvocationCounter::count_shift;
+    }
+    _interpreter_profile_limit = ((compile_threshold * InterpreterProfilePercentage) / 100) << InvocationCounter::count_shift;
+    _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+    _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
   }
 
  public:
-  static MethodCounters* allocate(ClassLoaderData* loader_data, TRAPS);
+  static MethodCounters* allocate(methodHandle mh, TRAPS);
 
   void deallocate_contents(ClassLoaderData* loader_data) {}
   DEBUG_ONLY(bool on_stack() { return false; })  // for template
@@ -161,5 +185,24 @@
     return offset_of(MethodCounters, _interpreter_invocation_count);
   }
 
+  static ByteSize interpreter_invocation_limit_offset() {
+    return byte_offset_of(MethodCounters, _interpreter_invocation_limit);
+  }
+
+  static ByteSize interpreter_backward_branch_limit_offset() {
+    return byte_offset_of(MethodCounters, _interpreter_backward_branch_limit);
+  }
+
+  static ByteSize interpreter_profile_limit_offset() {
+    return byte_offset_of(MethodCounters, _interpreter_profile_limit);
+  }
+
+  static ByteSize invoke_mask_offset() {
+    return byte_offset_of(MethodCounters, _invoke_mask);
+  }
+
+  static ByteSize backedge_mask_offset() {
+    return byte_offset_of(MethodCounters, _backedge_mask);
+  }
 };
 #endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP
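
The constructor above bakes scaled, counter-encoded limits into each MethodCounters instance, so interpreter code can compare a raw counter word against a field loaded via the offsets above instead of against a global. A standalone sketch of the arithmetic; the constants mirror common HotSpot defaults (InvocationCounter::count_shift = 3, CompileThreshold = 10000, InterpreterProfilePercentage = 33, OnStackReplacePercentage = 140) but are illustrative only:

    #include <cstdio>

    static const int kCountShift = 3;
    static const int kCompileThreshold = 10000;
    static const int kOnStackReplacePercentage = 140;
    static const int kInterpreterProfilePercentage = 33;

    int main() {
      double scale = 0.5;  // e.g. a per-method CompileThresholdScaling value
      int compile_threshold = (int)(kCompileThreshold * scale);

      // Limits are pre-shifted so raw counter words can be compared directly.
      int invocation_limit = compile_threshold << kCountShift;
      int profile_limit =
          ((compile_threshold * kInterpreterProfilePercentage) / 100) << kCountShift;
      // Without interpreter profiling, the backedge limit is counter-encoded too.
      int backedge_limit =
          ((compile_threshold * kOnStackReplacePercentage) / 100) << kCountShift;

      std::printf("%d %d %d\n", invocation_limit, profile_limit, backedge_limit);
    }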
--- a/src/share/vm/oops/methodData.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/oops/methodData.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -31,6 +31,7 @@
 #include "memory/heapInspection.hpp"
 #include "oops/methodData.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
+#include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1131,6 +1132,13 @@
   _backedge_counter.init();
   _invocation_counter_start = 0;
   _backedge_counter_start = 0;
+
+  // Set the per-method invoke and backedge masks.
+  double scale = 1.0;
+  CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
+  _invoke_mask = right_n_bits(Arguments::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+  _backedge_mask = right_n_bits(Arguments::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
+
   _tenure_traps = 0;
   _num_loops = 0;
   _num_blocks = 0;
--- a/src/share/vm/oops/methodData.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/oops/methodData.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -2088,6 +2088,8 @@
   int               _invocation_counter_start;
   int               _backedge_counter_start;
   uint              _tenure_traps;
+  int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
+  int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog
 
 #if INCLUDE_RTM_OPT
   // State of RTM code generation during compilation of the method
@@ -2447,10 +2449,19 @@
   static ByteSize invocation_counter_offset() {
     return byte_offset_of(MethodData, _invocation_counter);
   }
+
   static ByteSize backedge_counter_offset() {
     return byte_offset_of(MethodData, _backedge_counter);
   }
 
+  static ByteSize invoke_mask_offset() {
+    return byte_offset_of(MethodData, _invoke_mask);
+  }
+
+  static ByteSize backedge_mask_offset() {
+    return byte_offset_of(MethodData, _backedge_mask);
+  }
+
   static ByteSize parameters_type_data_di_offset() {
     return byte_offset_of(MethodData, _parameters_type_data_di);
   }
--- a/src/share/vm/opto/chaitin.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/chaitin.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -582,6 +582,9 @@
   // Peephole remove copies
   post_allocate_copy_removal();
 
+  // Merge multidefs if multiple defs representing the same value are used in a single block.
+  merge_multidefs();
+
 #ifdef ASSERT
   // Verify the graph after RA.
   verify(&live_arena);
--- a/src/share/vm/opto/chaitin.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/chaitin.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -681,6 +681,32 @@
   // Extend the node to LRG mapping
   void add_reference( const Node *node, const Node *old_node);
 
+  // Record the first use of a def in the block for a register.
+  class RegDefUse {
+    Node* _def;
+    Node* _first_use;
+  public:
+    RegDefUse() : _def(NULL), _first_use(NULL) { }
+    Node* def() const       { return _def;       }
+    Node* first_use() const { return _first_use; }
+
+    void update(Node* def, Node* use) {
+      if (_def != def) {
+        _def = def;
+        _first_use = use;
+      }
+    }
+    void clear() {
+      _def = NULL;
+      _first_use = NULL;
+    }
+  };
+  typedef GrowableArray<RegDefUse> RegToDefUseMap;
+  int possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse);
+
+  // Merge nodes that are a part of a multidef lrg and produce the same value within a block.
+  void merge_multidefs();
+
 private:
 
   static int _final_loads, _final_stores, _final_copies, _final_memoves;
--- a/src/share/vm/opto/doCall.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/doCall.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -94,7 +94,7 @@
   if (log != NULL) {
     int rid = (receiver_count >= 0)? log->identify(profile.receiver(0)): -1;
     int r2id = (rid != -1 && profile.has_receiver(1))? log->identify(profile.receiver(1)):-1;
-    log->begin_elem("call method='%d' count='%d' prof_factor='%g'",
+    log->begin_elem("call method='%d' count='%d' prof_factor='%f'",
                     log->identify(callee), site_count, prof_factor);
     if (call_does_dispatch)  log->print(" virtual='1'");
     if (allow_inline)     log->print(" inline='1'");
--- a/src/share/vm/opto/escape.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/escape.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -2010,14 +2010,9 @@
         bt = field->layout_type();
       } else {
         // Check for unsafe oop field access
-        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-          int opcode = n->fast_out(i)->Opcode();
-          if (opcode == Op_StoreP || opcode == Op_LoadP ||
-              opcode == Op_StoreN || opcode == Op_LoadN) {
-            bt = T_OBJECT;
-            (*unsafe) = true;
-            break;
-          }
+        if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
+          bt = T_OBJECT;
+          (*unsafe) = true;
         }
       }
     } else if (adr_type->isa_aryptr()) {
@@ -2031,13 +2026,8 @@
       }
     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
       // Allocation initialization, ThreadLocal field access, unsafe access
-      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-        int opcode = n->fast_out(i)->Opcode();
-        if (opcode == Op_StoreP || opcode == Op_LoadP ||
-            opcode == Op_StoreN || opcode == Op_LoadN) {
-          bt = T_OBJECT;
-          break;
-        }
+      if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN)) {
+        bt = T_OBJECT;
       }
     }
   }
@@ -3092,13 +3082,7 @@
         continue;
     } else if (n->Opcode() == Op_EncodeISOArray) {
       // get the memory projection
-      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-        Node *use = n->fast_out(i);
-        if (use->Opcode() == Op_SCMemProj) {
-          n = use;
-          break;
-        }
-      }
+      n = n->find_out_with(Op_SCMemProj);
       assert(n->Opcode() == Op_SCMemProj, "memory projection required");
     } else {
       assert(n->is_Mem(), "memory node required.");
@@ -3122,13 +3106,7 @@
         continue;  // don't push users
       } else if (n->is_LoadStore()) {
         // get the memory projection
-        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
-          Node *use = n->fast_out(i);
-          if (use->Opcode() == Op_SCMemProj) {
-            n = use;
-            break;
-          }
-        }
+        n = n->find_out_with(Op_SCMemProj);
         assert(n->Opcode() == Op_SCMemProj, "memory projection required");
       }
     }
--- a/src/share/vm/opto/ifg.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/ifg.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -535,12 +535,8 @@
       // The method add_input_to_liveout() keeps such nodes alive (put them on liveout list)
       // when it sees SCMemProj node in a block. Unfortunately SCMemProj node could be placed
       // in block in such order that KILL MachProj nodes are processed first.
-      uint cnt = def->outcnt();
-      for (uint i = 0; i < cnt; i++) {
-        Node* proj = def->raw_out(i);
-        if (proj->Opcode() == Op_SCMemProj) {
-          return false;
-        }
+      if (def->has_out_with(Op_SCMemProj)) {
+        return false;
       }
     }
     b->remove_node(location);
--- a/src/share/vm/opto/loopTransform.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/loopTransform.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -2057,10 +2057,9 @@
   }
   Node *main_cmp = main_bol->in(1);
   if( main_cmp->outcnt() > 1 ) { // CmpNode shared?
-    _igvn.hash_delete(main_bol);
     main_cmp = main_cmp->clone();// Clone a private CmpNode
     register_new_node( main_cmp, main_cle->in(0) );
-    main_bol->set_req(1,main_cmp);
+    _igvn.replace_input_of(main_bol, 1, main_cmp);
   }
   // Hack the now-private loop bounds
   _igvn.replace_input_of(main_cmp, 2, main_limit);
--- a/src/share/vm/opto/machnode.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/machnode.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -616,6 +616,29 @@
 #endif
 };
 
+// MachMergeNode is similar to a PhiNode in the sense that it merges multiple values;
+// however, it doesn't have a control input and is more like a MergeMem.
+// It is inserted after register allocation is done to ensure that nodes use a single
+// definition of a multidef lrg within a block.
+class MachMergeNode : public MachIdealNode {
+public:
+  MachMergeNode(Node *n1) {
+    init_class_id(Class_MachMerge);
+    add_req(NULL);
+    add_req(n1);
+  }
+  virtual const RegMask &out_RegMask() const { return in(1)->out_RegMask(); }
+  virtual const RegMask &in_RegMask(uint idx) const { return in(1)->in_RegMask(idx); }
+  virtual const class Type *bottom_type() const { return in(1)->bottom_type(); }
+  virtual uint ideal_reg() const { return bottom_type()->ideal_reg(); }
+  virtual uint oper_input_base() const { return 1; }
+  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { }
+  virtual uint size(PhaseRegAlloc *ra_) const { return 0; }
+#ifndef PRODUCT
+  virtual const char *Name() const { return "MachMerge"; }
+#endif
+};
+
 //------------------------------MachBranchNode--------------------------------
 // Abstract machine branch Node
 class MachBranchNode : public MachIdealNode {
--- a/src/share/vm/opto/macro.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/macro.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -258,14 +258,7 @@
     // Search for CastP2X->Xor->URShift->Cmp path which
     // checks if the store done to a different from the value's region.
     // And replace Cmp with #0 (false) to collapse G1 post barrier.
-    Node* xorx = NULL;
-    for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) {
-      Node* u = p2x->fast_out(i);
-      if (u->Opcode() == Op_XorX) {
-        xorx = u;
-        break;
-      }
-    }
+    Node* xorx = p2x->find_out_with(Op_XorX);
     assert(xorx != NULL, "missing G1 post barrier");
     Node* shift = xorx->unique_out();
     Node* cmpx = shift->unique_out();
--- a/src/share/vm/opto/memnode.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/memnode.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -2609,7 +2609,6 @@
     return false; // if not a distinct instance, there may be aliases of the address
   for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) {
     Node *use = adr->fast_out(i);
-    int opc = use->Opcode();
     if (use->is_Load() || use->is_LoadStore()) {
       return false;
     }
--- a/src/share/vm/opto/node.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/node.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -881,6 +881,34 @@
     return (Node*) this;
 }
 
+// Find an out of the current node that matches the given opcode.
+Node* Node::find_out_with(int opcode) {
+  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+    Node* use = fast_out(i);
+    if (use->Opcode() == opcode) {
+      return use;
+    }
+  }
+  return NULL;
+}
+
+// Return true if the current node has an out that matches opcode.
+bool Node::has_out_with(int opcode) {
+  return (find_out_with(opcode) != NULL);
+}
+
+// Return true if the current node has an out that matches any of the opcodes.
+bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
+  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
+    int opcode = fast_out(i)->Opcode();
+    if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
 //---------------------------uncast_helper-------------------------------------
 Node* Node::uncast_helper(const Node* p) {
 #ifdef ASSERT
--- a/src/share/vm/opto/node.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/node.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -98,6 +98,7 @@
 class MachSafePointNode;
 class MachSpillCopyNode;
 class MachTempNode;
+class MachMergeNode;
 class Matcher;
 class MemBarNode;
 class MemBarStoreStoreNode;
@@ -436,6 +437,13 @@
     return (this->uncast() == n->uncast());
   }
 
+  // Find an out of the current node that matches the given opcode.
+  Node* find_out_with(int opcode);
+  // Return true if the current node has an out that matches opcode.
+  bool has_out_with(int opcode);
+  // Return true if the current node has an out that matches any of the opcodes.
+  bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);
+
 private:
   static Node* uncast_helper(const Node* n);
 
@@ -507,18 +515,25 @@
 
 //----------------- Other Node Properties
 
-  // Generate class id for some ideal nodes to avoid virtual query
-  // methods is_<Node>().
-  // Class id is the set of bits corresponded to the node class and all its
-  // super classes so that queries for super classes are also valid.
-  // Subclasses of the same super class have different assigned bit
-  // (the third parameter in the macro DEFINE_CLASS_ID).
-  // Classes with deeper hierarchy are declared first.
-  // Classes with the same hierarchy depth are sorted by usage frequency.
+  // Generate class IDs for (some) ideal nodes so that it is possible to determine
+  // the type of a node using a non-virtual method call (the method is_<Node>() below).
   //
-  // The query method masks the bits to cut off bits of subclasses
-  // and then compare the result with the class id
-  // (see the macro DEFINE_CLASS_QUERY below).
+  // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
+  // the type of the node the ID represents; another subset of an ID's bits is reserved
+  // for the superclasses of the node represented by the ID.
+  //
+  // By design, if A is a supertype of B, A.is_B() returns false and B.is_A()
+  // returns true. A.is_A() returns true.
+  //
+  // If two classes, A and B, have the same superclass, a different bit of A's class id
+  // is reserved for A's type than for B's type. That bit is specified by the third
+  // parameter in the macro DEFINE_CLASS_ID.
+  //
+  // By convention, classes with deeper hierarchy are declared first. Moreover,
+  // classes with the same hierarchy depth are sorted by usage frequency.
+  //
+  // The query method masks the bits to cut off bits of subclasses and then compares
+  // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
   //
   //  Class_MachCall=30, ClassMask_MachCall=31
   // 12               8               4               0
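
A toy standalone version of the scheme the rewritten comment describes (names illustrative, not the real DEFINE_CLASS_ID/DEFINE_CLASS_QUERY expansion): each class ORs one new bit onto its superclass's ID, so a subtype query reduces to a mask-and-compare. The real queries mask with a per-class ClassMask first; the superset check below is equivalent for this tree-shaped bit assignment.

    #include <cassert>

    enum {
      Class_Node     = 0,                      // root: no bits set
      Class_Mach     = 1 << 0,                 // Node -> Mach
      Class_MachCall = Class_Mach | (1 << 1),  // Mach -> MachCall
      Class_MachTemp = Class_Mach | (1 << 2),  // sibling gets a different bit
    };

    static bool is_a(unsigned id, unsigned cls) { return (id & cls) == cls; }

    int main() {
      assert(is_a(Class_MachCall, Class_Mach));   // subclass is-a superclass
      assert(!is_a(Class_Mach, Class_MachCall));  // superclass is not the subclass
      assert(is_a(Class_Mach, Class_Mach));       // A.is_A() is true
      return 0;
    }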
@@ -592,6 +607,7 @@
       DEFINE_CLASS_ID(MachTemp,         Mach, 3)
       DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
       DEFINE_CLASS_ID(MachConstant,     Mach, 5)
+      DEFINE_CLASS_ID(MachMerge,        Mach, 6)
 
     DEFINE_CLASS_ID(Type,  Node, 2)
       DEFINE_CLASS_ID(Phi,   Type, 0)
@@ -763,6 +779,7 @@
   DEFINE_CLASS_QUERY(MachSafePoint)
   DEFINE_CLASS_QUERY(MachSpillCopy)
   DEFINE_CLASS_QUERY(MachTemp)
+  DEFINE_CLASS_QUERY(MachMerge)
   DEFINE_CLASS_QUERY(Mem)
   DEFINE_CLASS_QUERY(MemBar)
   DEFINE_CLASS_QUERY(MemBarStoreStore)
--- a/src/share/vm/opto/parse1.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/parse1.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -441,7 +441,7 @@
 
   CompileLog* log = C->log();
   if (log != NULL) {
-    log->begin_head("parse method='%d' uses='%g'",
+    log->begin_head("parse method='%d' uses='%f'",
                     log->identify(parse_method), expected_uses);
     if (depth() == 1 && C->is_osr_compilation()) {
       log->print(" osr_bci='%d'", C->entry_bci());
--- a/src/share/vm/opto/parse2.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/parse2.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -832,7 +832,7 @@
       sprintf(prob_str_buf, "%g", prob);
       prob_str = prob_str_buf;
     }
-    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%g' prob='%s'",
+    C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
                    iter().get_dest(), taken, not_taken, cnt, prob_str);
   }
   return prob;
--- a/src/share/vm/opto/phase.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/phase.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -110,6 +110,7 @@
     tty->print_cr ("         Compute Liveness:    %7.3f s", timers[_t_computeLive].seconds());
     tty->print_cr ("         Regalloc Split:      %7.3f s", timers[_t_regAllocSplit].seconds());
     tty->print_cr ("         Postalloc Copy Rem:  %7.3f s", timers[_t_postAllocCopyRemoval].seconds());
+    tty->print_cr ("         Merge multidefs:     %7.3f s", timers[_t_mergeMultidefs].seconds());
     tty->print_cr ("         Fixup Spills:        %7.3f s", timers[_t_fixupSpills].seconds());
     tty->print_cr ("         Compact:             %7.3f s", timers[_t_chaitinCompact].seconds());
     tty->print_cr ("         Coalesce 1:          %7.3f s", timers[_t_chaitinCoalesce1].seconds());
@@ -126,6 +127,7 @@
        timers[_t_computeLive].seconds() +
        timers[_t_regAllocSplit].seconds() +
        timers[_t_postAllocCopyRemoval].seconds() +
+       timers[_t_mergeMultidefs].seconds() +
        timers[_t_fixupSpills].seconds() +
        timers[_t_chaitinCompact].seconds() +
        timers[_t_chaitinCoalesce1].seconds() +
--- a/src/share/vm/opto/phase.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/phase.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -88,6 +88,7 @@
       _t_computeLive,
       _t_regAllocSplit,
       _t_postAllocCopyRemoval,
+      _t_mergeMultidefs,
       _t_fixupSpills,
       _t_chaitinCompact,
       _t_chaitinCoalesce1,
--- a/src/share/vm/opto/postaloc.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/postaloc.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -263,20 +263,6 @@
   // intermediate copies might be illegal, i.e., value is stored down to stack
   // then reloaded BUT survives in a register the whole way.
   Node *val = skip_copies(n->in(k));
-
-  if (val == x && nk_idx != 0 &&
-      regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
-      _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) {
-    // When rematerialzing nodes and stretching lifetimes, the
-    // allocator will reuse the original def for multidef LRG instead
-    // of the current reaching def because it can't know it's safe to
-    // do so.  After allocation completes if they are in the same LRG
-    // then it should use the current reaching def instead.
-    n->set_req(k, regnd[nk_reg]);
-    blk_adjust += yank_if_dead(val, current_block, &value, &regnd);
-    val = skip_copies(n->in(k));
-  }
-
   if (val == x) return blk_adjust; // No progress?
 
   int n_regs = RegMask::num_registers(val->ideal_reg());
@@ -382,6 +368,94 @@
   return false;
 }
 
+// The algorithm works as follows:
+// We traverse the block top to bottom. possibly_merge_multidef() is invoked for every input edge k
+// of the instruction n. We check whether the input is a multidef lrg. If it is, we record the fact that we've
+// seen a definition (coming in as an input) in the reg2defuse array. The array maps registers to their
+// current reaching definitions (we track only multidefs though). With each definition we also associate the first
+// instruction we saw use it. If we later observe a def (an input) that is part of the
+// same lrg but differs from the previously seen def, we merge the two with a MachMerge node and substitute
+// the merge into all the uses we've seen so far. After that, we keep replacing subsequent defs in the same lrg
+// with the merge node as they are encountered, adding each such def to the merge's inputs.
+void PhaseChaitin::merge_multidefs() {
+  Compile::TracePhase tp("mergeMultidefs", &timers[_t_mergeMultidefs]);
+  ResourceMark rm;
+  // Keep track of the defs seen in registers and collect their uses in the block.
+  RegToDefUseMap reg2defuse(_max_reg, _max_reg, RegDefUse());
+  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+    Block* block = _cfg.get_block(i);
+    for (uint j = 1; j < block->number_of_nodes(); j++) {
+      Node* n = block->get_node(j);
+      if (n->is_Phi()) continue;
+      for (uint k = 1; k < n->req(); k++) {
+        j += possibly_merge_multidef(n, k, block, reg2defuse);
+      }
+      // Null out the value produced by the instruction itself, since we're only interested in defs
+      // implicitly defined by the uses. We are actually interested in tracking only redefinitions
+      // of the multidef lrgs in the same register. For that matter it's enough to track changes in
+      // the base register only and ignore other effects of multi-register lrgs and fat projections.
+      // It is also ok to ignore defs coming from singledefs. After an implicit overwrite by one of
+      // those our register is guaranteed to be used by another lrg and we won't attempt to merge it.
+      uint lrg = _lrg_map.live_range_id(n);
+      if (lrg > 0 && lrgs(lrg).is_multidef()) {
+        OptoReg::Name reg = lrgs(lrg).reg();
+        reg2defuse.at(reg).clear();
+      }
+    }
+    // Clear reg->def->use tracking for the next block
+    for (int j = 0; j < reg2defuse.length(); j++) {
+      reg2defuse.at(j).clear();
+    }
+  }
+}
+
+int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse) {
+  int blk_adjust = 0;
+
+  uint lrg = _lrg_map.live_range_id(n->in(k));
+  if (lrg > 0 && lrgs(lrg).is_multidef()) {
+    OptoReg::Name reg = lrgs(lrg).reg();
+
+    Node* def = reg2defuse.at(reg).def();
+    if (def != NULL && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) {
+      // Same lrg but different node, we have to merge.
+      MachMergeNode* merge;
+      if (def->is_MachMerge()) { // is it already a merge?
+        merge = def->as_MachMerge();
+      } else {
+        merge = new MachMergeNode(def);
+
+        // Insert the merge node into the block before the first use.
+        uint use_index = block->find_node(reg2defuse.at(reg).first_use());
+        block->insert_node(merge, use_index++);
+
+        // Let the allocator know about the new node, use the same lrg
+        _lrg_map.extend(merge->_idx, lrg);
+        blk_adjust++;
+
+        // Fixup all the uses (there is at least one) that happened between the first
+        // use and before the current one.
+        for (; use_index < block->number_of_nodes(); use_index++) {
+          Node* use = block->get_node(use_index);
+          if (use == n) {
+            break;
+          }
+          use->replace_edge(def, merge);
+        }
+      }
+      if (merge->find_edge(n->in(k)) == -1) {
+        merge->add_req(n->in(k));
+      }
+      n->set_req(k, merge);
+    }
+
+    // update the uses
+    reg2defuse.at(reg).update(n->in(k), n);
+  }
+
+  return blk_adjust;
+}
+
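
As a schematic illustration of the rewrite (not a real node dump): suppose a multidef lrg allocated to R1 has two defs whose uses interleave within one block. On seeing the second def, the pass materializes a MachMerge before the first recorded use, repoints the intervening uses at it, and adds each later def as a merge input; since MachMerge emits no code (see machnode.hpp above), the schedule is unaffected:

    before:                         after:
      d1: R1 := ...                   d1: R1 := ...
      u1: use(d1)                     m : MachMerge(d1, d2)  // emits nothing
      d2: R1 := ...  // same lrg      u1: use(m)
      u2: use(d2)                     d2: R1 := ...
                                      u2: use(m)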
 
 //------------------------------post_allocate_copy_removal---------------------
 // Post-Allocation peephole copy removal.  We do this in 1 pass over the
--- a/src/share/vm/opto/stringopts.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/opto/stringopts.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1507,10 +1507,12 @@
       }
       case StringConcat::StringMode: {
         const Type* type = kit.gvn().type(arg);
+        Node* count = NULL;
         if (type == TypePtr::NULL_PTR) {
           // replace the argument with the null checked version
           arg = null_string;
           sc->set_argument(argi, arg);
+          count = kit.load_String_length(kit.control(), arg);
         } else if (!type->higher_equal(TypeInstPtr::NOTNULL)) {
           // s = s != null ? s : "null";
           // length = length + (s.count - s.offset);
@@ -1533,10 +1535,13 @@
           // replace the argument with the null checked version
           arg = phi;
           sc->set_argument(argi, arg);
+          count = kit.load_String_length(kit.control(), arg);
+        } else {
+          // A corresponding null check will be connected during IGVN in MemNode::Ideal_common_DU_postCCP.
+          // kit.control() might be a different test that can be hoisted above the actual null check;
+          // if the control input were not NULL here, Ideal_common_DU_postCCP would not look for a null check.
+          count = kit.load_String_length(NULL, arg);
         }
-
-        Node* count = kit.load_String_length(kit.control(), arg);
-
         length = __ AddI(length, count);
         string_sizes->init_req(argi, NULL);
         break;
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -155,7 +155,7 @@
   if (mdo != NULL) {
     int i = mdo->invocation_count_delta();
     int b = mdo->backedge_count_delta();
-    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
+    return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method);
   }
   return false;
 }
@@ -229,32 +229,32 @@
 // Tier?LoadFeedback is basically a coefficient that determines
 // how many methods per compiler thread can be in the queue before
 // the threshold values double.
-bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
+bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
   switch(cur_level) {
   case CompLevel_none:
   case CompLevel_limited_profile: {
     double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
-    return loop_predicate_helper<CompLevel_none>(i, b, k);
+    return loop_predicate_helper<CompLevel_none>(i, b, k, method);
   }
   case CompLevel_full_profile: {
     double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
-    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
+    return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
   }
   default:
     return true;
   }
 }
 
-bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
+bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
   switch(cur_level) {
   case CompLevel_none:
   case CompLevel_limited_profile: {
     double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
-    return call_predicate_helper<CompLevel_none>(i, b, k);
+    return call_predicate_helper<CompLevel_none>(i, b, k, method);
   }
   case CompLevel_full_profile: {
     double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
-    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
+    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method);
   }
   default:
     return true;
@@ -271,7 +271,7 @@
     int i = method->invocation_count();
     int b = method->backedge_count();
     double k = Tier0ProfilingStartPercentage / 100.0;
-    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
+    return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method);
   }
   return false;
 }
@@ -348,7 +348,7 @@
       // If we were at full profile level, would we switch to full opt?
       if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
         next_level = CompLevel_full_optimization;
-      } else if ((this->*p)(i, b, cur_level)) {
+      } else if ((this->*p)(i, b, cur_level, method)) {
         // C1-generated fully profiled code is about 30% slower than the limited profile
         // code that has only invocation and backedge counters. The observation is that
         // if C2 queue is large enough we can spend too much time in the fully profiled code
@@ -374,7 +374,7 @@
           if (mdo->would_profile()) {
             if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                      Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
-                                     (this->*p)(i, b, cur_level))) {
+                                     (this->*p)(i, b, cur_level, method))) {
               next_level = CompLevel_full_profile;
             }
           } else {
@@ -390,7 +390,7 @@
           if (mdo->would_profile()) {
             int mdo_i = mdo->invocation_count_delta();
             int mdo_b = mdo->backedge_count_delta();
-            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
               next_level = CompLevel_full_optimization;
             }
           } else {
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -84,7 +84,7 @@
  *   invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
  *   makes a call into the runtime.
  *
- * - Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
+ * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
  *   compilation thresholds.
  *   Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
  *   Other thresholds work as follows:
@@ -100,7 +100,9 @@
  *   The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
  *   noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
  *   from Method* and for 3->4 transition they come from MDO (since profiled invocations are
- *   counted separately).
+ *   counted separately). Finally, if a method does not contain anything worth profiling, a transition
+ *   from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
+ *   what is specified by Tier4InvocationThreshold).
  *
  *   OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
  *
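
As a worked example of the threshold predicates sketched in this comment, which take the form i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s): with the usual Tier3 defaults of 200/100/2000 and s = 1, a method with i = 150 calls and b = 1900 backedges fails the first clause (150 < 200) but passes the second (150 > 100 and 150 + 1900 = 2050 > 2000), so it transitions from level 0 to level 3. The defaults quoted here come from globals.hpp and may differ between releases.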
@@ -164,9 +166,9 @@
   // Call and loop predicates determine whether a transition to a higher compilation
   // level should be performed (pointers to predicate functions are passed to common()).
   // Predicates also take compiler load into account.
-  typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
-  bool call_predicate(int i, int b, CompLevel cur_level);
-  bool loop_predicate(int i, int b, CompLevel cur_level);
+  typedef bool (AdvancedThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
+  bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
+  bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
   // Common transition function. Given a predicate determines if a method should transition to another level.
   CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
   // Transition functions.
--- a/src/share/vm/runtime/arguments.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1126,16 +1126,35 @@
 }
 #endif
 
-// Returns threshold scaled with CompileThresholdScaling
-intx Arguments::get_scaled_compile_threshold(intx threshold) {
-  return (intx)(threshold * CompileThresholdScaling);
+intx Arguments::scaled_compile_threshold(intx threshold, double scale) {
+  if (scale == 1.0 || scale < 0.0) {
+    return threshold;
+  } else {
+    return (intx)(threshold * scale);
+  }
 }
 
 // Returns freq_log scaled with CompileThresholdScaling
-intx Arguments::get_scaled_freq_log(intx freq_log) {
-  intx scaled_freq = get_scaled_compile_threshold((intx)1 << freq_log);
-  if (scaled_freq == 0) {
-    return 0;
+intx Arguments::scaled_freq_log(intx freq_log, double scale) {
+  // Check if scaling is necessary or a negative value was specified.
+  if (scale == 1.0 || scale < 0.0) {
+    return freq_log;
+  }
+
+  // Check value to avoid calculating log2 of 0.
+  if (scale == 0.0) {
+    return 1;
+  }
+
+  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
+  // Determine the maximum notification frequency value currently supported.
+  // The largest mask value that the interpreter/C1 can handle is
+  // of length InvocationCounter::number_of_count_bits. Mask values are always
+  // one bit shorter then the value of the notification frequency. Set
+  // max_freq_bits accordingly.
+  intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
+  if (scaled_freq > nth_bit(max_freq_bits)) {
+    return max_freq_bits;
   } else {
     return log2_intptr(scaled_freq);
   }
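
A standalone model of this scaling math (the bit bound reproduces the HotSpot limit, assuming InvocationCounter::number_of_count_bits is 29 on a 32-bit counter word):

    #include <cstdint>
    #include <cstdio>

    static const intptr_t kNumberOfCountBits = 29;

    static intptr_t floor_log2(intptr_t x) {
      intptr_t l = 0;
      while (x >>= 1) l++;
      return l;  // floor_log2(0) == 0, covering the degenerate tiny-scale case
    }

    static intptr_t scaled_freq_log(intptr_t freq_log, double scale) {
      if (scale == 1.0 || scale < 0.0) return freq_log;  // no scaling needed
      if (scale == 0.0) return 1;                        // avoid log2 of 0
      intptr_t scaled_freq = (intptr_t)(((intptr_t)1 << freq_log) * scale);
      intptr_t max_freq_bits = kNumberOfCountBits + 1;
      if (scaled_freq > ((intptr_t)1 << max_freq_bits)) return max_freq_bits;
      return floor_log2(scaled_freq);
    }

    int main() {
      // E.g. Tier0InvokeNotifyFreqLog = 7 halved: 2^7 * 0.5 = 64, log2 = 6.
      std::printf("%ld\n", (long)scaled_freq_log(7, 0.5));
    }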
@@ -1180,31 +1199,36 @@
     Tier3InvokeNotifyFreqLog = 0;
     Tier4InvocationThreshold = 0;
   }
+
+  if (CompileThresholdScaling < 0) {
+    vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
+  }
+
   // Scale tiered compilation thresholds
   if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
-    FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, get_scaled_freq_log(Tier0InvokeNotifyFreqLog));
-    FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, get_scaled_freq_log(Tier0BackedgeNotifyFreqLog));
-
-    FLAG_SET_ERGO(intx, Tier3InvocationThreshold, get_scaled_compile_threshold(Tier3InvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, get_scaled_compile_threshold(Tier3MinInvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier3CompileThreshold, get_scaled_compile_threshold(Tier3CompileThreshold));
-    FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, get_scaled_compile_threshold(Tier3BackEdgeThreshold));
+    FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
+    FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
 
     // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
     // once these thresholds become supported.
 
-    FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, get_scaled_freq_log(Tier2InvokeNotifyFreqLog));
-    FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, get_scaled_freq_log(Tier2BackedgeNotifyFreqLog));
-
-    FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, get_scaled_freq_log(Tier3InvokeNotifyFreqLog));
-    FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, get_scaled_freq_log(Tier3BackedgeNotifyFreqLog));
-
-    FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, get_scaled_freq_log(Tier23InlineeNotifyFreqLog));
-
-    FLAG_SET_ERGO(intx, Tier4InvocationThreshold, get_scaled_compile_threshold(Tier4InvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, get_scaled_compile_threshold(Tier4MinInvocationThreshold));
-    FLAG_SET_ERGO(intx, Tier4CompileThreshold, get_scaled_compile_threshold(Tier4CompileThreshold));
-    FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, get_scaled_compile_threshold(Tier4BackEdgeThreshold));
+    FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
+    FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
   }
 }
 
@@ -3456,7 +3480,7 @@
   }
 
   if ((TieredCompilation && CompileThresholdScaling == 0)
-      || (!TieredCompilation && get_scaled_compile_threshold(CompileThreshold) == 0)) {
+      || (!TieredCompilation && scaled_compile_threshold(CompileThreshold) == 0)) {
     set_mode_flags(_int);
   }
 
@@ -3896,7 +3920,7 @@
     }
     // Scale CompileThreshold
     if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
-      FLAG_SET_ERGO(intx, CompileThreshold, get_scaled_compile_threshold(CompileThreshold));
+      FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
     }
   }
 
--- a/src/share/vm/runtime/arguments.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/arguments.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -328,9 +328,6 @@
   static bool _ClipInlining;
   static bool _CIDynamicCompilePriority;
 
-  // Scale compile thresholds
-  static intx get_scaled_compile_threshold(intx threshold);
-  static intx get_scaled_freq_log(intx freq_log);
   // Tiered
   static void set_tiered_flags();
   static int  get_min_number_of_compiler_threads();
@@ -452,6 +449,18 @@
   static char*  SharedArchivePath;
 
  public:
+  // Scale compile thresholds
+  // Returns threshold scaled with CompileThresholdScaling
+  static intx scaled_compile_threshold(intx threshold, double scale);
+  static intx scaled_compile_threshold(intx threshold) {
+    return scaled_compile_threshold(threshold, CompileThresholdScaling);
+  }
+  // Returns freq_log scaled with CompileThresholdScaling
+  static intx scaled_freq_log(intx freq_log, double scale);
+  static intx scaled_freq_log(intx freq_log) {
+    return scaled_freq_log(freq_log, CompileThresholdScaling);
+  }
+
   // Parses the arguments, first phase
   static jint parse(const JavaVMInitArgs* args);
   // Apply ergonomics
--- a/src/share/vm/runtime/globals.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/globals.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -2477,7 +2477,7 @@
           "Number of compiler threads to run")                              \
                                                                             \
   product(intx, CompilationPolicyChoice, 0,                                 \
-          "which compilation policy (0/1)")                                 \
+          "which compilation policy (0-3)")                                 \
                                                                             \
   develop(bool, UseStackBanging, true,                                      \
           "use stack banging for stack overflow checks (required for "      \
@@ -3528,7 +3528,16 @@
                                                                             \
   product(double, CompileThresholdScaling, 1.0,                             \
           "Factor to control when first compilation happens "               \
-          "(both with and without tiered compilation)")                     \
+          "(both with and without tiered compilation): "                    \
+          "values greater than 1.0 delay counter overflow, "                \
+          "values between 0 and 1.0 rush counter overflow, "                \
+          "value of 1.0 leave compilation thresholds unchanged "            \
+          "value of 0.0 is equivalent to -Xint. "                           \
+          ""                                                                \
+          "Flag can be set as per-method option. "                          \
+          "If a value is specified for a method, compilation thresholds "   \
+          "for that method are scaled by both the value of the global flag "\
+          "and the value of the per-method flag.")                          \
                                                                             \
   product(intx, Tier0InvokeNotifyFreqLog, 7,                                \
           "Interpreter (tier 0) invocation notification frequency")         \
--- a/src/share/vm/runtime/os.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/os.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1401,15 +1401,17 @@
   return (sp > (stack_limit + reserved_area));
 }
 
-size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
+size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
   assert(min_pages > 0, "sanity");
   if (UseLargePages) {
     const size_t max_page_size = region_size / min_pages;
 
     for (size_t i = 0; _page_sizes[i] != 0; ++i) {
       const size_t page_size = _page_sizes[i];
-      if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
-        return page_size;
+      if (page_size <= max_page_size) {
+        if (!must_be_aligned || is_size_aligned(region_size, page_size)) {
+          return page_size;
+        }
       }
     }
   }
@@ -1417,6 +1419,14 @@
   return vm_page_size();
 }
 
+size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
+  return page_size_for_region(region_size, min_pages, true);
+}
+
+size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
+  return page_size_for_region(region_size, min_pages, false);
+}
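
A standalone sketch of the selection logic above: walk a descending, zero-terminated page-size table and return the largest page that keeps at least min_pages in the region, requiring exact divisibility only in the aligned variant (the table values here are hypothetical):

    #include <cstddef>
    #include <cstdio>

    static const size_t page_sizes[] = { 1024 * 1024, 4096, 0 };  // descending

    static size_t page_size_for_region(size_t region_size, size_t min_pages,
                                       bool must_be_aligned) {
      const size_t max_page_size = region_size / min_pages;
      for (size_t i = 0; page_sizes[i] != 0; ++i) {
        const size_t ps = page_sizes[i];
        if (ps <= max_page_size && (!must_be_aligned || region_size % ps == 0)) {
          return ps;  // largest usable page size wins
        }
      }
      return 4096;  // fall back to the small page size
    }

    int main() {
      const size_t region = 2 * 1024 * 1024 + 17;
      std::printf("%zu\n", page_size_for_region(region, 1, true));   // 4096 (unaligned region)
      std::printf("%zu\n", page_size_for_region(region, 1, false));  // 1048576
    }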
+
 #ifndef PRODUCT
 void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
 {
@@ -1665,17 +1675,17 @@
 
   static size_t large_page_size() {
     const size_t large_page_size_example = 4 * M;
-    return os::page_size_for_region(large_page_size_example, 1);
+    return os::page_size_for_region_aligned(large_page_size_example, 1);
   }
 
-  static void test_page_size_for_region() {
+  static void test_page_size_for_region_aligned() {
     if (UseLargePages) {
       const size_t small_page = small_page_size();
       const size_t large_page = large_page_size();
 
       if (large_page > small_page) {
         size_t num_small_pages_in_large = large_page / small_page;
-        size_t page = os::page_size_for_region(large_page, num_small_pages_in_large);
+        size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);
 
         assert_eq(page, small_page);
       }
@@ -1688,21 +1698,53 @@
       const size_t large_page = large_page_size();
       if (large_page > small_page) {
         const size_t unaligned_region = large_page + 17;
-        size_t page = os::page_size_for_region(unaligned_region, 1);
+        size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
         assert_eq(page, small_page);
 
         const size_t num_pages = 5;
         const size_t aligned_region = large_page * num_pages;
-        page = os::page_size_for_region(aligned_region, num_pages);
+        page = os::page_size_for_region_aligned(aligned_region, num_pages);
         assert_eq(page, large_page);
       }
     }
   }
 
+  static void test_page_size_for_region_unaligned() {
+    if (UseLargePages) {
+      // Given exact page size, should return that page size.
+      for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+        size_t expected = os::_page_sizes[i];
+        size_t actual = os::page_size_for_region_unaligned(expected, 1);
+        assert_eq(expected, actual);
+      }
+
+      // Given slightly larger size than a page size, return the page size.
+      for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
+        size_t expected = os::_page_sizes[i];
+        size_t actual = os::page_size_for_region_unaligned(expected + 17, 1);
+        assert_eq(expected, actual);
+      }
+
+      // Given a slightly smaller size than a page size,
+      // return the next smaller page size.
+      if (os::_page_sizes[1] > os::_page_sizes[0]) {
+        size_t expected = os::_page_sizes[0];
+        size_t actual = os::page_size_for_region_unaligned(os::_page_sizes[1] - 17, 1);
+        assert_eq(actual, expected);
+      }
+
+      // Return small page size for values less than a small page.
+      size_t small_page = small_page_size();
+      size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
+      assert_eq(small_page, actual);
+    }
+  }
+
  public:
   static void run_tests() {
-    test_page_size_for_region();
+    test_page_size_for_region_aligned();
     test_page_size_for_region_alignment();
+    test_page_size_for_region_unaligned();
   }
 };
 
--- a/src/share/vm/runtime/os.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/os.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -148,6 +148,7 @@
   static void   pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
   static void   pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
 
+  static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
 
  public:
   static void init(void);                      // Called before command line parsing
@@ -267,8 +268,13 @@
 
   // Returns the page size to use for a region of memory.
   // region_size / min_pages will always be greater than or equal to the
-  // returned value.
-  static size_t page_size_for_region(size_t region_size, size_t min_pages);
+  // returned value. The returned value will divide region_size.
+  static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
+
+  // Returns the page size to use for a region of memory.
+  // region_size / min_pages will always be greater than or equal to the
+  // returned value. The returned value might not divide region_size.
+  static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
 
   // Return the largest page size that can be used
   static size_t max_page_size() {
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -257,28 +257,28 @@
 // Call and loop predicates determine whether a transition to a higher
 // compilation level should be performed (pointers to predicate functions
 // are passed to common() transition function).
-bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
+bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
   switch(cur_level) {
   case CompLevel_none:
   case CompLevel_limited_profile: {
-    return loop_predicate_helper<CompLevel_none>(i, b, 1.0);
+    return loop_predicate_helper<CompLevel_none>(i, b, 1.0, method);
   }
   case CompLevel_full_profile: {
-    return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
+    return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
   }
   default:
     return true;
   }
 }
 
-bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
+bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
   switch(cur_level) {
   case CompLevel_none:
   case CompLevel_limited_profile: {
-    return call_predicate_helper<CompLevel_none>(i, b, 1.0);
+    return call_predicate_helper<CompLevel_none>(i, b, 1.0, method);
   }
   case CompLevel_full_profile: {
-    return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
+    return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0, method);
   }
   default:
     return true;
@@ -293,8 +293,8 @@
     int i = mdo->invocation_count();
     int b = mdo->backedge_count();
     double k = ProfileMaturityPercentage / 100.0;
-    return call_predicate_helper<CompLevel_full_profile>(i, b, k) ||
-           loop_predicate_helper<CompLevel_full_profile>(i, b, k);
+    return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) ||
+           loop_predicate_helper<CompLevel_full_profile>(i, b, k, method);
   }
   return false;
 }
@@ -313,7 +313,7 @@
       // If we were at full profile level, would we switch to full opt?
       if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
         next_level = CompLevel_full_optimization;
-      } else if ((this->*p)(i, b, cur_level)) {
+      } else if ((this->*p)(i, b, cur_level, method)) {
         next_level = CompLevel_full_profile;
       }
       break;
@@ -325,7 +325,7 @@
           if (mdo->would_profile()) {
             int mdo_i = mdo->invocation_count_delta();
             int mdo_b = mdo->backedge_count_delta();
-            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
+            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
               next_level = CompLevel_full_optimization;
             }
           } else {
--- a/src/share/vm/runtime/simpleThresholdPolicy.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/simpleThresholdPolicy.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -43,9 +43,9 @@
   // Call and loop predicates determine whether a transition to a higher compilation
   // level should be performed (pointers to predicate functions are passed to common_TF()).
   // Predicates also take compiler load into account.
-  typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
-  bool call_predicate(int i, int b, CompLevel cur_level);
-  bool loop_predicate(int i, int b, CompLevel cur_level);
+  typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
+  bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
+  bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
   // Common transition function. Given a predicate, determines whether a method should transition to another level.
   CompLevel common(Predicate p, Method* method, CompLevel cur_level);
   // Transition functions.
@@ -76,8 +76,8 @@
 
   // Predicate helpers are used by .*_predicate() methods as well as others.
   // They check the given counter values, multiplied by the scale, against the thresholds.
-  template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale);
-  template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale);
+  template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method);
+  template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method);
 
   // Get a compilation level for a given method.
   static CompLevel comp_level(Method* method) {
--- a/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -25,8 +25,14 @@
 #ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
 #define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
 
+#include "compiler/compilerOracle.hpp"
+
 template<CompLevel level>
-bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
+bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) {
+  double threshold_scaling;
+  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+    scale *= threshold_scaling;
+  }
   switch(level) {
   case CompLevel_none:
   case CompLevel_limited_profile:
@@ -40,7 +46,11 @@
 }
 
 template<CompLevel level>
-bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
+bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) {
+  double threshold_scaling;
+  if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
+    scale *= threshold_scaling;
+  }
   switch(level) {
   case CompLevel_none:
   case CompLevel_limited_profile:
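
The effect of the new parameter is that a per-method CompileThresholdScaling value, when present, multiplies into the scale before the counters are compared. A hedged sketch of that behavior, with a stub replacing CompilerOracle::has_option_value and an invented comparison in place of the tier-specific threshold checks:

    #include <stdio.h>

    // Stub standing in for CompilerOracle::has_option_value(method, option, value),
    // pretending the method matched a CompileCommand with CompileThresholdScaling=0.5.
    static bool lookup_threshold_scaling(double* value) {
      *value = 0.5;
      return true;
    }

    // Simplified analogue of call_predicate_helper: invocation count i and
    // backedge count b are tested against a scaled threshold.
    static bool call_predicate_sketch(int i, int b, double scale, int threshold) {
      double threshold_scaling;
      if (lookup_threshold_scaling(&threshold_scaling)) {
        scale *= threshold_scaling;  // per-method scaling folds into the global scale
      }
      return i >= threshold * scale || (i + b) >= threshold * scale * 2;
    }

    int main() {
      // With scaling 0.5, the method qualifies after half the usual invocations.
      printf("%d\n", call_predicate_sketch(100, 0, 1.0, 200));  // prints 1
      return 0;
    }
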
--- a/src/share/vm/runtime/virtualspace.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/virtualspace.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -38,7 +38,8 @@
 }
 
 ReservedSpace::ReservedSpace(size_t size) {
-  size_t page_size = os::page_size_for_region(size, 1);
+  // Want to use large pages where possible and pad with small pages.
+  size_t page_size = os::page_size_for_region_unaligned(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
   // Don't force the alignment to be large page aligned,
   // since that will waste memory.
@@ -617,7 +618,7 @@
 
 
 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
-  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
+  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 }
 
@@ -1239,7 +1240,7 @@
     case Disable:
       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
     case Commit:
-      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
+      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
     }
   }
 
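
The switch to os::page_size_for_region_unaligned() matches the new comment in ReservedSpace: prefer the largest page size that fits the region and pad the remainder with small pages, instead of requiring the region to be sized and aligned for large pages. A sketch of that selection rule under assumed page sizes; this illustrates the idea only and is not the os:: implementation:

    #include <stddef.h>
    #include <stdio.h>

    // Assumed available page sizes, largest first (2M large pages, 4K small pages).
    static const size_t page_sizes[] = { 2 * 1024 * 1024, 4 * 1024 };
    static const size_t num_page_sizes = sizeof(page_sizes) / sizeof(page_sizes[0]);

    // Pick the largest page size giving at least min_pages pages, without
    // requiring region_size to be a multiple of that page size; any tail
    // is assumed to be padded with small pages.
    static size_t page_size_for_region_unaligned_sketch(size_t region_size, size_t min_pages) {
      for (size_t i = 0; i < num_page_sizes; i++) {
        if (region_size / page_sizes[i] >= min_pages) {
          return page_sizes[i];
        }
      }
      return page_sizes[num_page_sizes - 1];  // fall back to small pages
    }

    int main() {
      // A 3M region still gets 2M pages: one large page plus small-page padding.
      printf("%zu\n", page_size_for_region_unaligned_sketch(3 * 1024 * 1024, 1));
      return 0;
    }
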
--- a/src/share/vm/runtime/vmStructs.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/runtime/vmStructs.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -351,11 +351,18 @@
   nonstatic_field(MethodData,           _arg_stack,                                    intx)                                  \
   nonstatic_field(MethodData,           _arg_returned,                                 intx)                                  \
   nonstatic_field(MethodData,           _tenure_traps,                                 uint)                                  \
+  nonstatic_field(MethodData,           _invoke_mask,                                  int)                                   \
+  nonstatic_field(MethodData,           _backedge_mask,                                int)                                   \
   nonstatic_field(DataLayout,           _header._struct._tag,                          u1)                                    \
   nonstatic_field(DataLayout,           _header._struct._flags,                        u1)                                    \
   nonstatic_field(DataLayout,           _header._struct._bci,                          u2)                                    \
   nonstatic_field(DataLayout,           _cells[0],                                     intptr_t)                              \
   nonstatic_field(MethodCounters,       _nmethod_age,                                  int)                                   \
+  nonstatic_field(MethodCounters,       _interpreter_invocation_limit,                 int)                                   \
+  nonstatic_field(MethodCounters,       _interpreter_backward_branch_limit,            int)                                   \
+  nonstatic_field(MethodCounters,       _interpreter_profile_limit,                    int)                                   \
+  nonstatic_field(MethodCounters,       _invoke_mask,                                  int)                                   \
+  nonstatic_field(MethodCounters,       _backedge_mask,                                int)                                   \
   nonstatic_field(MethodCounters,       _interpreter_invocation_count,                 int)                                   \
   nonstatic_field(MethodCounters,       _interpreter_throwout_count,                   u2)                                    \
   nonstatic_field(MethodCounters,       _number_of_breakpoints,                        u2)                                    \
@@ -661,6 +668,7 @@
       static_field(SystemDictionary,            WK_KLASS(WeakReference_klass),                 Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(FinalReference_klass),                Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(PhantomReference_klass),              Klass*)                               \
+      static_field(SystemDictionary,            WK_KLASS(Cleaner_klass),                       Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(Finalizer_klass),                     Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(Thread_klass),                        Klass*)                               \
       static_field(SystemDictionary,            WK_KLASS(ThreadGroup_klass),                   Klass*)                               \
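
Each nonstatic_field line exports a (type, field, offset) triple so that out-of-process tools such as the serviceability agent can read the newly added limits and masks from a live VM. A hedged sketch of the underlying idea using offsetof; the struct and entry layout below are illustrative, not the exact VMStructs macro expansion:

    #include <stddef.h>
    #include <stdio.h>

    // Illustrative stand-in for MethodCounters with the newly exported fields.
    struct MethodCounters {
      int _interpreter_invocation_limit;
      int _interpreter_backward_branch_limit;
      int _interpreter_profile_limit;
      int _invoke_mask;
      int _backedge_mask;
    };

    // Roughly what a nonstatic_field(...) entry boils down to: a named offset.
    struct VMStructEntry {
      const char* type_name;
      const char* field_name;
      size_t      offset;
    };

    static const VMStructEntry entries[] = {
      { "MethodCounters", "_invoke_mask",   offsetof(MethodCounters, _invoke_mask)   },
      { "MethodCounters", "_backedge_mask", offsetof(MethodCounters, _backedge_mask) },
    };

    int main() {
      for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
        printf("%s::%s at offset %zu\n",
               entries[i].type_name, entries[i].field_name, entries[i].offset);
      }
      return 0;
    }
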
--- a/src/share/vm/utilities/defaultStream.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/utilities/defaultStream.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -41,6 +41,8 @@
 
   void init();
   void init_log();
+  fileStream* open_file(const char* log_name);
+  void start_log();
   void finish_log();
   void finish_log_on_error(char *buf, int buflen);
  public:
--- a/src/share/vm/utilities/globalDefinitions.hpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Tue Jan 27 20:02:35 2015 -0800
@@ -1142,17 +1142,18 @@
   return ((x != NoLongBits) && (mask_long_bits(x, x - 1) == NoLongBits));
 }
 
-//* largest i such that 2^i <= x
-//  A negative value of 'x' will return '31'
+// Returns largest i such that 2^i <= x.
+// If x < 0, the function returns 31 on a 32-bit machine and 63 on a 64-bit machine.
+// If x == 0, the function returns -1.
 inline int log2_intptr(intptr_t x) {
   int i = -1;
-  uintptr_t p =  1;
+  uintptr_t p = 1;
   while (p != 0 && p <= (uintptr_t)x) {
     // p = 2^(i+1) && p <= x (i.e., 2^(i+1) <= x)
     i++; p *= 2;
   }
   // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1))
-  // (if p = 0 then overflow occurred and i = 31)
+  // If p = 0, overflow has occurred and i = 31 or i = 63 (depending on the machine word size).
   return i;
 }
 
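
The documented edge cases are easy to verify with a standalone copy of the loop:

    #include <stdint.h>
    #include <stdio.h>

    // Standalone copy of log2_intptr above, for checking the edge cases.
    static int log2_intptr_copy(intptr_t x) {
      int i = -1;
      uintptr_t p = 1;
      while (p != 0 && p <= (uintptr_t)x) {
        i++; p *= 2;
      }
      return i;
    }

    int main() {
      printf("%d\n", log2_intptr_copy(1));     // 0:  2^0 <= 1 < 2^1
      printf("%d\n", log2_intptr_copy(1000));  // 9:  2^9 = 512 <= 1000 < 1024
      printf("%d\n", log2_intptr_copy(0));     // -1: the loop body never runs
      printf("%d\n", log2_intptr_copy(-1));    // 31 or 63: (uintptr_t)x is the maximum value
      return 0;
    }
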
--- a/src/share/vm/utilities/ostream.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/utilities/ostream.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -367,7 +367,6 @@
 
 #define EXTRACHARLEN   32
 #define CURRENTAPPX    ".current"
-#define FILENAMEBUFLEN  1024
 // convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS
 char* get_datetime_string(char *buf, size_t len) {
   os::local_time_string(buf, len);
@@ -401,7 +400,6 @@
     buffer_length = strlen(log_name) + 1;
   }
 
-  // const char* star = strchr(basename, '*');
   const char* pts = strstr(basename, "%p");
   int pid_pos = (pts == NULL) ? -1 : (pts - nametail);
 
@@ -416,6 +414,11 @@
     buffer_length += strlen(tms);
   }
 
+  // File name is too long.
+  if (buffer_length > JVM_MAXPATHLEN) {
+    return NULL;
+  }
+
   // Create big enough buffer.
   char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
 
@@ -489,46 +492,88 @@
 void test_loggc_filename() {
   int pid;
   char  tms[32];
-  char  i_result[FILENAMEBUFLEN];
+  char  i_result[JVM_MAXPATHLEN];
   const char* o_result;
   get_datetime_string(tms, sizeof(tms));
   pid = os::current_process_id();
 
   // test.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test.log", tms);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "test.log", tms);
   o_result = make_log_name_internal("test.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result);
 
   // test-%t-%p.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%s-pid%u.log", tms, pid);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "test-%s-pid%u.log", tms, pid);
   o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result);
 
   // test-%t%p.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%spid%u.log", tms, pid);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "test-%spid%u.log", tms, pid);
   o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result);
 
   // %p%t.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u%s.log", pid, tms);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "pid%u%s.log", pid, tms);
   o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result);
 
   // %p-test.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u-test.log", pid);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "pid%u-test.log", pid);
   o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result);
 
   // %t.log
-  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "%s.log", tms);
+  jio_snprintf(i_result, JVM_MAXPATHLEN, "%s.log", tms);
   o_result = make_log_name_internal("%t.log", NULL, pid, tms);
   assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
   FREE_C_HEAP_ARRAY(char, o_result);
+
+  {
+    // longest filename
+    char longest_name[JVM_MAXPATHLEN];
+    memset(longest_name, 'a', sizeof(longest_name));
+    longest_name[JVM_MAXPATHLEN - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
+    assert(strcmp(longest_name, o_result) == 0, err_msg("longest name does not match. expected '%s' but got '%s'", longest_name, o_result));
+    FREE_C_HEAP_ARRAY(char, o_result);
+  }
+
+  {
+    // too long file name
+    char too_long_name[JVM_MAXPATHLEN + 100];
+    int too_long_length = sizeof(too_long_name);
+    memset(too_long_name, 'a', too_long_length);
+    too_long_name[too_long_length - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&too_long_name, NULL, pid, tms);
+    assert(o_result == NULL, err_msg("Too long file name should return NULL, but got '%s'", o_result));
+  }
+
+  {
+    // too long with timestamp
+    char longest_name[JVM_MAXPATHLEN];
+    memset(longest_name, 'a', JVM_MAXPATHLEN);
+    longest_name[JVM_MAXPATHLEN - 3] = '%';
+    longest_name[JVM_MAXPATHLEN - 2] = 't';
+    longest_name[JVM_MAXPATHLEN - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
+    assert(o_result == NULL, err_msg("Too long file name after timestamp expansion should return NULL, but got '%s'", o_result));
+  }
+
+  {
+    // too long with pid
+    char longest_name[JVM_MAXPATHLEN];
+    memset(longest_name, 'a', JVM_MAXPATHLEN);
+    longest_name[JVM_MAXPATHLEN - 3] = '%';
+    longest_name[JVM_MAXPATHLEN - 2] = 'p';
+    longest_name[JVM_MAXPATHLEN - 1] = '\0';
+    o_result = make_log_name_internal((const char*)&longest_name, NULL, pid, tms);
+    assert(o_result == NULL, err_msg("Too long file name after pid expansion should return NULL, but got '%s'", o_result));
+  }
 }
 #endif // PRODUCT
 
@@ -637,9 +682,16 @@
   _bytes_written = 0L;
   _file_name = make_log_name(file_name, NULL);
 
+  if (_file_name == NULL) {
+    warning("Cannot open file %s: file name is too long.\n", file_name);
+    _need_close = false;
+    UseGCLogFileRotation = false;
+    return;
+  }
+
   // gc log file rotation
   if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) {
-    char tempbuf[FILENAMEBUFLEN];
+    char tempbuf[JVM_MAXPATHLEN];
     jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
     _file = fopen(tempbuf, "w");
   } else {
@@ -671,10 +723,10 @@
 // concurrent GC threads to run in parallel with the VMThread at safepoint, write and rotate_log
 // must be synchronized.
 void gcLogFileStream::rotate_log(bool force, outputStream* out) {
-  char time_msg[FILENAMEBUFLEN];
+  char time_msg[O_BUFLEN];
   char time_str[EXTRACHARLEN];
-  char current_file_name[FILENAMEBUFLEN];
-  char renamed_file_name[FILENAMEBUFLEN];
+  char current_file_name[JVM_MAXPATHLEN];
+  char renamed_file_name[JVM_MAXPATHLEN];
 
   if (!should_rotate(force)) {
     return;
@@ -713,12 +765,15 @@
   // have the form extended_filename.<i>.current, where i is the current rotation
   // file number. After it reaches the maximum file size, the file is saved and renamed
   // with .current removed from its tail.
-  size_t filename_len = strlen(_file_name);
   if (_file != NULL) {
-    jio_snprintf(renamed_file_name, filename_len + EXTRACHARLEN, "%s.%d",
+    jio_snprintf(renamed_file_name, JVM_MAXPATHLEN, "%s.%d",
                  _file_name, _cur_file_num);
-    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
-                 _file_name, _cur_file_num);
+    int result = jio_snprintf(current_file_name, JVM_MAXPATHLEN,
+                              "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
+    if (result >= JVM_MAXPATHLEN) {
+      warning("Cannot create new log file name: %s: file name is too long.\n", current_file_name);
+      return;
+    }
 
     const char* msg = force ? "GC log rotation request has been received."
                             : "GC log file has reached the maximum size.";
@@ -757,19 +812,23 @@
 
   _cur_file_num++;
   if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0;
-  jio_snprintf(current_file_name,  filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
+  int result = jio_snprintf(current_file_name,  JVM_MAXPATHLEN, "%s.%d" CURRENTAPPX,
                _file_name, _cur_file_num);
+  if (result >= JVM_MAXPATHLEN) {
+    warning("Cannot create new log file name: %s: file name is too long.\n", current_file_name);
+    return;
+  }
+
   _file = fopen(current_file_name, "w");
 
   if (_file != NULL) {
     _bytes_written = 0L;
     _need_close = true;
     // reuse current_file_name for time_msg
-    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN,
+    jio_snprintf(current_file_name, JVM_MAXPATHLEN,
                  "%s.%d", _file_name, _cur_file_num);
     jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n",
-                           os::local_time_string((char *)time_str, sizeof(time_str)),
-                           current_file_name);
+                 os::local_time_string((char *)time_str, sizeof(time_str)), current_file_name);
     write(time_msg, strlen(time_msg));
 
     if (out != NULL) {
@@ -817,32 +876,64 @@
   return _log_file != NULL;
 }
 
+fileStream* defaultStream::open_file(const char* log_name) {
+  const char* try_name = make_log_name(log_name, NULL);
+  if (try_name == NULL) {
+    warning("Cannot open file %s: file name is too long.\n", log_name);
+    return NULL;
+  }
+
+  fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
+  FREE_C_HEAP_ARRAY(char, try_name);
+  if (file->is_open()) {
+    return file;
+  }
+
+  // Try again to open the file in the temp directory.
+  delete file;
+  char warnbuf[O_BUFLEN*2];
+  jio_snprintf(warnbuf, sizeof(warnbuf), "Warning:  Cannot open log file: %s\n", log_name);
+  // Note:  This feature is for maintainer use only.  No need for L10N.
+  jio_print(warnbuf);
+  try_name = make_log_name(log_name, os::get_temp_directory());
+  if (try_name == NULL) {
+    warning("Cannot open file %s: file name is too long for directory %s.\n", log_name, os::get_temp_directory());
+    return NULL;
+  }
+
+  jio_snprintf(warnbuf, sizeof(warnbuf),
+               "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
+  jio_print(warnbuf);
+
+  file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
+  FREE_C_HEAP_ARRAY(char, try_name);
+  if (file->is_open()) {
+    return file;
+  }
+
+  delete file;
+  return NULL;
+}
+
 void defaultStream::init_log() {
   // %%% Need a MutexLocker?
   const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
-  const char* try_name = make_log_name(log_name, NULL);
-  fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
-  if (!file->is_open()) {
-    // Try again to open the file.
-    char warnbuf[O_BUFLEN*2];
-    jio_snprintf(warnbuf, sizeof(warnbuf),
-                 "Warning:  Cannot open log file: %s\n", try_name);
-    // Note:  This feature is for maintainer use only.  No need for L10N.
-    jio_print(warnbuf);
-    FREE_C_HEAP_ARRAY(char, try_name);
-    try_name = make_log_name(log_name, os::get_temp_directory());
-    jio_snprintf(warnbuf, sizeof(warnbuf),
-                 "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
-    jio_print(warnbuf);
-    delete file;
-    file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
+  fileStream* file = open_file(log_name);
+
+  if (file != NULL) {
+    _log_file = file;
+    _outer_xmlStream = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
+    start_log();
+  } else {
+    // and leave xtty as NULL
+    LogVMOutput = false;
+    DisplayVMOutput = true;
+    LogCompilation = false;
   }
-  FREE_C_HEAP_ARRAY(char, try_name);
+}
 
-  if (file->is_open()) {
-    _log_file = file;
-    xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
-    _outer_xmlStream = xs;
+void defaultStream::start_log() {
+  xmlStream* xs = _outer_xmlStream;
     if (this == tty)  xtty = xs;
     // Write XML header.
     xs->print_cr("<?xml version='1.0' encoding='UTF-8'?>");
@@ -897,13 +988,6 @@
     xs->head("tty");
     // All further non-markup text gets copied to the tty:
     xs->_text = this;  // requires friend declaration!
-  } else {
-    delete(file);
-    // and leave xtty as NULL
-    LogVMOutput = false;
-    DisplayVMOutput = true;
-    LogCompilation = false;
-  }
 }
 
 // finish_log() is called during normal VM shutdown. finish_log_on_error() is
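
The extracted open_file() keeps the original try-then-fall-back shape: attempt the requested path, warn and retry in the temp directory, and return NULL only if both attempts fail, at which point init_log() disables logging. A minimal sketch of that shape, with plain stdio standing in for fileStream and a hard-coded /tmp in place of os::get_temp_directory():

    #include <stdio.h>
    #include <stdlib.h>

    // Sketch of the two-step open; names and messages are illustrative.
    static FILE* open_log_with_fallback(const char* log_name) {
      FILE* f = fopen(log_name, "w");
      if (f != NULL) {
        return f;
      }
      fprintf(stderr, "Warning:  Cannot open log file: %s\n", log_name);

      char fallback[512];
      snprintf(fallback, sizeof(fallback), "/tmp/%s", log_name);
      fprintf(stderr, "Warning:  Forcing log file %s\n", fallback);
      return fopen(fallback, "w");  // NULL if the fallback fails too
    }

    int main() {
      FILE* f = open_log_with_fallback("hotspot_demo.log");
      if (f == NULL) {
        return EXIT_FAILURE;  // the caller would disable logging here
      }
      fputs("log opened\n", f);
      fclose(f);
      return 0;
    }
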
--- a/src/share/vm/utilities/vmError.cpp	Tue Jan 27 05:51:00 2015 -0800
+++ b/src/share/vm/utilities/vmError.cpp	Tue Jan 27 20:02:35 2015 -0800
@@ -22,6 +22,7 @@
  *
  */
 
+#include <fcntl.h>
 #include "precompiled.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/compileBroker.hpp"
@@ -807,7 +808,8 @@
 static int expand_and_open(const char* pattern, char* buf, size_t buflen, size_t pos) {
   int fd = -1;
   if (Arguments::copy_expand_pid(pattern, strlen(pattern), &buf[pos], buflen - pos)) {
-    fd = open(buf, O_RDWR | O_CREAT | O_TRUNC, 0666);
+    // the O_EXCL flag will cause the open to fail if the file exists
+    fd = open(buf, O_RDWR | O_CREAT | O_EXCL, 0666);
   }
   return fd;
 }
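
O_CREAT together with O_EXCL makes file creation atomic with the existence check: if the file already exists, open() fails with EEXIST instead of truncating it, which is the point of dropping O_TRUNC on the error-report path. A small POSIX illustration:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main() {
      // First open creates the file; O_EXCL guarantees we created it ourselves.
      int fd = open("hs_err_demo.log", O_RDWR | O_CREAT | O_EXCL, 0666);
      if (fd >= 0) {
        close(fd);
      }
      // Second open fails with EEXIST rather than clobbering the existing file.
      fd = open("hs_err_demo.log", O_RDWR | O_CREAT | O_EXCL, 0666);
      if (fd < 0 && errno == EEXIST) {
        printf("second open refused: file already exists\n");
      }
      unlink("hs_err_demo.log");
      return 0;
    }
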
--- a/test/compiler/arguments/CheckCompileThresholdScaling.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/compiler/arguments/CheckCompileThresholdScaling.java	Tue Jan 27 20:02:35 2015 -0800
@@ -54,7 +54,7 @@
     //
     // Tier0InvokeNotifyFreqLog, Tier0BackedgeNotifyFreqLog,
     // Tier3InvocationThreshold, Tier3MinInvocationThreshold,
-    // Tier3CompileThreshold, and Tier3BackEdgeThreshold,
+    // Tier3CompileThreshold, Tier3BackEdgeThreshold,
     // Tier2InvokeNotifyFreqLog, Tier2BackedgeNotifyFreqLog,
     // Tier3InvokeNotifyFreqLog, Tier3BackedgeNotifyFreqLog,
     // Tier23InlineeNotifyFreqLog, Tier4InvocationThreshold,
--- a/test/compiler/codecache/jmx/PoolsIndependenceTest.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/compiler/codecache/jmx/PoolsIndependenceTest.java	Tue Jan 27 20:02:35 2015 -0800
@@ -98,11 +98,13 @@
             return false;
         });
         for (BlobType bt : BlobType.getAvailable()) {
-            int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
-            Asserts.assertEQ(counters.get(bt.getMemoryPool().getName()).get(),
-                    expectedNotificationsAmount, String.format("Unexpected "
-                            + "amount of notifications for pool: %s",
-                            bt.getMemoryPool().getName()));
+            if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
+                int expectedNotificationsAmount = bt.equals(btype) ? 1 : 0;
+                Asserts.assertEQ(counters.get(bt.getMemoryPool().getName()).get(),
+                        expectedNotificationsAmount, String.format("Unexpected "
+                                + "amount of notifications for pool: %s",
+                                bt.getMemoryPool().getName()));
+            }
         }
         try {
             ((NotificationEmitter) ManagementFactory.getMemoryMXBean()).
--- a/test/compiler/codecache/jmx/ThresholdNotificationsTest.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/compiler/codecache/jmx/ThresholdNotificationsTest.java	Tue Jan 27 20:02:35 2015 -0800
@@ -52,7 +52,9 @@
 
     public static void main(String[] args) {
         for (BlobType bt : BlobType.getAvailable()) {
-            new ThresholdNotificationsTest(bt).runTest();
+            if (CodeCacheUtils.isCodeHeapPredictable(bt)) {
+                new ThresholdNotificationsTest(bt).runTest();
+            }
         }
     }
 
--- a/test/compiler/loopopts/7052494/Test7052494.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/compiler/loopopts/7052494/Test7052494.java	Tue Jan 27 20:02:35 2015 -0800
@@ -25,7 +25,6 @@
 /**
  * @test
  * @bug 7052494
- * @ignore 7154567
  * @summary Eclipse test fails on JDK 7 b142
  *
  * @run main/othervm -Xbatch Test7052494
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/stringopts/TestOptimizeStringConcat.java	Tue Jan 27 20:02:35 2015 -0800
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 SAP AG.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8068909
+ * @key regression
+ * @summary test that string optimizations produce code that doesn't lead to a crash.
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement TestOptimizeStringConcat
+ * @author axel.siebenborn@sap.com
+ */
+public class TestOptimizeStringConcat {
+
+    static boolean checkArgumentSyntax(String value, String allowedchars, String notallowedchars, String logmsg) {
+        String rc = null;
+
+        int maxchar = 99999;
+        int minchar = 1;
+        if ((allowedchars != null && notallowedchars != null) || minchar > maxchar) {
+            rc = "internal error";
+        } else {
+            if (value == null) {
+                rc = "the value null is not allowed, it is missing";
+            } else if (value != null && minchar > 0 && value.trim().equals("")) {
+                rc = "the value must not be empty";
+            } else if (value != null) {
+                if (value.length() < minchar || value.length() > maxchar) {
+                    if (rc == null) {
+                        rc = "the value length must be between +minchar+ and +maxchar";
+                    }
+                }
+                char[] _value = value.toCharArray();
+                boolean dotfound = false;
+                int i = 1;
+                if (_value[i] == '.' && !dotfound) {
+                    dotfound = true;
+                } else if (allowedchars != null && allowedchars.indexOf(_value[i]) == -1) {
+                    if (rc == null) {
+                        rc = "the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
+                    } else {
+                        rc += " / the value contains an illegal character: '" + _value[i] + "', only following characters are allowed: '+allowedchars+'";
+                    }
+                } else if (notallowedchars != null && notallowedchars.indexOf(_value[i]) != -1) {
+                    if (rc == null) {
+                        rc = "the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
+                    } else {
+                        rc += " / the value contains an illegal character: '" + _value[i] + "', following characters are not allowed '+notallowedchars+'";
+                    }
+                }
+            }
+        }
+
+        if (rc != null) {
+            System.out.println(logmsg + " ==> " + rc);
+            return false;
+        }
+        return true;
+    }
+
+    public static void main(String[] args) {
+        boolean failed = false;
+        for (int i = 0; i < 10000; i++) {
+            failed |= !checkArgumentSyntax("theName", null, "\"<&", "Error consistencyCheck: name in component definition");
+            failed |= !checkArgumentSyntax(null, null, "\"<&", "Error consistencyCheck: name in component definition");
+            failed |= !checkArgumentSyntax("42", "0123456789.", null, "Error consistencyCheck: counter in component definition");
+        }
+        System.out.println(failed);
+    }
+}
--- a/test/gc/TestNUMAPageSize.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/gc/TestNUMAPageSize.java	Tue Jan 27 20:02:35 2015 -0800
@@ -25,6 +25,7 @@
  * @test TestNUMAPageSize
  * @summary Make sure that start up with NUMA support does not cause problems.
  * @bug 8061467
+ * @requires (vm.opt.AggressiveOpts == null) | (vm.opt.AggressiveOpts == false)
  * @key gc
  * @key regression
  * @run main/othervm -Xmx8M -XX:+UseNUMA TestNUMAPageSize
--- a/test/gc/TestSmallHeap.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/gc/TestSmallHeap.java	Tue Jan 27 20:02:35 2015 -0800
@@ -25,6 +25,7 @@
  * @test TestSmallHeap
  * @bug 8067438
  * @requires vm.gc=="null"
+ * @requires (vm.opt.AggressiveOpts=="null") | (vm.opt.AggressiveOpts=="false")
  * @summary Verify that starting the VM with a small heap works
  * @library /testlibrary /../../test/lib
  * @build TestSmallHeap
@@ -33,8 +34,9 @@
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseSerialGC TestSmallHeap
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseG1GC TestSmallHeap
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx2m -XX:+UseConcMarkSweepGC TestSmallHeap
- *
- * Note: It would be nice to verify the minimal supported heap size (2m) here,
+ */
+
+/* Note: It would be nice to verify the minimal supported heap size (2m) here,
  * but we align the heap size based on the card table size. And the card table
 * size is aligned based on the minimal page size provided by the OS. This
  * means that on most platforms, where the minimal page size is 4k, we get a
--- a/test/gc/g1/TestHumongousCodeCacheRoots.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/gc/g1/TestHumongousCodeCacheRoots.java	Tue Jan 27 20:02:35 2015 -0800
@@ -116,7 +116,14 @@
 
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldHaveExitValue(0);
+    try {
+        output.shouldHaveExitValue(0);
+    } catch (RuntimeException e) {
+        // It's OK if there is no client VM in the JDK.
+        if (output.firstMatch("Unrecognized option: -client") == null) {
+            throw e;
+        }
+    }
 
     return output;
   }
--- a/test/serviceability/dcmd/compiler/CompilerQueueTest.java	Tue Jan 27 05:51:00 2015 -0800
+++ b/test/serviceability/dcmd/compiler/CompilerQueueTest.java	Tue Jan 27 20:02:35 2015 -0800
@@ -25,6 +25,7 @@
  * @test CompilerQueueTest
  * @bug 8054889
  * @library ..
+ * @ignore 8069160
  * @build DcmdUtil CompilerQueueTest
  * @run main CompilerQueueTest
  * @run main/othervm -XX:-TieredCompilation CompilerQueueTest